Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/botocore/retries/standard.py: 33%
201 statements
« prev ^ index » next coverage.py v7.3.2, created at 2023-12-08 06:51 +0000
« prev ^ index » next coverage.py v7.3.2, created at 2023-12-08 06:51 +0000
1"""Standard retry behavior.
3This contains the default standard retry behavior.
4It provides consistent behavior with other AWS SDKs.
6The key base classes used for retries:
8 * ``BaseRetryableChecker`` - Use to check a specific condition that
9 indicates a retry should happen. This can include things like
10 max attempts, HTTP status code checks, error code checks etc.
11 * ``RetryBackoff`` - Use to determine how long we should backoff until
12 we retry a request. This is the class that will implement delay such
13 as exponential backoff.
14 * ``RetryPolicy`` - Main class that determines if a retry should
 15 happen. It can combine data from various BaseRetryableCheckers
16 to make a final call as to whether or not a retry should happen.
17 It then uses a ``BaseRetryBackoff`` to determine how long to delay.
18 * ``RetryHandler`` - The bridge between botocore's event system
19 used by endpoint.py to manage retries and the interfaces defined
20 in this module.
22This allows us to define an API that has minimal coupling to the event
23based API used by botocore.
25"""
26import logging
27import random
29from botocore.exceptions import (
30 ConnectionError,
31 ConnectTimeoutError,
32 HTTPClientError,
33 ReadTimeoutError,
34)
35from botocore.retries import quota, special
36from botocore.retries.base import BaseRetryableChecker, BaseRetryBackoff
38DEFAULT_MAX_ATTEMPTS = 3
39logger = logging.getLogger(__name__)
def register_retry_handler(client, max_attempts=DEFAULT_MAX_ATTEMPTS):
    """Wire standard-mode retry handling onto a botocore client.

    Registers a quota-release hook on the service's ``after-call`` event
    and a retry-decision handler on its ``needs-retry`` event.

    :param client: A botocore client instance.
    :param max_attempts: Maximum total attempts (including the first
        request) before giving up.
    :return: The ``RetryHandler`` that was registered.
    """
    quota_checker = RetryQuotaChecker(quota.RetryQuota())
    service_event_name = client.meta.service_model.service_id.hyphenize()
    # Give capacity back to the shared quota whenever a call completes.
    client.meta.events.register(
        f'after-call.{service_event_name}', quota_checker.release_retry_quota
    )
    retry_handler = RetryHandler(
        retry_policy=RetryPolicy(
            retry_checker=StandardRetryConditions(max_attempts=max_attempts),
            retry_backoff=ExponentialBackoff(),
        ),
        retry_event_adapter=RetryEventAdapter(),
        retry_quota=quota_checker,
    )
    client.meta.events.register(
        'needs-retry.%s' % service_event_name,
        retry_handler.needs_retry,
        unique_id='retry-config-%s' % service_event_name,
    )
    return retry_handler
class RetryHandler:
    """Bridge between botocore's event system and this module.

    This class is intended to be hooked to botocore's event system
    as an event handler.
    """

    def __init__(self, retry_policy, retry_event_adapter, retry_quota):
        self._retry_policy = retry_policy
        self._retry_event_adapter = retry_event_adapter
        self._retry_quota = retry_quota

    def needs_retry(self, **kwargs):
        """Connect as a handler to the needs-retry event.

        :return: The delay in seconds before the next attempt, or
            ``None`` when no retry should happen.
        """
        context = self._retry_event_adapter.create_retry_context(**kwargs)
        delay = None
        if not self._retry_policy.should_retry(context):
            logger.debug("Not retrying request.")
        elif self._retry_quota.acquire_retry_quota(context):
            # A retry only happens once we have secured capacity from
            # the shared retry quota.
            delay = self._retry_policy.compute_retry_delay(context)
            logger.debug(
                "Retry needed, retrying request after delay of: %s",
                delay,
            )
        else:
            logger.debug(
                "Retry needed but retry quota reached, "
                "not retrying request."
            )
        self._retry_event_adapter.adapt_retry_response_from_context(context)
        return delay
class RetryEventAdapter:
    """Adapter to the existing retry interface used in the endpoints layer.

    The interface for deciding whether a retry is needed is event based
    and lives in ``botocore.endpoint``. It has grown organically over
    the years; this adapter translates it into the interface used by
    the new retry strategies.
    """

    def create_retry_context(self, **kwargs):
        """Create a RetryContext from the needs-retry event kwargs."""
        response = kwargs['response']
        if response is not None:
            http_response, parsed_response = response
        else:
            # No response means an exception was raised before one was
            # received, e.g. a ConnectionError from the HTTP layer.
            http_response = None
            parsed_response = None
        # Copy the relevant kwargs into a separate context object so the
        # retry checks never see (or mutate) the raw event arguments.
        return RetryContext(
            attempt_number=kwargs['attempts'],
            operation_model=kwargs['operation'],
            http_response=http_response,
            parsed_response=parsed_response,
            caught_exception=kwargs['caught_exception'],
            request_context=kwargs['request_dict']['context'],
        )

    def adapt_retry_response_from_context(self, context):
        """Copy retry metadata from the context back onto the response."""
        # Only the object that is handed back to the end user is
        # mutated; the retry classes never alter needs-retry inputs.
        parsed = context.parsed_response
        if parsed is not None:
            metadata = context.get_retry_metadata()
            parsed.setdefault('ResponseMetadata', {}).update(metadata)
153# Implementation note: this is meant to encapsulate all the misc stuff
154# that gets sent in the needs-retry event. This is mapped so that params
155# are clearer and more explicit.
class RetryContext:
    """Normalize a response that we use to check if a retry should occur.

    This class smoothes over the different kinds of responses we may
    get back from a service:

    * A modeled error response containing a service error code and
      message.
    * A raw HTTP response with no protocol-specific error keys.
    * An exception raised while attempting to get a response (e.g. a
      ConnectionError from the HTTP layer, meaning no response was
      ever received).

    At least one of those attributes is guaranteed to be non-``None``.

    This class is a read-only view into the properties associated with
    a possibly-retryable response; none of the properties should be
    modified directly.
    """

    def __init__(
        self,
        attempt_number,
        operation_model=None,
        parsed_response=None,
        http_response=None,
        caught_exception=None,
        request_context=None,
    ):
        # 1-based attempt number of the request being evaluated.
        self.attempt_number = attempt_number
        self.operation_model = operation_model
        # Dictionary produced by parsing the service's HTTP response.
        self.parsed_response = parsed_response
        # An instance of botocore.awsrequest.AWSResponse.
        self.http_response = http_response
        # Exception raised while trying to retrieve a response, if any.
        self.caught_exception = caught_exception
        # Per-request state bag (added to the request dict); used e.g.
        # to track acquired retry quota capacity.
        self.request_context = {} if request_context is None else request_context
        self._retry_metadata = {}

    # Misc helpers shared by the various checkers.
    def get_error_code(self):
        """Return the service error code from the parsed response.

        Returns ``None`` when there is no parsed response or no error
        code could be found in it.
        """
        parsed = self.parsed_response
        if parsed is None:
            return None
        error = parsed.get('Error', {})
        if isinstance(error, dict):
            return error.get('Code')
        return None

    def add_retry_metadata(self, **kwargs):
        """Record key/value pairs describing retry checks/validations.

        The retry handler merges these into the response's
        ``ResponseMetadata`` before it is returned to the user.
        """
        self._retry_metadata.update(**kwargs)

    def get_retry_metadata(self):
        """Return a copy of the metadata recorded so far."""
        return dict(self._retry_metadata)
class RetryPolicy:
    """Pair a retryability checker with a backoff strategy."""

    def __init__(self, retry_checker, retry_backoff):
        self._retry_checker = retry_checker
        self._retry_backoff = retry_backoff

    def should_retry(self, context):
        """Return True when the context warrants another attempt."""
        return self._retry_checker.is_retryable(context)

    def compute_retry_delay(self, context):
        """Return how long (seconds) to wait before the next attempt."""
        return self._retry_backoff.delay_amount(context)
class ExponentialBackoff(BaseRetryBackoff):
    # Growth base and the ceiling for any single delay, in seconds.
    _BASE = 2
    _MAX_BACKOFF = 20

    def __init__(self, max_backoff=20, random=random.random):
        # ``random`` is a zero-arg callable returning a float in
        # [0, 1); it is injectable for deterministic testing.
        self._base = self._BASE
        self._max_backoff = max_backoff
        self._random = random

    def delay_amount(self, context):
        """Calculate a delay using truncated exponential backoff with jitter.

        Implements::

            t_i = min(rand(0, 1) * 2 ** attempt, MAX_BACKOFF)

        where ``i`` is the 0-based request attempt.
        """
        # context.attempt_number is 1-based, so shift by one; the very
        # first retry then delays at most ``rand(0, 1)`` seconds.
        exponent = context.attempt_number - 1
        jittered = self._random() * (self._base**exponent)
        return min(jittered, self._max_backoff)
class MaxAttemptsChecker(BaseRetryableChecker):
    """Deny retries once the configured attempt count is exhausted."""

    def __init__(self, max_attempts):
        self._max_attempts = max_attempts

    def is_retryable(self, context):
        # Advertise the effective max attempts through the shared
        # 'retries' request context so other components can see it.
        retries_context = context.request_context.get('retries')
        if retries_context:
            retries_context['max'] = max(
                retries_context.get('max', 0), self._max_attempts
            )
        if context.attempt_number < self._max_attempts:
            return True
        logger.debug("Max attempts of %s reached.", self._max_attempts)
        context.add_retry_metadata(MaxAttemptsReached=True)
        return False
class TransientRetryableChecker(BaseRetryableChecker):
    """Retry on transient failures.

    A response is considered transient when it carries a known
    transient error code, a 5xx status code, or when the attempt
    raised a connection-level exception.
    """

    _TRANSIENT_ERROR_CODES = [
        'RequestTimeout',
        'RequestTimeoutException',
        'PriorRequestNotComplete',
    ]
    _TRANSIENT_STATUS_CODES = [500, 502, 503, 504]
    _TRANSIENT_EXCEPTION_CLS = (
        ConnectionError,
        HTTPClientError,
    )

    def __init__(
        self,
        transient_error_codes=None,
        transient_status_codes=None,
        transient_exception_cls=None,
    ):
        # Copy list defaults so instances never share mutable state.
        self._transient_error_codes = (
            list(self._TRANSIENT_ERROR_CODES)
            if transient_error_codes is None
            else transient_error_codes
        )
        self._transient_status_codes = (
            list(self._TRANSIENT_STATUS_CODES)
            if transient_status_codes is None
            else transient_status_codes
        )
        self._transient_exception_cls = (
            self._TRANSIENT_EXCEPTION_CLS
            if transient_exception_cls is None
            else transient_exception_cls
        )

    def is_retryable(self, context):
        # A transient service error code decides immediately.
        if context.get_error_code() in self._transient_error_codes:
            return True
        # Otherwise consult the raw HTTP status, if a response exists.
        response = context.http_response
        if (
            response is not None
            and response.status_code in self._transient_status_codes
        ):
            return True
        # Finally fall back to any exception raised during the attempt.
        exception = context.caught_exception
        if exception is not None:
            return isinstance(exception, self._transient_exception_cls)
        return False
class ThrottledRetryableChecker(BaseRetryableChecker):
    """Retry when the service reports the request was throttled."""

    # This is the union of all error codes we've seen that represent
    # a throttled error.
    _THROTTLED_ERROR_CODES = [
        'Throttling',
        'ThrottlingException',
        'ThrottledException',
        'RequestThrottledException',
        'TooManyRequestsException',
        'ProvisionedThroughputExceededException',
        'TransactionInProgressException',
        'RequestLimitExceeded',
        'BandwidthLimitExceeded',
        'LimitExceededException',
        'RequestThrottled',
        'SlowDown',
        'PriorRequestNotComplete',
        'EC2ThrottledException',
    ]

    def __init__(self, throttled_error_codes=None):
        if throttled_error_codes is None:
            throttled_error_codes = list(self._THROTTLED_ERROR_CODES)
        self._throttled_error_codes = throttled_error_codes

    def is_retryable(self, context):
        # Only the error code from a parsed service response can mark
        # a response as throttled; raw HTTP data is not consulted.
        return context.get_error_code() in self._throttled_error_codes
class ModeledRetryableChecker(BaseRetryableChecker):
    """Check if an error has been modeled as retryable."""

    def __init__(self):
        self._error_detector = ModeledRetryErrorDetector()

    def is_retryable(self, context):
        # Without an error code there is nothing to match the model on.
        if context.get_error_code() is None:
            return False
        # Any detected type (transient or throttling) means retryable.
        return self._error_detector.detect_error_type(context) is not None
class ModeledRetryErrorDetector:
    """Checks whether or not an error is a modeled retryable error."""

    # These are the possible return values of detect_error_type().
    TRANSIENT_ERROR = 'TRANSIENT_ERROR'
    THROTTLING_ERROR = 'THROTTLING_ERROR'

    # This class is lower level than ModeledRetryableChecker (which
    # implements BaseRetryableChecker); it lets callers distinguish
    # between the different kinds of retryable errors.

    def detect_error_type(self, context):
        """Detect the error type associated with an error code and model.

        :return: ``self.TRANSIENT_ERROR`` for a transient error,
            ``self.THROTTLING_ERROR`` for a throttling error, or
            ``None`` when the error is neither.
        """
        op_model = context.operation_model
        if op_model is None or not op_model.error_shapes:
            return None
        error_code = context.get_error_code()
        for shape in op_model.error_shapes:
            retryable = shape.metadata.get('retryable')
            if retryable is None:
                continue
            # The error matches either by a modeled error code or, if
            # none is modeled, by the shape's name.
            modeled_code = shape.metadata.get('error', {}).get('code')
            if error_code == (modeled_code or shape.name):
                if retryable.get('throttling'):
                    return self.THROTTLING_ERROR
                return self.TRANSIENT_ERROR
        return None
class ThrottlingErrorDetector:
    """Detect throttling errors via fixed codes and the service model."""

    def __init__(self, retry_event_adapter):
        self._modeled_error_detector = ModeledRetryErrorDetector()
        self._fixed_error_code_detector = ThrottledRetryableChecker()
        self._retry_event_adapter = retry_event_adapter

    # This expects the kwargs from needs-retry to be passed through.
    def is_throttling_error(self, **kwargs):
        context = self._retry_event_adapter.create_retry_context(**kwargs)
        # A known throttling error code short-circuits the model check.
        if self._fixed_error_code_detector.is_retryable(context):
            return True
        detected = self._modeled_error_detector.detect_error_type(context)
        return detected == self._modeled_error_detector.THROTTLING_ERROR
class StandardRetryConditions(BaseRetryableChecker):
    """Concrete class that implements the standard retry policy checks.

    Specifically:

        not max_attempts and (transient or throttled or modeled_retry)

    """

    def __init__(self, max_attempts=DEFAULT_MAX_ATTEMPTS):
        # Convenience wrapper bundling the standard retry conditions
        # into a single checker.
        self._max_attempts_checker = MaxAttemptsChecker(max_attempts)
        special_case_checkers = OrRetryChecker(
            [
                special.RetryIDPCommunicationError(),
                special.RetryDDBChecksumError(),
            ]
        )
        self._additional_checkers = OrRetryChecker(
            [
                TransientRetryableChecker(),
                ThrottledRetryableChecker(),
                ModeledRetryableChecker(),
                special_case_checkers,
            ]
        )

    def is_retryable(self, context):
        # Never exceed max attempts; beyond that, any single condition
        # is enough to retry.
        if not self._max_attempts_checker.is_retryable(context):
            return False
        return self._additional_checkers.is_retryable(context)
class OrRetryChecker(BaseRetryableChecker):
    """Succeed if at least one wrapped checker would retry."""

    def __init__(self, checkers):
        self._checkers = checkers

    def is_retryable(self, context):
        # Short-circuits on the first checker that says "retry".
        for checker in self._checkers:
            if checker.is_retryable(context):
                return True
        return False
class RetryQuotaChecker:
    """Gate retries on a shared client-side retry quota.

    Implementation note: this is deliberately not a
    BaseRetryableChecker. It does more than check whether a retry is
    possible — it mutates quota state, so we have to be careful
    when/how it is called. A BaseRetryableChecker implies
    ``is_retryable(context)`` can be called repeatedly without side
    effects, which does not hold here.
    """

    _RETRY_COST = 5
    _NO_RETRY_INCREMENT = 1
    _TIMEOUT_RETRY_REQUEST = 10
    _TIMEOUT_EXCEPTIONS = (ConnectTimeoutError, ReadTimeoutError)

    def __init__(self, quota):
        self._quota = quota
        # Tracks the most recent capacity amount taken from the quota.
        self._last_amount_acquired = None

    def acquire_retry_quota(self, context):
        """Try to reserve quota for a retry; return False if exhausted."""
        # Timeout failures are charged at a higher rate.
        if self._is_timeout_error(context):
            capacity_amount = self._TIMEOUT_RETRY_REQUEST
        else:
            capacity_amount = self._RETRY_COST
        if not self._quota.acquire(capacity_amount):
            context.add_retry_metadata(RetryQuotaReached=True)
            return False
        # Record how much was taken so the same amount can be released
        # after an eventual success; the cost varies with the error.
        context.request_context['retry_quota_capacity'] = capacity_amount
        return True

    def _is_timeout_error(self, context):
        return isinstance(context.caught_exception, self._TIMEOUT_EXCEPTIONS)

    # This is intended to be hooked up to ``after-call``.
    def release_retry_quota(self, context, http_response, **kwargs):
        """Release quota after a call completes.

        Three possible outcomes:
        1. Non-2xx HTTP response: give no quota back.
        2. Success with no prior retries: give _NO_RETRY_INCREMENT back.
        3. Success after retries: give back whatever quota was
           associated with the last acquisition.
        """
        if http_response is None:
            return
        if not 200 <= http_response.status_code < 300:
            return
        if 'retry_quota_capacity' in context:
            self._quota.release(context['retry_quota_capacity'])
        else:
            self._quota.release(self._NO_RETRY_INCREMENT)