def _register_retries(self, service_model):
    endpoint_prefix = service_model.endpoint_prefix

    # First, we load the entire retry config for all services,
    # then pull out just the information we need.
    original_config = self._loader.load_data('_retry')
    if not original_config:
        return

    retry_config = self._retry_config_translator.build_retry_config(
        endpoint_prefix, original_config.get('retry', {}),
        original_config.get('definitions', {}))

    logger.debug("Registering retry handlers for service: %s",
                 service_model.service_name)
    handler = self._retry_handler_factory.create_retry_handler(
        retry_config, endpoint_prefix)
    unique_id = 'retry-config-%s' % endpoint_prefix
    self._event_emitter.register('needs-retry.%s' % endpoint_prefix,
                                 handler, unique_id=unique_id)
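For orientation, the data returned by ``load_data('_retry')`` has roughly the following shape. This is a trimmed, hand-written sketch of botocore's ``_retry.json``, not the full file; the real file defines many more policies and per-service overrides.

# A trimmed sketch of botocore's _retry.json (structure only).
retry_data = {
    "definitions": {
        # Named retry policies that per-service configs can reference.
    },
    "retry": {
        "__default__": {
            "max_attempts": 5,
            "delay": {
                "type": "exponential",
                "base": "rand",
                "growth_factor": 2,
            },
            "policies": {
                # e.g. general socket errors, throttling, 5xx responses
            },
        },
    },
}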
def load_data(self, name):
    """Load data given a data path.

    This is a low level method that will search through the various
    search paths until it's able to load a value. This is typically
    only needed to load *non* model files (such as _endpoints and
    _retry). If you need to load model files, you should prefer
    ``load_service_model``.

    :type name: str
    :param name: The data path, i.e. ``ec2/2015-03-01/service-2``.

    :return: The loaded data. If no data could be found then
        a DataNotFoundError is raised.

    """
    for possible_path in self._potential_locations(name):
        found = self.file_loader.load_file(possible_path)
        if found is not None:
            return found
    # We didn't find anything that matched on any path.
    raise DataNotFoundError(data_path=name)
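As a quick aside, the loader can be exercised on its own. A minimal usage sketch (``create_loader`` is botocore's factory for ``Loader`` instances):

# Minimal usage sketch: load the shared retry config directly.
from botocore.loaders import create_loader

loader = create_loader()
retry_data = loader.load_data('_retry')   # raises DataNotFoundError if missing
print(sorted(retry_data))                 # e.g. ['definitions', 'retry']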
def create_retry_action_from_config(config, operation_name=None):
    # The spec has the possibility of supporting per policy
    # actions, but right now, we assume this comes from the
    # default section, which means that delay functions apply
    # for every policy in the retry config (per service).
    delay_config = config['__default__']['delay']
    if delay_config['type'] == 'exponential':
        return create_exponential_delay_function(
            base=delay_config['base'],
            growth_factor=delay_config['growth_factor'])
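Feeding a minimal ``__default__`` section through this function yields the delay callable. The config literal below is hand-written for illustration, using a fixed ``base`` instead of the default ``'rand'``:

# Hand-written minimal config for illustration.
config = {
    '__default__': {
        'delay': {
            'type': 'exponential',
            'base': 1,          # a fixed base instead of 'rand'
            'growth_factor': 2,
        }
    }
}
delay_fn = create_retry_action_from_config(config)
print(delay_fn(attempts=3))  # 1 * (2 ** 2) == 4 seconds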
def create_exponential_delay_function(base, growth_factor):
    """Create an exponential delay function based on the attempts.

    This is used so that you only have to pass it the attempts
    parameter to calculate the delay.

    """
    return functools.partial(
        delay_exponential, base=base, growth_factor=growth_factor)
By default, ``base`` is set to ``'rand'``, so a base value is drawn with ``base = random.random()`` and the retry interval is then computed as ``base * (growth_factor ** (attempts - 1))``.
def delay_exponential(base, growth_factor, attempts):
    """Calculate time to sleep based on exponential function.

    The format is::

        base * growth_factor ^ (attempts - 1)

    If ``base`` is set to 'rand' then a random number between
    0 and 1 will be used as the base.
    Base must be greater than 0, otherwise a ValueError will be
    raised.

    """
    if base == 'rand':
        base = random.random()
    elif base <= 0:
        raise ValueError("The 'base' param must be greater than 0, "
                         "got: %s" % base)
    time_to_sleep = base * (growth_factor ** (attempts - 1))
    return time_to_sleep
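To make the growth concrete, here are the sleep times for the first few attempts with a fixed base of 1 and a growth factor of 2 (illustrative values; with the default ``base='rand'`` every delay is additionally scaled by a random factor in [0, 1)):

for attempts in range(1, 6):
    print(attempts, delay_exponential(base=1, growth_factor=2, attempts=attempts))
# 1 1
# 2 2
# 3 4
# 4 8
# 5 16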
public class PredefinedRetryPolicies {
    // (snip)

    /* SDK default */

    /** SDK default max retry count **/
    public static final int DEFAULT_MAX_ERROR_RETRY = 3;

    /**
     * SDK default retry policy (except for AmazonDynamoDBClient,
     * whose constructor will replace the DEFAULT with DYNAMODB_DEFAULT.)
     */
    public static final RetryPolicy DEFAULT;

    /**
     * The SDK default retry condition, which checks for various conditions in
     * the following order:
     *   - Never retry on requests with non-repeatable content;
     *   - Retry on client exceptions caused by IOException;
     *   - Retry on service exceptions that are either 500 internal server
     *     errors, 503 service unavailable errors, service throttling errors or
     *     clock skew errors.
     */
    public static final RetryPolicy.RetryCondition DEFAULT_RETRY_CONDITION = new SDKDefaultRetryCondition();

    /**
     * The SDK default back-off strategy, which increases exponentially up to a max amount of delay. It also applies a larger
     * scale factor upon service throttling exception.
     */
    public static final RetryPolicy.BackoffStrategy DEFAULT_BACKOFF_STRATEGY =
            new PredefinedBackoffStrategies.SDKDefaultBackoffStrategy();

    /**
     * Returns the SDK default retry policy. This policy will honor the
     * maxErrorRetry set in ClientConfiguration.
     *
     * @see ClientConfiguration#setMaxErrorRetry(int)
     */
    public static RetryPolicy getDefaultRetryPolicy() {
        return new RetryPolicy(DEFAULT_RETRY_CONDITION,
                               DEFAULT_BACKOFF_STRATEGY,
                               DEFAULT_MAX_ERROR_RETRY,
                               true);
    }
@NotThreadSafe
public class ClientConfiguration {
    /** The default timeout for creating new connections. */
    public static final int DEFAULT_CONNECTION_TIMEOUT = 10 * 1000;

    /** The default timeout for reading from a connected socket. */
    public static final int DEFAULT_SOCKET_TIMEOUT = 50 * 1000;

    /**
     * The default timeout for a request. This is disabled by default.
     */
    public static final int DEFAULT_REQUEST_TIMEOUT = 0;

    /**
     * The default timeout for the entire execution of a single request,
     * including retries. This is disabled by default.
     */
    public static final int DEFAULT_CLIENT_EXECUTION_TIMEOUT = 0;

    /** The default max connection pool size. */
    public static final int DEFAULT_MAX_CONNECTIONS = 50;
    @Override
    public boolean shouldRetry(AmazonWebServiceRequest originalRequest,
                               AmazonClientException exception,
                               int retriesAttempted) {
        // Always retry on client exceptions caused by IOException
        if (exception.getCause() instanceof IOException) return true;

        // Only retry on a subset of service exceptions
        if (exception instanceof AmazonServiceException) {
            AmazonServiceException ase = (AmazonServiceException)exception;

            /*
             * For 500 internal server errors and 503 service
             * unavailable errors, we want to retry, but we need to use
             * an exponential back-off strategy so that we don't overload
             * a server with a flood of retries.
             */
            if (RetryUtils.isRetryableServiceException(ase)) return true;

            /*
             * Throttling is reported as a 400 error from newer services. To try
             * and smooth out an occasional throttling error, we'll pause and
             * retry, hoping that the pause is long enough for the request to
             * get through the next time.
             */
            if (RetryUtils.isThrottlingException(ase)) return true;

            /*
             * Clock skew exception. If it is then we will get the time offset
             * between the device time and the server time to set the clock skew
             * and then retry the request.
             */
            if (RetryUtils.isClockSkewError(ase)) return true;
        }

        return false;
    }
    /**
     * A private class that implements the default back-off strategy.
     **/
    static class SDKDefaultBackoffStrategy extends V2CompatibleBackoffStrategyAdapter {

        private final BackoffStrategy fullJitterBackoffStrategy;
        private final BackoffStrategy equalJitterBackoffStrategy;

        SDKDefaultBackoffStrategy() {
            fullJitterBackoffStrategy = new PredefinedBackoffStrategies.FullJitterBackoffStrategy(
                    SDK_DEFAULT_BASE_DELAY, SDK_DEFAULT_MAX_BACKOFF_IN_MILLISECONDS);
            equalJitterBackoffStrategy = new PredefinedBackoffStrategies.EqualJitterBackoffStrategy(
                    SDK_DEFAULT_THROTTLED_BASE_DELAY, SDK_DEFAULT_MAX_BACKOFF_IN_MILLISECONDS);
        }

        SDKDefaultBackoffStrategy(final int baseDelay, final int throttledBaseDelay, final int maxBackoff) {
            fullJitterBackoffStrategy = new PredefinedBackoffStrategies.FullJitterBackoffStrategy(
                    baseDelay, maxBackoff);
            equalJitterBackoffStrategy = new PredefinedBackoffStrategies.EqualJitterBackoffStrategy(
                    throttledBaseDelay, maxBackoff);
        }

        @Override
        public long computeDelayBeforeNextRetry(RetryPolicyContext context) {
            /*
             * We use the full jitter scheme for non-throttled exceptions and the
             * equal jitter scheme for throttled exceptions. This gives a preference
             * to quicker response and larger retry distribution for service errors
             * and guarantees a minimum delay for throttled exceptions.
             */
            if (RetryUtils.isThrottlingException(context.exception())) {
                return equalJitterBackoffStrategy.computeDelayBeforeNextRetry(context);
            } else {
                return fullJitterBackoffStrategy.computeDelayBeforeNextRetry(context);
            }
        }
    }
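The two jitter schemes come from the well-known exponential-backoff-and-jitter approach. The following Python sketch approximates the math the two Java strategies compute; it is a paraphrase for illustration, not the SDK source (``base_delay`` and ``max_backoff`` are in milliseconds):

import random

def full_jitter(base_delay, max_backoff, retries):
    # Delay anywhere in [0, ceiling): fast on average, widely spread.
    ceiling = min(base_delay * (2 ** retries), max_backoff)
    return random.uniform(0, ceiling)

def equal_jitter(base_delay, max_backoff, retries):
    # Half the ceiling is guaranteed, the rest is random: this enforces
    # a minimum delay, which suits throttling errors.
    ceiling = min(base_delay * (2 ** retries), max_backoff)
    return ceiling / 2 + random.uniform(0, ceiling / 2)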
  /**
   * How many times a failed request should be retried before giving up.
   * The defaultRetryCount can be overridden by service classes.
   *
   * @api private
   */
  numRetries: function numRetries() {
    if (this.config.maxRetries !== undefined) {
      return this.config.maxRetries;
    } else {
      return this.defaultRetryCount;
    }
  },
export interface RetryDelayOptions {
    /**
     * The base number of milliseconds to use in the exponential backoff for operation retries.
     * Defaults to 100 ms.
     */
    base?: number
    /**
     * A custom function that accepts a retry count and returns the amount of time to delay in milliseconds.
     * The base option will be ignored if this option is supplied.
     */
    customBackoff?: (retryCount: number) => number
}
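How these two options interact can be paraphrased as follows. This is a Python sketch of the documented behavior, not the JavaScript SDK source (whether a given SDK version also applies jitter on top of the exponential rule is not shown here):

def calculate_retry_delay(retry_count, base=100, custom_backoff=None):
    # customBackoff, when supplied, fully replaces the exponential rule.
    if custom_backoff is not None:
        return custom_backoff(retry_count)
    # Otherwise: exponential backoff on the base (in milliseconds).
    return (2 ** retry_count) * base

print(calculate_retry_delay(0))  # 100 ms
print(calculate_retry_delay(2))  # 400 ms
print(calculate_retry_delay(3, custom_backoff=lambda n: 200 * n))  # 600 ms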