Describe the bug
Can someone explain how this retry strategy works? I configured the timeouts to 100 ms and the retry strategy to EXPONENTIAL_BACKOFF, but I still see that requests are not getting cancelled and the time to get an object is more than 1 second.
Sample code snippet:
Aws::S3Crt::ClientConfiguration config;
config.httpRequestTimeoutMs = 100;
config.connectTimeoutMs = 100;
config.requestTimeoutMs = 100;
config.crtRetryStrategyConfig.config.maxRetries = 1;
config.crtRetryStrategyConfig.config.scaleFactorMs = 10;
config.crtRetryStrategyConfig.config.maxBackoffSecs = 1;
config.crtRetryStrategyConfig.crtRetryStrategyType = Aws::S3Crt::S3CrtClientConfiguration::CrtRetryStrategyConfig::CrtRetryStrategyType::EXPONENTIAL_BACKOFF;
config.throughputTargetGbps = 1;
config.partSize = 10*1024*1024;
s3_crt_client = Aws::New<Aws::S3Crt::S3CrtClient>("test", config);
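For reference, one way to check whether the configured timeouts and retries actually fire is to enable the SDK's debug logging via SDKOptions before creating the client. This is only a diagnostic sketch on top of the snippet above, not part of the reported configuration.
#include <aws/core/Aws.h>

Aws::SDKOptions options;
// Debug-level logging makes the CRT/HTTP layer print connection attempts,
// timeouts and retry scheduling; by default the output goes to an
// aws_sdk_<date>.log file in the working directory.
options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Debug;
Aws::InitAPI(options);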
Regression Issue
Expected Behavior
With one retry and the configured timeouts, GetObject should return once the timeout is reached.
Current Behavior
GetObject does not return even after the timeout is reached.
Reproduction Steps
Repro using the exponential backoff retry strategy:
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <time.h>
#include <memory>
#include <string>
#include <aws/core/Aws.h>
#include <aws/s3-crt/S3CrtClient.h>
#include <aws/s3-crt/model/HeadObjectRequest.h>
#include <aws/s3-crt/model/GetObjectRequest.h>
#include <aws/s3-crt/S3CrtErrors.h>
void usage(const char *bin)
{
printf("USAGE: %s <s3_uri> <offset in bytes> <read size(0->till end)> <part_size(in bytes)> <out_file(optional)>\n", bin);
}
uint64_t get_curr_time_ms(void)
{
struct timespec ts;
uint64_t ts_64;
clock_gettime(CLOCK_MONOTONIC, &ts);
ts_64 = (uint64_t)((ts.tv_sec * 1000) + (ts.tv_nsec/1000000));
return ts_64;
}
int main (int argc, char* argv[])
{
char *s3_uri;
uint64_t offset;
uint64_t read_size;
uint64_t part_size;
char *buf = NULL;
uint64_t read_left;
uint64_t to_read;
uint64_t ret;
uint64_t iter = 0;
char *out_file = NULL;
int out_fd = -1;
if(argc < 5 || argc > 6)
{
usage(argv[0]);
return 0;
}
s3_uri = argv[1];
offset = strtoull(argv[2], NULL, 10);
read_size = strtoull(argv[3], NULL, 10);
if(read_size == 0) read_size = UINT64_MAX;
part_size = strtoull(argv[4], NULL, 10);
if(argc == 6) out_file = argv[5];
Aws::S3Crt::S3CrtClient *s3_crt_client;
Aws::S3Crt::Model::GetObjectRequest object_request;
Aws::SDKOptions options;
Aws::InitAPI(options);
Aws::S3Crt::ClientConfiguration config;
config.crtRetryStrategyConfig.config.maxRetries = 1;
config.crtRetryStrategyConfig.config.scaleFactorMs = 10;
config.crtRetryStrategyConfig.config.maxBackoffSecs = 1;
config.crtRetryStrategyConfig.crtRetryStrategyType = Aws::S3Crt::S3CrtClientConfiguration::CrtRetryStrategyConfig::CrtRetryStrategyType::EXPONENTIAL_BACKOFF;
config.httpRequestTimeoutMs = 100;
config.connectTimeoutMs = 100;
config.requestTimeoutMs = 100;
config.throughputTargetGbps = 1;
config.partSize = 10*1024*1024;
s3_crt_client = Aws::New<Aws::S3Crt::S3CrtClient>("test", config);
char bucket[128] = {0};
char key[128] = {0};
if(sscanf(s3_uri, "s3://%127[^/]/%127s", bucket, key) == 2) // bound field widths to the buffer sizes
{
Aws::S3Crt::Model::HeadObjectRequest head_object_request;
Aws::S3Crt::Model::HeadObjectOutcome outcome;
head_object_request.SetBucket(bucket);
head_object_request.SetKey(key);
outcome = s3_crt_client->HeadObject(head_object_request);
if(outcome.IsSuccess())
printf("Object found, size:%lu, etag %s\n", outcome.GetResult().GetContentLength(), outcome.GetResult().GetETag().c_str());
object_request.SetBucket(bucket);
object_request.SetKey(key);
}
if(out_file)
{
out_fd = open(out_file, O_CREAT | O_WRONLY, 0644); // O_CREAT requires an explicit mode argument
if(out_fd <= 2)
{
printf("Error in opening %s in write mode", out_file);
return 0;
}
}
int64_t pos = offset; // honour the offset argument instead of always starting at 0
buf = new char[part_size];
read_left = read_size;
while(read_left != 0)
{
ret = 0;
int64_t retry_count = -1;
to_read = part_size <= read_left ? part_size : read_left;
iter ++;
uint64_t start,end;
Aws::S3Crt::Model::GetObjectOutcome outcome;
object_request.SetRange(std::string("bytes=") + std::to_string(pos) + "-" + std::to_string(pos+to_read-1));
object_request.SetResponseStreamFactory(
[buf, to_read]()
{
std::unique_ptr<Aws::StringStream>
stream(Aws::New<Aws::StringStream>("test"));
stream->rdbuf()->pubsetbuf(static_cast<char*>(buf),
to_read);
return stream.release();
});
start = get_curr_time_ms();
outcome = s3_crt_client->GetObject(object_request);
end = get_curr_time_ms();
printf("Time taken for GetObject %lums\n",end-start);
if(outcome.IsSuccess())
{
ret = outcome.GetResult().GetContentLength();
if(ret <= to_read)
{
pos += ret;
//log_info("Testing data %02x %02x", ((uint8_t*)buf)[0], ((uint8_t*)buf)[1]);
}
else
{
printf("This should never happen, pos_:%lu, requested:%lu, read:%lu\n",
pos, to_read, ret);
break;
}
}
else
{
printf("Failed to read, pos_:%lu, requested:%lu, error:%s, error_type:%d, error_code:%d\n",
pos, to_read, outcome.GetError().GetMessage().c_str(), outcome.GetError().GetErrorType(), outcome.GetError().GetResponseCode()) ;
break;
}
if(ret == 0)
{
printf("Nothing read, read_left:%lubytes, iter:%lu\n", read_left, iter);
break;
}
else if(ret < to_read)
{
printf("Read %ldbytes instead of %ldbytes, iter:%lu\n", ret, to_read, iter);
}
else
{
printf("Read %ld bytes, iter:%lu\n", ret, iter);
}
read_left -= ret;
if(ret > 0 && out_fd > 2)
{
if(write(out_fd, buf, ret) != ret)
{
printf("Error in write!!! (%lu bytes)\n", ret);
return 0;
}
fdatasync(out_fd);
}
}
if(s3_crt_client)
{
Aws::Delete(s3_crt_client);
s3_crt_client = NULL;
}
Aws::ShutdownAPI(options);
if(buf) delete[] buf;
if(out_fd > 2) close(out_fd);
return 0;
}
Possible Solution
No response
Additional Information/Context
Can you please explain how exactly retries and timeouts work? We need to terminate the S3 CRT client's GetObject call if it is taking too long; we don't want our systems to get stuck.
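For additional context, the workaround we are experimenting with is the asynchronous Callable variant of GetObject plus our own deadline on the returned std::future. As far as we understand, this only stops the caller from blocking and does not cancel the in-flight transfer, so the sketch below (the get_with_deadline helper is just illustrative) is what we currently mean by "terminate":
#include <chrono>
#include <future>
#include <aws/s3-crt/S3CrtClient.h>
#include <aws/s3-crt/model/GetObjectRequest.h>

// Caller-side deadline: GetObjectCallable returns a std::future of the
// outcome, so wait_for() lets the caller give up after a fixed budget
// instead of blocking indefinitely. The underlying transfer keeps
// running in the background until it completes or fails on its own.
static bool get_with_deadline(Aws::S3Crt::S3CrtClient& client,
                              const Aws::S3Crt::Model::GetObjectRequest& request,
                              std::chrono::milliseconds budget)
{
    auto future_outcome = client.GetObjectCallable(request);
    if (future_outcome.wait_for(budget) == std::future_status::timeout)
    {
        // Deadline exceeded: report failure to the caller. Any custom
        // response-stream buffer (as in the repro above) must stay valid
        // until the request really finishes, because it is not cancelled.
        return false;
    }
    return future_outcome.get().IsSuccess();
}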
AWS CPP SDK version used
1.11.559
Compiler and Version used
12.2.0
Operating System and version
Debian version 12