Merge pull request #1103 from gaul/config/readwrite-timeout

Use consistent default for readwrite_timeout

readwrite_timeout previously defaulted to 60 seconds, but fdcache.cpp temporarily raised it to 120 seconds for large parallel transfers. Make 120 seconds the default everywhere and remove the temporary override.
Takeshi Nakatani 2019-07-17 22:37:02 +09:00 committed by GitHub
commit 3ad1c95e86
4 changed files with 3 additions and 19 deletions

doc/man/s3fs.1

@@ -150,7 +150,7 @@ S3 does not allow copy object api for anonymous users, then s3fs sets nocopyapi
 \fB\-o\fR connect_timeout (default="300" seconds)
 time to wait for connection before giving up.
 .TP
-\fB\-o\fR readwrite_timeout (default="60" seconds)
+\fB\-o\fR readwrite_timeout (default="120" seconds)
 time to wait between read/write activity before giving up.
 .TP
 \fB\-o\fR list_object_max_keys (default="1000")
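
readwrite_timeout is an inactivity timeout, not a cap on total transfer time. A minimal sketch of how such a stall timeout can be enforced with a libcurl progress callback; the names and wiring below are illustrative assumptions, not s3fs's exact code:

#include <curl/curl.h>
#include <ctime>

// Illustrative per-handle state for a stall timeout (hypothetical).
struct ProgressState {
  time_t     last_change;  // last time any byte moved
  curl_off_t last_dl;
  curl_off_t last_ul;
  time_t     rw_timeout;   // e.g. 120 seconds
};

// Returning nonzero makes libcurl abort the transfer with
// CURLE_ABORTED_BY_CALLBACK.
static int xferinfo_cb(void* p, curl_off_t, curl_off_t dlnow, curl_off_t, curl_off_t ulnow)
{
  ProgressState* st = static_cast<ProgressState*>(p);
  time_t now = time(nullptr);
  if(dlnow != st->last_dl || ulnow != st->last_ul){
    st->last_dl     = dlnow;
    st->last_ul     = ulnow;
    st->last_change = now;
    return 0;
  }
  // no read/write activity: give up after rw_timeout seconds
  return (now - st->last_change) > st->rw_timeout ? 1 : 0;
}

// Hypothetical helper showing the wiring.
static void set_stall_timeout(CURL* hCurl, ProgressState* st)
{
  curl_easy_setopt(hCurl, CURLOPT_NOPROGRESS, 0L);
  curl_easy_setopt(hCurl, CURLOPT_XFERINFOFUNCTION, xferinfo_cb);
  curl_easy_setopt(hCurl, CURLOPT_XFERINFODATA, st);
}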

src/curl.cpp

@@ -354,7 +354,7 @@ bool S3fsCurl::is_cert_check = true; // default
 bool S3fsCurl::is_dns_cache = true; // default
 bool S3fsCurl::is_ssl_session_cache= true; // default
 long S3fsCurl::connect_timeout = 300; // default
-time_t S3fsCurl::readwrite_timeout = 60; // default
+time_t S3fsCurl::readwrite_timeout = 120; // default
 int S3fsCurl::retries = 5; // default
 bool S3fsCurl::is_public_bucket = false;
 string S3fsCurl::default_acl = "private";
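
The save/restore blocks deleted from fdcache.cpp below rely on SetReadwriteTimeout returning the previous value. A plausible sketch of the accessor pair, with signatures inferred from the call sites (assumed, not verbatim from the source):

// Setter returns the old value so callers can restore it later.
time_t S3fsCurl::SetReadwriteTimeout(time_t timeout)
{
  time_t old = S3fsCurl::readwrite_timeout;
  S3fsCurl::readwrite_timeout = timeout;
  return old;
}

time_t S3fsCurl::GetReadwriteTimeout()
{
  return S3fsCurl::readwrite_timeout;
}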

src/fdcache.cpp

@@ -1213,15 +1213,7 @@ int FdEntity::Load(off_t start, off_t size, bool lock_already_held)
 // download
 if(S3fsCurl::GetMultipartSize() <= need_load_size && !nomultipart){
   // parallel request
-  // Additional time is needed for large files
-  time_t backup = 0;
-  if(120 > S3fsCurl::GetReadwriteTimeout()){
-    backup = S3fsCurl::SetReadwriteTimeout(120);
-  }
   result = S3fsCurl::ParallelGetObjectRequest(path.c_str(), fd, iter->offset, need_load_size);
-  if(0 != backup){
-    S3fsCurl::SetReadwriteTimeout(backup);
-  }
 }else{
   // single request
   if(0 < need_load_size){
@@ -1562,15 +1554,7 @@ int FdEntity::RowFlush(const char* tpath, bool force_sync)
 }
 if(pagelist.Size() >= S3fsCurl::GetMultipartSize() && !nomultipart){
-  // Additional time is needed for large files
-  time_t backup = 0;
-  if(120 > S3fsCurl::GetReadwriteTimeout()){
-    backup = S3fsCurl::SetReadwriteTimeout(120);
-  }
   result = S3fsCurl::ParallelMultipartUploadRequest(tpath ? tpath : path.c_str(), orgmeta, fd);
-  if(0 != backup){
-    S3fsCurl::SetReadwriteTimeout(backup);
-  }
 }else{
   S3fsCurl s3fscurl(true);
   result = s3fscurl.PutRequest(tpath ? tpath : path.c_str(), orgmeta, fd);
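
Both deleted blocks implement the same temporary override: raise the timeout to at least 120 seconds around a parallel transfer, then restore the saved value. The same pattern expressed as a hypothetical RAII guard, for illustration only; the actual code used the explicit save/restore shown above:

// Hypothetical RAII equivalent of the deleted save/restore blocks.
class ScopedReadwriteTimeout
{
public:
  explicit ScopedReadwriteTimeout(time_t min_timeout) : backup(0)
  {
    // raise the timeout only if the current value is lower
    if(min_timeout > S3fsCurl::GetReadwriteTimeout()){
      backup = S3fsCurl::SetReadwriteTimeout(min_timeout);
    }
  }
  ~ScopedReadwriteTimeout()
  {
    // restore the original value on scope exit
    if(0 != backup){
      S3fsCurl::SetReadwriteTimeout(backup);
    }
  }
private:
  time_t backup;  // 0 means "nothing to restore"
};

With the default raised to 120 seconds in curl.cpp, the override was a no-op in the common case, so both call sites simplify to a plain request.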

src/s3fs.cpp

@@ -1194,7 +1194,7 @@ void show_help ()
 " connect_timeout (default=\"300\" seconds)\n"
 " - time to wait for connection before giving up\n"
 "\n"
-" readwrite_timeout (default=\"60\" seconds)\n"
+" readwrite_timeout (default=\"120\" seconds)\n"
 " - time to wait between read/write activity before giving up\n"
 "\n"
 " list_object_max_keys (default=\"1000\")\n"