From febaf6849f78f39fd9c37b0ea21304ea9c9b4d2f Mon Sep 17 00:00:00 2001
From: Andrew Gaul
Date: Fri, 5 Jul 2019 12:07:31 -0700
Subject: [PATCH] Issue multipart when object size exceeds part size

Previously s3fs issued multipart uploads only when the object size was
at least twice the part size.  Tying this threshold to the part size
was confusing; if a separate threshold is needed, s3fs should add a
dedicated tunable, similar to singlepart_copy_limit.  Fixes #1058.
---
 src/fdcache.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/fdcache.cpp b/src/fdcache.cpp
index 1eb7938..e1715c3 100644
--- a/src/fdcache.cpp
+++ b/src/fdcache.cpp
@@ -1194,7 +1194,7 @@ int FdEntity::Load(off_t start, off_t size)
       off_t over_size = iter->bytes - need_load_size;
 
       // download
-      if(2 * S3fsCurl::GetMultipartSize() <= need_load_size && !nomultipart){ // default 20MB
+      if(S3fsCurl::GetMultipartSize() <= need_load_size && !nomultipart){
         // parallel request
         // Additional time is needed for large files
         time_t backup = 0;
@@ -1544,7 +1544,7 @@ int FdEntity::RowFlush(const char* tpath, bool force_sync)
      S3FS_PRN_ERR("fstat is failed by errno(%d), but continue...", errno);
    }
 
-    if(pagelist.Size() >= 2 * S3fsCurl::GetMultipartSize() && !nomultipart){ // default 20MB
+    if(pagelist.Size() >= S3fsCurl::GetMultipartSize() && !nomultipart){
       // Additional time is needed for large files
       time_t backup = 0;
       if(120 > S3fsCurl::GetReadwriteTimeout()){
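
As a worked illustration of the threshold change (not part of the patch),
the sketch below contrasts the old and new conditions with stand-in
functions.  kPartSize is a hypothetical stand-in for
S3fsCurl::GetMultipartSize(), using the 10MB default implied by the old
"// default 20MB" comment on the doubled threshold, and the bool argument
stands in for the nomultipart option.

// Illustration only: how the multipart decision changes with this patch.
#include <cstdint>
#include <cstdio>

static const int64_t kPartSize = 10 * 1024 * 1024;  // assumed 10MB default

static bool use_multipart_old(int64_t size, bool nomultipart)
{
  return 2 * kPartSize <= size && !nomultipart;  // old threshold: 20MB
}

static bool use_multipart_new(int64_t size, bool nomultipart)
{
  return kPartSize <= size && !nomultipart;      // new threshold: 10MB
}

int main()
{
  // A 15MB object previously took the single-request path; it now
  // takes the multipart path.
  const int64_t size = 15 * 1024 * 1024;
  printf("old: %d  new: %d\n", use_multipart_old(size, false),
         use_multipart_new(size, false));        // prints "old: 0  new: 1"
  return 0;
}

Only objects between one and two part sizes change behavior; smaller and
larger objects take the same path as before.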