Issue multipart when object size exceeds part size

Previously s3fs issued multipart uploads only when the object size was
at least twice the part size.  Conflating this threshold with the part
size was confusing; if a separate threshold is ever needed, s3fs should
add a dedicated tunable for it, similar to singlepart_copy_limit.
Fixes #1058.
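
For reference, a minimal sketch of the threshold change.  The helper names
below are hypothetical, not s3fs code; it assumes S3fsCurl::GetMultipartSize()
returns the configured part size in bytes, with the 10 MB default implied by
the removed "// default 20MB" comments.

    // Sketch only: should_use_multipart_* are illustrative helpers.
    #include <sys/types.h>   // off_t

    // Old rule: switch to multipart once the object reaches twice the part size.
    static bool should_use_multipart_old(off_t object_size, off_t part_size, bool nomultipart)
    {
        return 2 * part_size <= object_size && !nomultipart;   // e.g. 20 MB with a 10 MB part size
    }

    // New rule: switch to multipart as soon as the object reaches the part size.
    static bool should_use_multipart_new(off_t object_size, off_t part_size, bool nomultipart)
    {
        return part_size <= object_size && !nomultipart;       // e.g. 10 MB with a 10 MB part size
    }

In both cases the existing nomultipart setting still short-circuits the
decision, so forcing single-part uploads keeps working as before.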
Author: Andrew Gaul, 2019-07-05 12:07:31 -07:00
Parent: 21321a9d96
Commit: febaf6849f


@@ -1194,7 +1194,7 @@ int FdEntity::Load(off_t start, off_t size)
       off_t over_size = iter->bytes - need_load_size;
       // download
-      if(2 * S3fsCurl::GetMultipartSize() <= need_load_size && !nomultipart){ // default 20MB
+      if(S3fsCurl::GetMultipartSize() <= need_load_size && !nomultipart){
         // parallel request
         // Additional time is needed for large files
         time_t backup = 0;
@@ -1544,7 +1544,7 @@ int FdEntity::RowFlush(const char* tpath, bool force_sync)
       S3FS_PRN_ERR("fstat is failed by errno(%d), but continue...", errno);
     }
-    if(pagelist.Size() >= 2 * S3fsCurl::GetMultipartSize() && !nomultipart){ // default 20MB
+    if(pagelist.Size() >= S3fsCurl::GetMultipartSize() && !nomultipart){
       // Additional time is needed for large files
       time_t backup = 0;
       if(120 > S3fsCurl::GetReadwriteTimeout()){
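
As a concrete example, assuming the 10 MB default part size implied by the
removed "// default 20MB" comments, a 15 MB object is now loaded and flushed
via parallel multipart requests, whereas previously it fell below the 20 MB
threshold and was handled as a single request.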