Remove limit for single-part objects

AWS enforces a 5 GB limit on single-part objects, but other S3
implementations such as GCS support larger objects. Fixes #1542.
This commit is contained in:
Andrew Gaul 2021-02-03 22:27:50 +09:00
parent 4b53d4bf6b
commit 67b9381825

View File

@ -726,12 +726,7 @@ int put_headers(const char* path, headers_t& meta, bool is_copy, bool update_mti
// get_object_attribute() returns error with initializing buf.
(void)get_object_attribute(path, &buf);
if(buf.st_size >= FIVE_GB){
// multipart
if(nocopyapi || nomultipart){
S3FS_PRN_WARN("Metadata update failed because the file is larger than 5GB and the options nocopyapi or nomultipart are set: [path=%s]", path);
return -EFBIG; // File too large
}
if(buf.st_size >= FIVE_GB && !nocopyapi && !nomultipart){
if(0 != (result = s3fscurl.MultipartHeadRequest(path, buf.st_size, meta, is_copy))){
return result;
}