Mirror of https://github.com/s3fs-fuse/s3fs-fuse.git (synced 2025-01-24 14:28:24 +00:00)
Remove limit for single-part objects
AWS enforces a 5 GB limit for single-part objects, but other S3 implementations like GCS support larger objects. Fixes #1542.
This commit is contained in:
parent 4b53d4bf6b
commit 67b9381825
@@ -726,12 +726,7 @@ int put_headers(const char* path, headers_t& meta, bool is_copy, bool update_mtime)
     // get_object_attribute() returns error with initializing buf.
     (void)get_object_attribute(path, &buf);
 
-    if(buf.st_size >= FIVE_GB){
-        // multipart
-        if(nocopyapi || nomultipart){
-            S3FS_PRN_WARN("Metadata update failed because the file is larger than 5GB and the options nocopyapi or nomultipart are set: [path=%s]", path);
-            return -EFBIG; // File too large
-        }
+    if(buf.st_size >= FIVE_GB && !nocopyapi && !nomultipart){
         if(0 != (result = s3fscurl.MultipartHeadRequest(path, buf.st_size, meta, is_copy))){
             return result;
         }