Added list_object_max_keys option based on #783 PR

Takeshi Nakatani 2018-07-08 03:49:10 +00:00
parent c2ca7e43b6
commit 7e0c53dfe9
4 changed files with 23 additions and 6 deletions


@@ -62,7 +62,7 @@ the default canned acl to apply to all written s3 objects, e.g., "private", "pub
empty string means do not send header.
see http://aws.amazon.com/documentation/s3/ for the full list of canned acls.
.TP
-\fB\-o\fR retries (default="2")
+\fB\-o\fR retries (default="5")
number of times to retry a failed S3 transaction.
.TP
\fB\-o\fR use_cache (default="" which means disabled)
@@ -143,7 +143,10 @@ time to wait for connection before giving up.
\fB\-o\fR readwrite_timeout (default="60" seconds)
time to wait between read/write activity before giving up.
.TP
-\fB\-o\fR max_stat_cache_size (default="1000" entries (about 4MB))
+\fB\-o\fR list_object_max_keys (default="1000")
+specify the maximum number of keys returned by the S3 list objects API. The default is 1000. You can set this value to 1000 or more.
+.TP
+\fB\-o\fR max_stat_cache_size (default="100,000" entries (about 40MB))
maximum number of entries in the stat cache
.TP
\fB\-o\fR stat_cache_expire (default is no expire)


@@ -345,7 +345,7 @@ bool S3fsCurl::is_dns_cache = true; // default
bool S3fsCurl::is_ssl_session_cache= true; // default
long S3fsCurl::connect_timeout = 300; // default
time_t S3fsCurl::readwrite_timeout = 60; // default
-int S3fsCurl::retries = 3; // default
+int S3fsCurl::retries = 5; // default
bool S3fsCurl::is_public_bucket = false;
string S3fsCurl::default_acl = "private";
storage_class_t S3fsCurl::storage_class = STANDARD;

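For context, a bounded retry count like S3fsCurl::retries is typically consumed by a loop that re-issues the transaction until it succeeds or the budget runs out. A minimal standalone sketch under that assumption; RequestOnce() and RequestWithRetry() are illustrative names, not s3fs's actual internals:

    #include <cstdio>

    static int retries = 5;                    // mirrors the new default

    // Hypothetical single S3 transaction; returns 0 on success.
    static int RequestOnce()
    {
        return -1;                             // stub: always fails in this sketch
    }

    // One initial attempt plus up to 'retries' retries.
    static int RequestWithRetry()
    {
        int result = -1;
        for(int attempt = 0; attempt <= retries; ++attempt){
            if(0 == (result = RequestOnce())){
                return 0;                      // success
            }
            if(attempt < retries){
                std::fprintf(stderr, "retrying (%d/%d)\n", attempt + 1, retries);
            }
        }
        return result;                         // still failing after all retries
    }

    int main()
    {
        return RequestWithRetry() ? 1 : 0;
    }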

@@ -136,6 +136,7 @@ static int64_t singlepart_copy_limit = FIVE_GB;
static bool is_specified_endpoint = false;
static int s3fs_init_deferred_exit_status = 0;
static bool support_compat_dir = true;// default supports compatibility directory type
+static int max_keys_list_object = 1000;// default is 1000
static const std::string allbucket_fields_type = ""; // special key for mapping(This name is absolutely not used as a bucket name)
static const std::string keyval_fields_type = "\t"; // special key for mapping(This name is absolutely not used as a bucket name)
@@ -2494,7 +2495,7 @@ static int list_bucket(const char* path, S3ObjList& head, const char* delimiter,
// For dir with children, expect "dir/" and "dir/child"
query_maxkey += "max-keys=2";
}else{
-query_maxkey += "max-keys=1000";
+query_maxkey += "max-keys=" + str(max_keys_list_object);
}
while(truncated){
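Note that max-keys only caps a single response page; the enclosing while(truncated) loop keeps requesting pages until the listing is exhausted. A minimal standalone sketch of that pagination pattern, with a hypothetical FetchPage() and std::to_string standing in for s3fs's str() helper:

    #include <iostream>
    #include <string>

    static int max_keys_list_object = 1000;   // mirrors the new global

    // Hypothetical one-page fetch: prints the query it would send, sets the
    // marker for the next page, and returns true while more pages remain.
    static bool FetchPage(const std::string& query, std::string& marker)
    {
        std::cout << "GET ?" << query << std::endl;
        marker.clear();
        return false;                         // stub: pretend one page suffices
    }

    int main()
    {
        std::string marker;
        bool truncated = true;
        while(truncated){
            std::string query = "max-keys=" + std::to_string(max_keys_list_object);
            if(!marker.empty()){
                query += "&marker=" + marker;
            }
            truncated = FetchPage(query, marker);
        }
        return 0;
    }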
@@ -4610,6 +4611,15 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
S3fsCurl::SetReadwriteTimeout(rwtimeout);
return 0;
}
+if(0 == STR2NCMP(arg, "list_object_max_keys=")){
+  int max_keys = static_cast<int>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
+  if(max_keys < 1000){
+    S3FS_PRN_EXIT("argument should be 1000 or more: list_object_max_keys");
+    return -1;
+  }
+  max_keys_list_object = max_keys;
+  return 0;
+}
if(0 == STR2NCMP(arg, "max_stat_cache_size=")){
unsigned long cache_size = static_cast<unsigned long>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
StatCache::getStatCacheData()->SetCacheSize(cache_size);

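The new branch takes everything after '=' and parses it as an integer before enforcing the 1000 floor. The same parse-and-validate pattern as a standalone sketch, with std::strtol standing in for s3fs's s3fs_strtoofft():

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Parse "list_object_max_keys=NNN" and enforce the 1000 floor.
    // Returns the parsed value, or -1 on a missing or out-of-range value.
    static int parse_list_object_max_keys(const char* arg)
    {
        const char* eq = std::strchr(arg, '=');
        if(!eq){
            return -1;                        // no value given
        }
        int max_keys = static_cast<int>(std::strtol(eq + 1, NULL, 10));
        if(max_keys < 1000){
            std::fprintf(stderr, "argument should be 1000 or more: list_object_max_keys\n");
            return -1;
        }
        return max_keys;
    }

    int main()
    {
        std::printf("%d\n", parse_list_object_max_keys("list_object_max_keys=5000"));  // 5000
        std::printf("%d\n", parse_list_object_max_keys("list_object_max_keys=500"));   // -1
        return 0;
    }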

@@ -1008,7 +1008,7 @@ void show_help (void)
" header. see http://aws.amazon.com/documentation/s3/ for the\n"
" full list of canned acls\n"
"\n"
-" retries (default=\"2\")\n"
+" retries (default=\"5\")\n"
" - number of times to retry a failed s3 transaction\n"
"\n"
" use_cache (default=\"\" which means disabled)\n"
@@ -1110,7 +1110,11 @@ void show_help (void)
" readwrite_timeout (default=\"60\" seconds)\n"
" - time to wait between read/write activity before giving up\n"
"\n"
-" max_stat_cache_size (default=\"1000\" entries (about 4MB))\n"
+" list_object_max_keys (default=\"1000\")\n"
+" - specify the maximum number of keys returned by the S3 list objects\n"
+" API. The default is 1000. You can set this value to 1000 or more.\n"
+"\n"
+" max_stat_cache_size (default=\"100,000\" entries (about 40MB))\n"
" - maximum number of entries in the stat cache\n"
"\n"
" stat_cache_expire (default is no expire)\n"