Mirror of https://github.com/s3fs-fuse/s3fs-fuse.git (synced 2024-11-08 13:54:10 +00:00)
Fix typos (#2473)
commit fa807a56fb (parent 254d717a4a)
.github/workflows/ci.yml (vendored)
@@ -141,7 +141,7 @@ jobs:
 # This product(package) is a workaround for osxfuse which required an OS reboot(macos 11 and later).
 # see. https://github.com/macos-fuse-t/fuse-t
 # About osxfuse
-# This job doesn't work with Github Actions using macOS 11+ because "load_osxfuse" returns
+# This job doesn't work with GitHub Actions using macOS 11+ because "load_osxfuse" returns
 # "exit code = 1".(requires OS reboot)
 #
 macos12:
@@ -42,7 +42,7 @@ Keep in mind using the pre-built packages when available.
 ./configure
 ```
 Depending on the TLS library (OpenSSL/GnuTLS/NSS), add `--with-openssl`, `--with-gnutls` or `--with-nss` when executing `configure`. (If omitted, it is equivalent to `--with-openssl`.)
-3. Bulding:
+3. Building:
 ```sh
 make
 ```
@@ -535,7 +535,7 @@ issue #3 - Fixed local timezone was incorrectly being applied to IAM and Last-Mo
 issue #4 - Fix compilation error on MacOSX with missing const
 
 Version 1.74 -- Nov 24, 2013
-This version is initial version on Github, same as on GoogleCodes(s3fs).
+This version is initial version on GitHub, same as on GoogleCodes(s3fs).
 https://github.com/s3fs-fuse/s3fs-fuse/releases/tag/v1.74
 see more detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.74.tar.gz
 
@@ -298,7 +298,7 @@ Note that this option is still experimental and may change in the future.
 .TP
 \fB\-o\fR max_thread_count (default is "5")
 Specifies the number of threads waiting for stream uploads.
-Note that this option and Streamm Upload are still experimental and subject to change in the future.
+Note that this option and Stream Upload are still experimental and subject to change in the future.
 This option will be merged with "parallel_count" in the future.
 .TP
 \fB\-o\fR enable_content_md5 (default is disable)
@@ -3782,7 +3782,7 @@ int S3fsCurl::CheckBucket(const char* check_path, bool compat_dir, bool force_no
 query_string += '&';
 }
 query_string += "prefix=";
-query_string += &check_path[1]; // skip first '/' charactor
+query_string += &check_path[1]; // skip first '/' character
 }
 }
 if(!query_string.empty()){
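For context, this hunk sits in code that assembles the query string for a bucket-check ListObjects request; the object path's leading '/' must be dropped because S3 keys carry none. A minimal sketch of that pattern, with a hypothetical helper name (build_check_query is illustrative, not an s3fs function):

```cpp
#include <string>

// Hypothetical helper: build a ListObjects-style query string for a bucket
// check. "check_path" is an absolute mount path such as "/dir/file"; S3
// object keys have no leading '/', so the first character is skipped.
static std::string build_check_query(const std::string& check_path, bool only_one_key)
{
    std::string query_string;
    if(only_one_key){
        query_string = "max-keys=1";
    }
    if(check_path.size() > 1 && check_path[0] == '/'){
        if(!query_string.empty()){
            query_string += '&';
        }
        query_string += "prefix=";
        query_string += check_path.substr(1);   // skip first '/' character
    }
    return query_string;
}
```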
@@ -252,7 +252,7 @@ int S3fsMultiCurl::MultiRead()
 }
 }else{
 S3FS_PRN_ERR("failed a request(Unknown response code: %s)", s3fscurl->url.c_str());
-// Reuse partical file
+// Reuse particular file
 switch(curlCode){
 case CURLE_OPERATION_TIMEDOUT:
 isRetry = true;
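The surrounding logic maps libcurl result codes onto a retry decision for a failed request. A self-contained sketch of that pattern using only standard libcurl error codes; the particular set of retryable codes below is illustrative, not s3fs's exact policy:

```cpp
#include <curl/curl.h>

// Illustrative retry classifier: transient transport failures are worth
// retrying, while other libcurl results are treated as fatal.
static bool is_retryable(CURLcode code)
{
    switch(code){
        case CURLE_OPERATION_TIMEDOUT:   // request timed out
        case CURLE_COULDNT_CONNECT:      // TCP connection failed
        case CURLE_GOT_NOTHING:          // empty reply from server
        case CURLE_SEND_ERROR:           // failure sending network data
        case CURLE_RECV_ERROR:           // failure receiving network data
            return true;
        default:
            return false;
    }
}
```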
@@ -1868,7 +1868,7 @@ int FdEntity::RowFlushStreamMultipart(PseudoFdInfo* pseudo_obj, const char* tpat
 // [NOTE]
 // If there is a part where has already been uploading, that part
 // is re-updated after finishing uploading, so the part of the last
-// uploded must be canceled.
+// uploaded must be canceled.
 // (These are cancel_uploaded_list, cancellation processing means
 // re-uploading the same area.)
 //
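The note above reasons about byte-range overlap: a part that was already uploaded becomes stale once the same region is modified again, so it lands on cancel_uploaded_list and is re-uploaded. A tiny sketch of the overlap test that reasoning implies (the Part struct and function are hypothetical, not s3fs's types):

```cpp
#include <sys/types.h>

// Hypothetical uploaded part: a half-open byte range [start, start + size).
struct Part
{
    off_t start;
    off_t size;
};

// True when a newly modified range touches an already-uploaded part, which
// would make that part's uploaded data stale and force a cancel/re-upload.
static bool overlaps(const Part& uploaded, off_t mod_start, off_t mod_size)
{
    return mod_start < (uploaded.start + uploaded.size) &&
           uploaded.start < (mod_start + mod_size);
}
```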
@@ -134,7 +134,7 @@ class FdEntity
 return GetOpenCountHasLock();
 }
 int GetOpenCountHasLock() const REQUIRES(FdEntity::fdent_lock);
-// TODO: should thsi require a lock?
+// TODO: should this require a lock?
 const std::string& GetPath() const { return path; }
 bool RenamePath(const std::string& newpath, std::string& fentmapkey);
 int GetPhysicalFd() const { return physical_fd; }
@@ -129,7 +129,7 @@ PseudoFdInfo::PseudoFdInfo(int fd, int open_flags) : pseudo_fd(-1), physical_fd(
 
 PseudoFdInfo::~PseudoFdInfo()
 {
-Clear(); // call before destrying the mutex
+Clear(); // call before destroying the mutex
 
 if(is_lock_init){
 int result;
@@ -686,11 +686,11 @@ bool PseudoFdInfo::CancelAllThreads()
 bool PseudoFdInfo::ExtractUploadPartsFromUntreatedArea(const off_t& untreated_start, const off_t& untreated_size, mp_part_list_t& to_upload_list, filepart_list_t& cancel_upload_list, off_t max_mp_size)
 {
 if(untreated_start < 0 || untreated_size <= 0){
-S3FS_PRN_ERR("Paramters are wrong(untreated_start=%lld, untreated_size=%lld).", static_cast<long long int>(untreated_start), static_cast<long long int>(untreated_size));
+S3FS_PRN_ERR("Parameters are wrong(untreated_start=%lld, untreated_size=%lld).", static_cast<long long int>(untreated_start), static_cast<long long int>(untreated_size));
 return false;
 }
 
-// Initiliaze lists
+// Initialize lists
 to_upload_list.clear();
 cancel_upload_list.clear();
 
@@ -775,7 +775,7 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
 {
 AutoLock auto_lock(&upload_list_lock);
 
-// Initiliaze lists
+// Initialize lists
 to_upload_list.clear();
 to_copy_list.clear();
 to_download_list.clear();
@@ -843,13 +843,13 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
 // The untreated area exceeds the end of the current area
 //
 
-// Ajust untreated area
+// Adjust untreated area
 tmp_untreated_size = (cur_start + cur_size) - tmp_untreated_start;
 
-// Add ajusted untreated area to cur_untreated_list
+// Add adjusted untreated area to cur_untreated_list
 cur_untreated_list.emplace_back(tmp_untreated_start, tmp_untreated_size);
 
-// Remove this ajusted untreated area from the area pointed
+// Remove this adjusted untreated area from the area pointed
 // to by dup_untreated_iter.
 dup_untreated_iter->size = (dup_untreated_iter->start + dup_untreated_iter->size) - (cur_start + cur_size);
 dup_untreated_iter->start = tmp_untreated_start + tmp_untreated_size;
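The arithmetic in this hunk clips an untreated byte range at the end of the current area and keeps the remainder for later. A worked sketch of that split, under the assumption that both ranges are half-open [start, start + size) (Range and split_at_window_end are illustrative names):

```cpp
#include <sys/types.h>

// Hypothetical byte range [start, start + size).
struct Range
{
    off_t start;
    off_t size;
};

// Clip "rest" at the end of the window [cur_start, cur_start + cur_size):
// the head part is handled now, the tail stays behind as still untreated.
static Range split_at_window_end(Range& rest, off_t cur_start, off_t cur_size)
{
    Range head;
    head.start = rest.start;
    head.size  = (cur_start + cur_size) - rest.start;   // clipped to window end
    rest.size  = (rest.start + rest.size) - (cur_start + cur_size);
    rest.start = head.start + head.size;                // tail begins at the cut
    return head;
}
```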
@@ -995,7 +995,7 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
 ((tmp_cur_start + tmp_cur_size) - tmp_cur_untreated_iter->start) >= MIN_MULTIPART_SIZE )
 {
 //
-// Unify to this area to previouse copy area.
+// Unify to this area to previous copy area.
 //
 copy_riter->size += tmp_cur_untreated_iter->start - tmp_cur_start;
 S3FS_PRN_DBG("Resize to copy: start=%lld, size=%lld", static_cast<long long int>(copy_riter->start), static_cast<long long int>(copy_riter->size));
@@ -270,7 +270,7 @@ bool PageList::CheckZeroAreaInFile(int fd, off_t start, size_t bytes)
 // checkpage: This is one state of the cache file, it is loaded from the stats file.
 // sparse_list: This is a list of the results of directly checking the cache file status(HOLE/DATA).
 // In the HOLE area, the "loaded" flag of fdpage is false. The DATA area has it set to true.
-// fd: opened file discriptor to target cache file.
+// fd: opened file descriptor to target cache file.
 //
 bool PageList::CheckAreaInSparseFile(const struct fdpage& checkpage, const fdpage_list_t& sparse_list, int fd, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list)
 {
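The HOLE/DATA status mentioned here comes from probing a sparse cache file. On Linux this is done with lseek(2) and SEEK_DATA/SEEK_HOLE; a minimal sketch of such a probe (not s3fs's actual implementation, which builds a whole fdpage list):

```cpp
#define _GNU_SOURCE   // for SEEK_DATA on glibc
#include <unistd.h>

// True when "offset" lies inside a DATA extent of the (sparse) file behind
// "fd". lseek(fd, offset, SEEK_DATA) returns the first data location at or
// after offset, so equality means the byte at offset is real data, not hole.
static bool offset_is_data(int fd, off_t offset)
{
    off_t data = lseek(fd, offset, SEEK_DATA);
    return (data != -1 && data == offset);
}
```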
@@ -353,7 +353,7 @@ void PageList::FreeList(fdpage_list_t& list)
 list.clear();
 }
 
-PageList::PageList(off_t size, bool is_loaded, bool is_modified, bool shrinked) : is_shrink(shrinked)
+PageList::PageList(off_t size, bool is_loaded, bool is_modified, bool shrunk) : is_shrink(shrunk)
 {
 Init(size, is_loaded, is_modified);
 }
@@ -975,7 +975,7 @@ void PageList::Dump() const
 {
 int cnt = 0;
 
-S3FS_PRN_DBG("pages (shrinked=%s) = {", (is_shrink ? "yes" : "no"));
+S3FS_PRN_DBG("pages (shrunk=%s) = {", (is_shrink ? "yes" : "no"));
 for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter, ++cnt){
 S3FS_PRN_DBG(" [%08d] -> {%014lld - %014lld : %s / %s}", cnt, static_cast<long long int>(iter->offset), static_cast<long long int>(iter->bytes), iter->loaded ? "loaded" : "unloaded", iter->modified ? "modified" : "not modified");
 }
@@ -77,7 +77,7 @@ class PageList
 
 private:
 fdpage_list_t pages;
-bool is_shrink; // [NOTE] true if it has been shrinked even once
+bool is_shrink; // [NOTE] true if it has been shrunk even once
 
 public:
 enum class page_status : int8_t {
@@ -98,7 +98,7 @@ class PageList
 public:
 static void FreeList(fdpage_list_t& list);
 
-explicit PageList(off_t size = 0, bool is_loaded = false, bool is_modified = false, bool shrinked = false);
+explicit PageList(off_t size = 0, bool is_loaded = false, bool is_modified = false, bool shrunk = false);
 PageList(const PageList&) = delete;
 PageList(PageList&&) = delete;
 PageList& operator=(const PageList&) = delete;
@@ -64,7 +64,7 @@ bool UntreatedParts::empty()
 bool UntreatedParts::AddPart(off_t start, off_t size)
 {
 if(start < 0 || size <= 0){
-S3FS_PRN_ERR("Paramter are wrong(start=%lld, size=%lld).", static_cast<long long int>(start), static_cast<long long int>(size));
+S3FS_PRN_ERR("Parameter are wrong(start=%lld, size=%lld).", static_cast<long long int>(start), static_cast<long long int>(size));
 return false;
 }
 AutoLock auto_lock(&untreated_list_lock);
@@ -102,7 +102,7 @@ bool UntreatedParts::AddPart(off_t start, off_t size)
 bool UntreatedParts::RowGetPart(off_t& start, off_t& size, off_t max_size, off_t min_size, bool lastpart) const
 {
 if(max_size <= 0 || min_size < 0 || max_size < min_size){
-S3FS_PRN_ERR("Paramter are wrong(max_size=%lld, min_size=%lld).", static_cast<long long int>(max_size), static_cast<long long int>(min_size));
+S3FS_PRN_ERR("Parameter are wrong(max_size=%lld, min_size=%lld).", static_cast<long long int>(max_size), static_cast<long long int>(min_size));
 return false;
 }
 AutoLock auto_lock(&untreated_list_lock);
@@ -137,7 +137,7 @@ bool UntreatedParts::RowGetPart(off_t& start, off_t& size, off_t max_size, off_t
 bool UntreatedParts::ClearParts(off_t start, off_t size)
 {
 if(start < 0 || size < 0){
-S3FS_PRN_ERR("Paramter are wrong(start=%lld, size=%lld).", static_cast<long long int>(start), static_cast<long long int>(size));
+S3FS_PRN_ERR("Parameter are wrong(start=%lld, size=%lld).", static_cast<long long int>(start), static_cast<long long int>(size));
 return false;
 }
 AutoLock auto_lock(&untreated_list_lock);
@@ -2743,7 +2743,7 @@ static int s3fs_truncate(const char* _path, off_t size)
 //
 // [NOTICE]
 // FdManager::Open() ignores changes that reduce the file size for the
-// file you are editing. However, if user opens only onece, edit it,
+// file you are editing. However, if user opens only once, edit it,
 // and then shrink the file, it should be done.
 // When this function is called, the file is already open by FUSE or
 // some other operation. Therefore, if the number of open files is 1,
@@ -3151,7 +3151,7 @@ static int s3fs_release(const char* _path, struct fuse_file_info* fi)
 // check - for debug
 if(S3fsLog::IsS3fsLogDbg()){
 if(FdManager::HasOpenEntityFd(path)){
-S3FS_PRN_DBG("file(%s) is still opened(another pseudo fd is opend).", path);
+S3FS_PRN_DBG("file(%s) is still opened(another pseudo fd is opened).", path);
 }
 }
 S3FS_MALLOCTRIM(0);
@@ -48,7 +48,7 @@ static constexpr char DEFAULT_AWS_PROFILE_NAME[] = "default";
 //
 // detail=false ex. "Custom AWS Credential Library - v1.0.0"
 // detail=true ex. "Custom AWS Credential Library - v1.0.0
-// s3fs-fuse credential I/F library for S3 compatible strage X.
+// s3fs-fuse credential I/F library for S3 compatible storage X.
 // Copyright(C) 2022 Foo"
 //
 const char* VersionS3fsCredential(bool detail)
@@ -1119,7 +1119,7 @@ bool S3fsCred::CheckIAMCredentialUpdate(std::string* access_key_id, std::string*
 AutoLock auto_lock(&token_lock);
 
 if(IsIBMIAMAuth() || IsSetExtCredLib() || is_ecs || IsSetIAMRole()){
-if(AWSAccessTokenExpire < (time(nullptr) + S3fsCred::IAM_EXPIRE_MERGIN)){
+if(AWSAccessTokenExpire < (time(nullptr) + S3fsCred::IAM_EXPIRE_MERGING)){
 S3FS_PRN_INFO("IAM Access Token refreshing...");
 
 // update
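The expiry test in this hunk refreshes the IAM token a fixed margin before it actually lapses, so an in-flight request never rides on a nearly expired credential. A minimal sketch of that check, with illustrative names (the 20-minute constant mirrors the one in this commit):

```cpp
#include <ctime>

// Refresh the token once "now + margin" reaches the recorded expiry time.
static constexpr time_t kExpireMargin = 20 * 60;   // 20 minutes, as in this hunk

static bool token_needs_refresh(time_t token_expire)
{
    return token_expire < (time(nullptr) + kExpireMargin);
}
```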
@@ -1579,7 +1579,7 @@ bool S3fsCred::CheckAllParams()
 // Load and Initialize external credential library
 if(IsSetExtCredLib() || IsSetExtCredLibOpts()){
 if(!IsSetExtCredLib()){
-S3FS_PRN_EXIT("The \"credlib_opts\"(%s) is specifyed but \"credlib\" option is not specified.", credlib_opts.c_str());
+S3FS_PRN_EXIT("The \"credlib_opts\"(%s) is specified but \"credlib\" option is not specified.", credlib_opts.c_str());
 return false;
 }
 
@@ -49,7 +49,7 @@ class S3fsCred
 static constexpr char AWS_ACCESSKEYID[] = "AWSAccessKeyId";
 static constexpr char AWS_SECRETKEY[] = "AWSSecretKey";
 
-static constexpr int IAM_EXPIRE_MERGIN = 20 * 60; // update timing
+static constexpr int IAM_EXPIRE_MERGING = 20 * 60; // update timing
 static constexpr char ECS_IAM_ENV_VAR[] = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI";
 static constexpr char IAMCRED_ACCESSKEYID[] = "AccessKeyId";
 static constexpr char IAMCRED_SECRETACCESSKEY[] = "SecretAccessKey";
@@ -359,7 +359,7 @@ static constexpr char help_string[] =
 "\n"
 " max_thread_count (default is \"5\")\n"
 " - Specifies the number of threads waiting for stream uploads.\n"
-" Note that this option and Streamm Upload are still experimental\n"
+" Note that this option and Stream Upload are still experimental\n"
 " and subject to change in the future.\n"
 " This option will be merged with \"parallel_count\" in the future.\n"
 "\n"
@@ -100,7 +100,7 @@ void* ThreadPoolMan::Worker(void* arg)
 
 void* retval = param.pfunc(param.args);
 if(nullptr != retval){
-S3FS_PRN_WARN("The instruction function returned with somthign error code(%ld).", reinterpret_cast<long>(retval));
+S3FS_PRN_WARN("The instruction function returned with something error code(%ld).", reinterpret_cast<long>(retval));
 }
 if(param.psem){
 param.psem->post();
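This worker body shows the pool's contract: each queued instruction is a plain function pointer plus an argument, a non-null return value is only logged, and an optional semaphore signals completion. A self-contained sketch of one worker step under that contract (the Instruction type and run_one are hypothetical names, not s3fs's own):

```cpp
#include <cstdio>
#include <semaphore.h>

// Hypothetical queued task, mirroring the fields this hunk touches.
struct Instruction
{
    void* (*pfunc)(void*);   // task entry point
    void* args;              // opaque task argument
    sem_t* psem;             // optional completion signal, may be null
};

// Run one instruction: log (but do not abort on) a non-null result, then
// post the completion semaphore so a waiter can proceed.
static void run_one(const Instruction& param)
{
    void* retval = param.pfunc(param.args);
    if(nullptr != retval){
        std::fprintf(stderr, "task returned error code(%ld)\n", reinterpret_cast<long>(retval));
    }
    if(param.psem){
        sem_post(param.psem);
    }
}
```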
@@ -122,7 +122,7 @@ function test_truncate_shrink_read_file {
 # create file
 dd if=/dev/urandom of="${TEST_TEXT_FILE}" bs="${init_size}" count=1
 
-# truncate(shrink) file and read it before flusing
+# truncate(shrink) file and read it before flushing
 ../../truncate_read_file "${TEST_TEXT_FILE}" "${shrink_size}"
 
 # check file size
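The truncate_read_file helper invoked here shrinks a file and reads it back through the same open descriptor before anything is flushed. A rough C++ equivalent of that step, assuming only POSIX calls (a sketch, not the helper's actual source):

```cpp
#include <fcntl.h>
#include <unistd.h>

// Shrink an open file and immediately read from the same descriptor,
// before it is closed or flushed; returns false on any syscall failure.
static bool truncate_and_read(const char* path, off_t shrink_size)
{
    int fd = open(path, O_RDWR);
    if(-1 == fd){
        return false;
    }
    char buf[16];
    bool ok = (0 == ftruncate(fd, shrink_size)) &&
              (-1 != pread(fd, buf, sizeof(buf), 0));
    close(fd);
    return ok;
}
```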
@@ -2551,9 +2551,9 @@ function test_not_boundary_writes {
 # Part number 2: 10,485,760 - 20,971,519 (size = 10MB)
 # Part number 3: 20,971,520 - 26,214,399 (size = 5MB)
 #
-local BOUNDAY_TEST_FILE_SIZE; BOUNDAY_TEST_FILE_SIZE=$((BIG_FILE_BLOCK_SIZE * BIG_FILE_COUNT))
+local BOUNDARY_TEST_FILE_SIZE; BOUNDARY_TEST_FILE_SIZE=$((BIG_FILE_BLOCK_SIZE * BIG_FILE_COUNT))
 
-../../junk_data "${BOUNDAY_TEST_FILE_SIZE}" > "${TEST_TEXT_FILE}"
+../../junk_data "${BOUNDARY_TEST_FILE_SIZE}" > "${TEST_TEXT_FILE}"
 
 #
 # Write in First boundary
@@ -20,7 +20,7 @@
 #
 
 #
-# This is unsupport sample deleting cache files script.
+# This is unsupported sample deleting cache files script.
 # So s3fs's local cache files(stats and objects) grow up,
 # you need to delete these.
 # This script deletes these files with total size limit
|