Mirror of https://github.com/s3fs-fuse/s3fs-fuse.git (synced 2024-11-16 17:25:13 +00:00)

Commit 989d403b1f: Merge remote-tracking branch 'upstream/master'
@@ -5,6 +5,7 @@ cache: apt
before_install:
- sudo apt-get update -qq
- sudo apt-get install -qq cppcheck libfuse-dev openjdk-7-jdk
- sudo update-alternatives --set java /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
script:
- ./autogen.sh
- ./configure
@@ -89,7 +89,7 @@ If there are some keys after first line, those are used downloading object which
So that, you can keep all SSE-C keys in file, that is SSE-C key history.
If you specify "custom"("c") without file path, you need to set custom key by load_sse_c option or AWSSSECKEYS environment.(AWSSSECKEYS environment has some SSE-C keys with ":" separator.)
This option is used to decide the SSE type.
So that if you do not want to encrypt a object at uploading, but you need to decrypt encrypted object at downloaing, you can use load_sse_c option instead of this option.
So that if you do not want to encrypt a object at uploading, but you need to decrypt encrypted object at downloading, you can use load_sse_c option instead of this option.
For setting SSE-KMS, specify "use_sse=kmsid" or "use_sse=kmsid:<kms id>".
You can use "k" for short "kmsid".
If you san specify SSE-KMS type with your <kms id> in AWS KMS, you can set it after "kmsid:"(or "k:").
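As a side note to the SSE-C text above: each object request carries the customer key in three headers. A minimal, hedged sketch (not s3fs code) of those headers, assuming a 32-byte raw key and OpenSSL for the base64 and MD5 encodings:

    #include <cstdio>
    #include <map>
    #include <string>
    #include <openssl/evp.h>

    // base64 helper for this sketch only
    static std::string b64(const unsigned char* p, int len)
    {
        std::string out(4 * ((len + 2) / 3) + 1, '\0');
        int n = EVP_EncodeBlock(reinterpret_cast<unsigned char*>(&out[0]), p, len);
        out.resize(n);
        return out;
    }

    int main()
    {
        unsigned char key[32] = {0};                 // example raw SSE-C key (all zeroes)
        unsigned char md[EVP_MAX_MD_SIZE];
        unsigned int  mdlen = 0;
        EVP_Digest(key, sizeof(key), md, &mdlen, EVP_md5(), NULL);

        std::map<std::string, std::string> headers;  // stands in for headers_t
        headers["x-amz-server-side-encryption-customer-algorithm"] = "AES256";
        headers["x-amz-server-side-encryption-customer-key"]       = b64(key, sizeof(key));
        headers["x-amz-server-side-encryption-customer-key-MD5"]   = b64(md, mdlen);

        for(const auto& kv : headers){
            printf("%s: %s\n", kv.first.c_str(), kv.second.c_str());
        }
        return 0;
    }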
@@ -211,8 +211,8 @@ If you set this option, you can use the extended attribute.
For example, encfs and ecryptfs need to support the extended attribute.
Notice: if s3fs handles the extended attribute, s3fs can not work to copy command with preserve=mode.
.TP
\fB\-o\fR noxmlns - disable registing xml name space.
disable registing xml name space for response of ListBucketResult and ListVersionsResult etc. Default name space is looked up from "http://s3.amazonaws.com/doc/2006-03-01".
\fB\-o\fR noxmlns - disable registering xml name space.
disable registering xml name space for response of ListBucketResult and ListVersionsResult etc. Default name space is looked up from "http://s3.amazonaws.com/doc/2006-03-01".
This option should not be specified now, because s3fs looks up xmlns automatically after v1.66.
.TP
\fB\-o\fR nocopyapi - for other incomplete compatibility object storage.

@@ -222,10 +222,10 @@ If you set this option, s3fs do not use PUT with "x-amz-copy-source"(copy api).
\fB\-o\fR norenameapi - for other incomplete compatibility object storage.
For a distributed object storage which is compatibility S3 API without PUT(copy api).
This option is a subset of nocopyapi option. The nocopyapi option does not use copy-api for all command(ex. chmod, chown, touch, mv, etc), but this option does not use copy-api for only rename command(ex. mv).
If this option is specified with nocopapi, the s3fs ignores it.
If this option is specified with nocopyapi, then s3fs ignores it.
.TP
\fB\-o\fR use_path_request_style (use legacy API calling style)
Enble compatibility with S3-like APIs which do not support the virtual-host request style, by using the older path request style.
Enable compatibility with S3-like APIs which do not support the virtual-host request style, by using the older path request style.
.TP
\fB\-o\fR noua (suppress User-Agent header)
Usually s3fs outputs of the User-Agent in "s3fs/<version> (commit hash <hash>; <using ssl library name>)" format.
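To make the use_path_request_style entry above concrete, the two request styles differ only in where the bucket name goes; an illustration with a made-up bucket and key:

    virtual-host style:  https://mybucket.s3.amazonaws.com/dir/file.txt
    path request style:  https://s3.amazonaws.com/mybucket/dir/file.txt

S3-compatible services that cannot serve per-bucket host names typically need the second form, which is what this option selects.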
@@ -128,8 +128,8 @@ typedef std::map<std::string, std::string, header_nocase_cmp> headers_t;

//
// Header "x-amz-meta-xattr" is for extended attributes.
// This header is url encoded string which is json formated.
// x-amz-meta-xattr:urlencod({"xattr-1":"base64(value-1)","xattr-2":"base64(value-2)","xattr-3":"base64(value-3)"})
// This header is url encoded string which is json formatted.
// x-amz-meta-xattr:urlencode({"xattr-1":"base64(value-1)","xattr-2":"base64(value-2)","xattr-3":"base64(value-3)"})
//
typedef struct xattr_value{
unsigned char* pvalue;
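A small standalone sketch of producing a value in the shape the comment above describes (JSON whose values are base64, with the whole JSON string URL-encoded); this is illustrative only, not the s3fs implementation, and it leans on OpenSSL's EVP_EncodeBlock for the base64 step:

    #include <cctype>
    #include <cstdio>
    #include <map>
    #include <string>
    #include <openssl/evp.h>

    static std::string b64(const std::string& s)
    {
        std::string out(4 * ((s.size() + 2) / 3) + 1, '\0');
        int n = EVP_EncodeBlock(reinterpret_cast<unsigned char*>(&out[0]),
                                reinterpret_cast<const unsigned char*>(s.data()),
                                static_cast<int>(s.size()));
        out.resize(n);
        return out;
    }

    static std::string urlencode(const std::string& s)
    {
        static const char hex[] = "0123456789ABCDEF";
        std::string out;
        for(unsigned char c : s){
            if(isalnum(c) || '-' == c || '_' == c || '.' == c || '~' == c){
                out += static_cast<char>(c);
            }else{
                out += '%';
                out += hex[c >> 4];
                out += hex[c & 0x0F];
            }
        }
        return out;
    }

    int main()
    {
        std::map<std::string, std::string> xattrs = {{"xattr-1", "value-1"}, {"xattr-2", "value-2"}};
        std::string json = "{";
        for(auto it = xattrs.begin(); it != xattrs.end(); ++it){
            if(it != xattrs.begin()){
                json += ",";
            }
            json += "\"" + it->first + "\":\"" + b64(it->second) + "\"";
        }
        json += "}";
        printf("x-amz-meta-xattr: %s\n", urlencode(json).c_str());
        return 0;
    }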
@@ -147,7 +147,7 @@ typedef struct xattr_value{
typedef std::map<std::string, PXATTRVAL> xattrs_t;

//
// Global valiables
// Global variables
//
extern bool foreground;
extern bool nomultipart;
src/curl.cpp (31 changed lines)

@@ -58,7 +58,7 @@ static const std::string empty_payload_hash = "e3b0c44298fc1c149afbf4c8996fb9242
// Utilities
//-------------------------------------------------------------------
// [TODO]
// This function uses tempolary file, but should not use it.
// This function uses temporary file, but should not use it.
// For not using it, we implement function in each auth file(openssl, nss. gnutls).
//
static bool make_md5_from_string(const char* pstr, string& md5)
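For the TODO above, a hedged sketch of a temp-file-free variant using OpenSSL's one-shot digest API (the NSS and GnuTLS backends would each need their own equivalent). It returns the base64 form a Content-MD5 header expects, which may or may not be the exact output format the real helper wants:

    #include <cstdio>
    #include <cstring>
    #include <string>
    #include <openssl/evp.h>

    static bool make_md5_from_string_nofile(const char* pstr, std::string& md5)
    {
        if(!pstr){
            return false;
        }
        unsigned char md[EVP_MAX_MD_SIZE];
        unsigned int  mdlen = 0;
        if(1 != EVP_Digest(pstr, strlen(pstr), md, &mdlen, EVP_md5(), NULL)){
            return false;
        }
        // base64-encode the raw digest (Content-MD5 style)
        char encoded[EVP_MAX_MD_SIZE * 2];
        int  n = EVP_EncodeBlock(reinterpret_cast<unsigned char*>(encoded), md, mdlen);
        md5.assign(encoded, n);
        return true;
    }

    int main()
    {
        std::string md5;
        if(make_md5_from_string_nofile("hello", md5)){
            printf("%s\n", md5.c_str());
        }
        return 0;
    }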
@@ -308,7 +308,7 @@ void CurlHandlerPool::ReturnHandler(CURL* h)
#define MULTIPART_SIZE 10485760 // 10MB
#define MAX_MULTI_COPY_SOURCE_SIZE 524288000 // 500MB

#define IAM_EXPIRE_MERGIN (20 * 60) // update timming
#define IAM_EXPIRE_MERGIN (20 * 60) // update timing
#define IAM_CRED_URL "http://169.254.169.254/latest/meta-data/iam/security-credentials/"
#define IAMCRED_ACCESSKEYID "AccessKeyId"
#define IAMCRED_SECRETACCESSKEY "SecretAccessKey"
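For context on the defines above: requesting IAM_CRED_URL followed by the role name returns a small JSON credential document from the instance metadata service, shaped roughly like the following (all values are placeholders):

    {
      "Code" : "Success",
      "Type" : "AWS-HMAC",
      "AccessKeyId" : "ASIAEXAMPLE",
      "SecretAccessKey" : "<secret>",
      "Token" : "<session token>",
      "Expiration" : "2016-03-01T12:00:00Z"
    }

The IAMCRED_* keys above are picked out of this document, and IAM_EXPIRE_MERGIN appears to be the safety margin before Expiration at which the credentials get refreshed.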
@@ -1264,7 +1264,7 @@ int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta,
// Multi request
if(0 != (result = curlmulti.Request())){
S3FS_PRN_ERR("error occuered in multi request(errno=%d).", result);
S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result);
break;
}

@@ -1350,7 +1350,7 @@ int S3fsCurl::ParallelGetObjectRequest(const char* tpath, int fd, off_t start, s
// Multi request
if(0 != (result = curlmulti.Request())){
S3FS_PRN_ERR("error occuered in multi request(errno=%d).", result);
S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result);
break;
}

@@ -1622,7 +1622,7 @@ bool S3fsCurl::CreateCurlHandle(bool force)
S3FS_PRN_ERR("could not destroy handle.");
return false;
}
S3FS_PRN_INFO3("already has handle, so destroied it.");
S3FS_PRN_INFO3("already has handle, so destroyed it.");
}
if(NULL == (hCurl = sCurlPool->GetHandler())){

@@ -2100,7 +2100,7 @@ string S3fsCurl::CalcSignatureV2(const string& method, const string& strMD5, con
if(0 < S3fsCurl::IAM_role.size()){
if(!S3fsCurl::CheckIAMCredentialUpdate()){
S3FS_PRN_ERR("Something error occurred in checking IAM credential.");
return Signature; // returns empty string, then it occures error.
return Signature; // returns empty string, then it occurs error.
}
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-security-token", S3fsCurl::AWSAccessToken.c_str());
}
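As background for CalcSignatureV2 above: the V2 scheme signs a newline-joined summary of the request, and the x-amz-security-token header inserted when an IAM role is in use lands inside the canonicalized amz headers. A minimal sketch (illustrative, not the s3fs code) that only assembles and prints the string to sign:

    #include <cstdio>
    #include <string>

    int main()
    {
        // Placeholder request values; the signature itself would be
        // base64(HMAC-SHA1(SecretAccessKey, string_to_sign)).
        std::string method       = "GET";
        std::string content_md5  = "";
        std::string content_type = "";
        std::string date         = "Tue, 01 Mar 2016 12:00:00 GMT";
        std::string amz_headers  = "x-amz-security-token:EXAMPLETOKEN\n"; // only when IAM role credentials are used
        std::string resource     = "/mybucket/mykey";

        std::string string_to_sign =
            method + "\n" + content_md5 + "\n" + content_type + "\n" + date + "\n" +
            amz_headers + resource;
        printf("%s\n", string_to_sign.c_str());
        return 0;
    }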
@@ -2142,7 +2142,7 @@ string S3fsCurl::CalcSignature(const string& method, const string& canonical_uri
if(0 < S3fsCurl::IAM_role.size()){
if(!S3fsCurl::CheckIAMCredentialUpdate()){
S3FS_PRN_ERR("Something error occurred in checking IAM credential.");
return Signature; // returns empty string, then it occures error.
return Signature; // returns empty string, then it occurs error.
}
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-security-token", S3fsCurl::AWSAccessToken.c_str());
}

@@ -2322,7 +2322,7 @@ int S3fsCurl::DeleteRequest(const char* tpath)
//
// Get AccessKeyId/SecretAccessKey/AccessToken/Expiration by IAM role,
// and Set these value to class valiable.
// and Set these value to class variable.
//
int S3fsCurl::GetIAMCredentials(void)
{

@@ -2352,7 +2352,7 @@ int S3fsCurl::GetIAMCredentials(void)
int result = RequestPerform();

// analizing response
// analyzing response
if(0 == result && !S3fsCurl::SetIAMCredentials(bodydata->str())){
S3FS_PRN_ERR("Something error occurred, could not get IAM credential.");
}

@@ -2389,7 +2389,7 @@ bool S3fsCurl::LoadIAMRoleFromMetaData(void)
int result = RequestPerform();

// analizing response
// analyzing response
if(0 == result && !S3fsCurl::SetIAMRoleFromMetaData(bodydata->str())){
S3FS_PRN_ERR("Something error occurred, could not get IAM role name.");
result = -EIO;

@@ -2677,7 +2677,7 @@ int S3fsCurl::PutRequest(const char* tpath, headers_t& meta, int fd)
}
b_infile = file;
}else{
// This case is creating zero byte obejct.(calling by create_file_object())
// This case is creating zero byte object.(calling by create_file_object())
S3FS_PRN_INFO3("create zero byte file object.");
}

@@ -3441,7 +3441,8 @@ int S3fsCurl::CopyMultipartPostRequest(const char* from, const char* to, int par
if(!CreateCurlHandle(true)){
return -1;
}
string urlargs = "?partNumber=" + str(part_num) + "&uploadId=" + upload_id;
string request_uri = "partNumber=" + str(part_num) + "&uploadId=" + upload_id;
string urlargs = "?" + request_uri;
string resource;
string turl;
MakeUrlResource(get_realpath(to).c_str(), resource, turl);

@@ -3481,7 +3482,7 @@ int S3fsCurl::CopyMultipartPostRequest(const char* from, const char* to, int par
}
}else{
insertV4Headers("PUT", path, "", "");
insertV4Headers("PUT", path, request_uri, "");
}

// setopt
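The two hunks above matter because Signature V4 includes the query string in the canonical request, so the "partNumber=...&uploadId=..." part of an UploadPartCopy URL has to reach insertV4Headers. A hedged sketch of the canonical-request layout being signed (placeholder values, not s3fs code):

    #include <cstdio>
    #include <string>

    int main()
    {
        std::string canonical_request;
        canonical_request += "PUT\n";                              // HTTP method
        canonical_request += "/mybucket/mykey\n";                  // canonical URI (example, path-style)
        canonical_request += "partNumber=5&uploadId=EXAMPLEID\n";  // canonical query string -- the request_uri above
        canonical_request += "host:s3.amazonaws.com\n";            // canonical headers, one "name:value\n" each
        canonical_request += "x-amz-date:20160301T120000Z\n";
        canonical_request += "\n";                                 // blank line ends the header block
        canonical_request += "host;x-amz-date\n";                  // signed header names
        canonical_request += "UNSIGNED-PAYLOAD";                   // or the hex SHA-256 of the payload
        printf("%s\n", canonical_request.c_str());
        return 0;
    }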
@@ -3916,7 +3917,7 @@ int S3fsMultiCurl::MultiRead(void)
isRetry = true;
}
}else{
S3FS_PRN_ERR("failed a request(Unknown respons code: %s)", s3fscurl->url.c_str());
S3FS_PRN_ERR("failed a request(Unknown response code: %s)", s3fscurl->url.c_str());
}
}else{
S3FS_PRN_WARN("failed to read(remaining: %d code: %d msg: %s), so retry this.",

@@ -4051,7 +4052,7 @@ struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* k
return list;
}

// key & value are trimed and lower(only key)
// key & value are trimmed and lower (only key)
string strkey = trim(string(key));
string strval = trim(string(value ? value : ""));
string strnew = key + string(": ") + strval;
@@ -159,7 +159,7 @@ typedef std::map<std::string, std::string> iamcredmap_t;
typedef std::map<std::string, std::string> sseckeymap_t;
typedef std::list<sseckeymap_t> sseckeylist_t;

// strage class(rrs)
// storage class(rrs)
enum storage_class_t {
STANDARD,
STANDARD_IA,

@@ -328,7 +328,7 @@ class S3fsCurl
static int ParallelGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size);
static bool CheckIAMCredentialUpdate(void);

// class methods(valiables)
// class methods(variables)
static std::string LookupMimeType(const std::string& name);
static bool SetCheckCertificate(bool isCertCheck);
static bool SetDnsCache(bool isCache);

@@ -407,7 +407,7 @@ class S3fsCurl
int MultipartUploadRequest(const std::string& upload_id, const char* tpath, int fd, off_t offset, size_t size, etaglist_t& list);
int MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size);

// methods(valiables)
// methods(variables)
CURL* GetCurlHandle(void) const { return hCurl; }
std::string GetPath(void) const { return path; }
std::string GetBasePath(void) const { return base_path; }
@@ -745,7 +745,7 @@ int FdEntity::OpenMirrorFile(void)
return -EIO;
}

// make tmporary directory
// make temporary directory
string bupdir;
if(!FdManager::MakeCachePath(NULL, bupdir, true, true)){
S3FS_PRN_ERR("could not make bup cache directory path or create it.");

@@ -952,7 +952,7 @@ int FdEntity::Open(headers_t* pmeta, ssize_t size, time_t time)
}

// [NOTE]
// This method is called from olny nocopapi functions.
// This method is called from only nocopyapi functions.
// So we do not check disk space for this option mode, if there is no enough
// disk space this method will be failed.
//

@@ -1232,7 +1232,7 @@ int FdEntity::NoCacheLoadAndPost(off_t start, size_t size)
if(0 != size && static_cast<size_t>(start + size) <= static_cast<size_t>((*iter)->offset)){
break;
}
// download earch multipart size(default 10MB) in unit
// download each multipart size(default 10MB) in unit
for(size_t oneread = 0, totalread = ((*iter)->offset < start ? start : 0); totalread < (*iter)->bytes; totalread += oneread){
int upload_fd = fd;
off_t offset = (*iter)->offset + totalread;

@@ -1434,7 +1434,7 @@ int FdEntity::RowFlush(const char* tpath, bool force_sync)
// check disk space
if(FdManager::IsSafeDiskSpace(NULL, restsize)){
// enough disk space
// Load all unitialized area
// Load all uninitialized area
if(0 != (result = Load())){
S3FS_PRN_ERR("failed to upload all area(errno=%d)", result);
return static_cast<ssize_t>(result);

@@ -1448,7 +1448,7 @@ int FdEntity::RowFlush(const char* tpath, bool force_sync)
}
}
}else{
// alreay start miltipart uploading
// already start multipart uploading
}
}

@@ -1627,7 +1627,7 @@ ssize_t FdEntity::Write(const char* bytes, off_t start, size_t size)
if(FdManager::IsSafeDiskSpace(NULL, restsize)){
// enough disk space

// Load unitialized area which starts from 0 to (start + size) before writing.
// Load uninitialized area which starts from 0 to (start + size) before writing.
if(0 < start && 0 != (result = Load(0, static_cast<size_t>(start)))){
S3FS_PRN_ERR("failed to load uninitialized area before writing(errno=%d)", result);
return static_cast<ssize_t>(result);

@@ -1647,7 +1647,7 @@ ssize_t FdEntity::Write(const char* bytes, off_t start, size_t size)
mp_size = 0;
}
}else{
// alreay start miltipart uploading
// already start multipart uploading
}

// Writing

@@ -1705,7 +1705,7 @@ ssize_t FdEntity::Write(const char* bytes, off_t start, size_t size)
#define NOCACHE_PATH_PREFIX_FORM " __S3FS_UNEXISTED_PATH_%lx__ / " // important space words for simply

//------------------------------------------------
// FdManager class valiable
// FdManager class variable
//------------------------------------------------
FdManager FdManager::singleton;
pthread_mutex_t FdManager::fd_manager_lock;

@@ -1832,7 +1832,7 @@ bool FdManager::MakeRandomTempPath(const char* path, string& tmppath)
{
char szBuff[64];

sprintf(szBuff, NOCACHE_PATH_PREFIX_FORM, random()); // warry for performance, but maybe don't warry.
sprintf(szBuff, NOCACHE_PATH_PREFIX_FORM, random()); // worry for performance, but maybe don't worry.
tmppath = szBuff;
tmppath += path ? path : "";
return true;

@@ -1941,7 +1941,7 @@ FdEntity* FdManager::GetFdEntity(const char* path, int existfd)
if(-1 != existfd){
for(iter = fent.begin(); iter != fent.end(); ++iter){
if((*iter).second && (*iter).second->GetFd() == existfd){
// found opend fd in map
// found opened fd in map
if(0 == strcmp((*iter).second->GetPath(), path)){
return (*iter).second;
}

@@ -1998,7 +1998,7 @@ FdEntity* FdManager::Open(const char* path, headers_t* pmeta, ssize_t size, time
// using cache
fent[string(path)] = ent;
}else{
// not using cache, so the key of fdentity is set not really existsing path.
// not using cache, so the key of fdentity is set not really existing path.
// (but not strictly unexisting path.)
//
// [NOTE]

@@ -2033,7 +2033,7 @@ FdEntity* FdManager::ExistOpen(const char* path, int existfd, bool ignore_existf
for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){
if((*iter).second && (*iter).second->IsOpen() && (ignore_existfd || ((*iter).second->GetFd() == existfd))){
// found opend fd in map
// found opened fd in map
if(0 == strcmp((*iter).second->GetPath(), path)){
ent = (*iter).second;
ent->Dup();
src/s3fs.cpp (114 changed lines)

@@ -74,7 +74,7 @@ using namespace std;
//-------------------------------------------------------------------
// Structs
//-------------------------------------------------------------------
typedef struct uncomplete_multipart_info{
typedef struct incomplete_multipart_info{
string key;
string id;
string date;

@@ -83,7 +83,7 @@ typedef struct uncomplete_multipart_info{
typedef std::list<UNCOMP_MP_INFO> uncomp_mp_list_t;

//-------------------------------------------------------------------
// Global valiables
// Global variables
//-------------------------------------------------------------------
bool foreground = false;
bool nomultipart = false;

@@ -97,7 +97,7 @@ s3fs_log_level debug_level = S3FS_LOG_CRIT;
const char* s3fs_log_nest[S3FS_LOG_NEST_MAX] = {"", " ", " ", " "};

//-------------------------------------------------------------------
// Static valiables
// Static variables
//-------------------------------------------------------------------
static uid_t mp_uid = 0; // owner of mount point(only not specified uid opt)
static gid_t mp_gid = 0; // group of mount point(only not specified gid opt)

@@ -177,7 +177,7 @@ static int check_for_aws_format(void);
static int check_passwd_file_perms(void);
static int read_passwd_file(void);
static int get_access_keys(void);
static int set_moutpoint_attribute(struct stat& mpst);
static int set_mountpoint_attribute(struct stat& mpst);
static int set_bucket(const char* arg);
static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_args* outargs);

@@ -321,7 +321,7 @@ static int chk_dir_object_type(const char* path, string& newpath, string& nowpat
newpath += "/";
}

// Alwayes check "dir/" at first.
// Always check "dir/" at first.
if(0 == (result = get_object_attribute(newpath.c_str(), NULL, pmeta, false, &isforce))){
// Found "dir/" cache --> Check for "_$folder$", "no dir object"
nowcache = newpath;

@@ -349,7 +349,7 @@ static int chk_dir_object_type(const char* path, string& newpath, string& nowpat
if(0 == (result = get_object_attribute(nowpath.c_str(), NULL, pmeta, false, &isforce))){
// Found "dir" cache --> this case is only "dir" type.
// Because, if object is "_$folder$" or "no dir object", the cache is "dir/" type.
// (But "no dir objet" is checked here.)
// (But "no dir object" is checked here.)
nowcache = nowpath;
if(isforce){
(*pType) = DIRTYPE_NOOBJ;

@@ -359,7 +359,7 @@ static int chk_dir_object_type(const char* path, string& newpath, string& nowpat
}
}else{
// Not found cache --> check for "_$folder$" and "no dir object".
nowcache = ""; // This case is no cahce.
nowcache = ""; // This case is no cache.
nowpath += "_$folder$";
if(is_special_name_folder_object(nowpath.c_str())){
// "_$folder$" type.

@@ -469,7 +469,7 @@ static int get_object_attribute(const char* path, struct stat* pstbuf, headers_t
strpath = strpath.substr(0, strpath.length() - 1);
}
if(-ENOTEMPTY == directory_empty(strpath.c_str())){
// found "no dir obejct".
// found "no dir object".
strpath += "/";
forcedir = true;
if(pisforce){

@@ -563,7 +563,7 @@ static int check_object_access(const char* path, int mask, struct stat* pstbuf)
return -EIO;
}
if(0 != (result = get_object_attribute(path, pst))){
// If there is not tha target file(object), reusult is -ENOENT.
// If there is not the target file(object), result is -ENOENT.
return result;
}
if(0 == pcxt->uid){

@@ -598,7 +598,7 @@ static int check_object_access(const char* path, int mask, struct stat* pstbuf)
if(pcxt->gid == obj_gid){
base_mask |= S_IRWXG;
}
if(1 == is_uid_inculde_group(pcxt->uid, obj_gid)){
if(1 == is_uid_include_group(pcxt->uid, obj_gid)){
base_mask |= S_IRWXG;
}
mode &= base_mask;

@@ -637,7 +637,7 @@ static int check_object_owner(const char* path, struct stat* pstbuf)
return -EIO;
}
if(0 != (result = get_object_attribute(path, pst))){
// If there is not tha target file(object), reusult is -ENOENT.
// If there is not the target file(object), result is -ENOENT.
return result;
}
// check owner

@@ -744,12 +744,12 @@ static FdEntity* get_local_fent(const char* path, bool is_load)
bool force_tmpfile = S_ISREG(stobj.st_mode) ? false : true;

if(NULL == (ent = FdManager::get()->Open(path, &meta, static_cast<ssize_t>(stobj.st_size), mtime, force_tmpfile, true))){
S3FS_PRN_ERR("Coult not open file. errno(%d)", errno);
S3FS_PRN_ERR("Could not open file. errno(%d)", errno);
return NULL;
}
// load
if(is_load && !ent->OpenAndLoadAll(&meta)){
S3FS_PRN_ERR("Coult not load file. errno(%d)", errno);
S3FS_PRN_ERR("Could not load file. errno(%d)", errno);
FdManager::get()->Close(ent);
return NULL;
}
@@ -771,7 +771,7 @@ static int put_headers(const char* path, headers_t& meta, bool is_copy)
// files larger than 5GB must be modified via the multipart interface
// *** If there is not target object(a case of move command),
// get_object_attribute() returns error with initilizing buf.
// get_object_attribute() returns error with initializing buf.
(void)get_object_attribute(path, &buf);

if(buf.st_size >= FIVE_GB){

@@ -815,7 +815,7 @@ static int s3fs_getattr(const char* path, struct stat* stbuf)
if(0 != (result = check_object_access(path, F_OK, stbuf))){
return result;
}
// If has already opened fd, the st_size shuld be instead.
// If has already opened fd, the st_size should be instead.
// (See: Issue 241)
if(stbuf){
FdEntity* ent;

@@ -1130,7 +1130,7 @@ static int s3fs_rmdir(const char* path)
}
}
// If there is no "dir" and "dir/" object(this case is made by s3cmd/s3sync),
// the cache key is "dir/". So we get error only onece(delete "dir/").
// the cache key is "dir/". So we get error only once(delete "dir/").

// check for "_$folder$" object.
// This processing is necessary for other S3 clients compatibility.

@@ -1205,11 +1205,11 @@ static int rename_object(const char* from, const char* to)
S3FS_PRN_INFO1("[from=%s][to=%s]", from , to);

if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){
// not permmit writing "to" object parent dir.
// not permit writing "to" object parent dir.
return result;
}
if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){
// not permmit removing "from" object parent dir.
// not permit removing "from" object parent dir.
return result;
}
if(0 != (result = get_object_attribute(from, NULL, &meta))){

@@ -1240,11 +1240,11 @@ static int rename_object_nocopy(const char* from, const char* to)
S3FS_PRN_INFO1("[from=%s][to=%s]", from , to);

if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){
// not permmit writing "to" object parent dir.
// not permit writing "to" object parent dir.
return result;
}
if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){
// not permmit removing "from" object parent dir.
// not permit removing "from" object parent dir.
return result;
}

@@ -1288,11 +1288,11 @@ static int rename_large_object(const char* from, const char* to)
S3FS_PRN_INFO1("[from=%s][to=%s]", from , to);

if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){
// not permmit writing "to" object parent dir.
// not permit writing "to" object parent dir.
return result;
}
if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){
// not permmit removing "from" object parent dir.
// not permit removing "from" object parent dir.
return result;
}
if(0 != (result = get_object_attribute(from, &buf, &meta, false))){

@@ -1471,11 +1471,11 @@ static int s3fs_rename(const char* from, const char* to)
S3FS_PRN_INFO("[from=%s][to=%s]", from, to);

if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){
// not permmit writing "to" object parent dir.
// not permit writing "to" object parent dir.
return result;
}
if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){
// not permmit removing "from" object parent dir.
// not permit removing "from" object parent dir.
return result;
}
if(0 != (result = get_object_attribute(from, &buf, NULL))){

@@ -1595,7 +1595,7 @@ static int s3fs_chmod_nocopy(const char* path, mode_t mode)
S3FS_PRN_INFO1("[path=%s][mode=%04o]", path, mode);

if(0 == strcmp(path, "/")){
S3FS_PRN_ERR("Could not change mode for maount point.");
S3FS_PRN_ERR("Could not change mode for mount point.");
return -EIO;
}
if(0 != (result = check_parent_object_access(path, X_OK))){

@@ -1675,7 +1675,7 @@ static int s3fs_chown(const char* path, uid_t uid, gid_t gid)
S3FS_PRN_INFO("[path=%s][uid=%u][gid=%u]", path, (unsigned int)uid, (unsigned int)gid);

if(0 == strcmp(path, "/")){
S3FS_PRN_ERR("Could not change owner for maount point.");
S3FS_PRN_ERR("Could not change owner for mount point.");
return -EIO;
}
if(0 != (result = check_parent_object_access(path, X_OK))){

@@ -1756,7 +1756,7 @@ static int s3fs_chown_nocopy(const char* path, uid_t uid, gid_t gid)
S3FS_PRN_INFO1("[path=%s][uid=%u][gid=%u]", path, (unsigned int)uid, (unsigned int)gid);

if(0 == strcmp(path, "/")){
S3FS_PRN_ERR("Could not change owner for maount point.");
S3FS_PRN_ERR("Could not change owner for mount point.");
return -EIO;
}
if(0 != (result = check_parent_object_access(path, X_OK))){

@@ -1846,7 +1846,7 @@ static int s3fs_utimens(const char* path, const struct timespec ts[2])
S3FS_PRN_INFO("[path=%s][mtime=%jd]", path, (intmax_t)(ts[1].tv_sec));

if(0 == strcmp(path, "/")){
S3FS_PRN_ERR("Could not change mtime for maount point.");
S3FS_PRN_ERR("Could not change mtime for mount point.");
return -EIO;
}
if(0 != (result = check_parent_object_access(path, X_OK))){
@@ -2389,20 +2389,20 @@ static int readdir_multi_head(const char* path, S3ObjList& head, void* buf, fuse
// Multi request
if(0 != (result = curlmulti.Request())){
// If result is -EIO, it is somthing error occurred.
// If result is -EIO, it is something error occurred.
// This case includes that the object is encrypting(SSE) and s3fs does not have keys.
// So s3fs set result to 0 in order to continue the process.
if(-EIO == result){
S3FS_PRN_WARN("error occuered in multi request(errno=%d), but continue...", result);
S3FS_PRN_WARN("error occurred in multi request(errno=%d), but continue...", result);
result = 0;
}else{
S3FS_PRN_ERR("error occuered in multi request(errno=%d).", result);
S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result);
break;
}
}

// populate fuse buffer
// here is best posision, because a case is cache size < files in directory
// here is best position, because a case is cache size < files in directory
//
for(iter = fillerlist.begin(); fillerlist.end() != iter; ++iter){
struct stat st;

@@ -2528,7 +2528,7 @@ static int list_bucket(const char* path, S3ObjList& head, const char* delimiter,
xmlFree(tmpch);
}else{
// If did not specify "delimiter", s3 did not return "NextMarker".
// On this case, can use lastest name for next marker.
// On this case, can use last name for next marker.
//
string lastname;
if(!head.GetLastName(lastname)){

@@ -3132,7 +3132,7 @@ static int s3fs_getxattr(const char* path, const char* name, char* value, size_t
return result;
}

// get headders
// get headers
if(0 != (result = get_object_attribute(path, NULL, &meta))){
return result;
}

@@ -3196,7 +3196,7 @@ static int s3fs_listxattr(const char* path, char* list, size_t size)
return result;
}

// get headders
// get headers
if(0 != (result = get_object_attribute(path, NULL, &meta))){
return result;
}

@@ -3376,7 +3376,7 @@ static void* s3fs_init(struct fuse_conn_info* conn)
// cache(remove cache dirs at first)
if(is_remove_cache && (!CacheFileStat::DeleteCacheFileStatDirectory() || !FdManager::DeleteCacheDirectory())){
S3FS_PRN_DBG("Could not inilialize cache directory.");
S3FS_PRN_DBG("Could not initialize cache directory.");
}

// ssl init

@@ -3674,7 +3674,7 @@ static int s3fs_utility_mode(void)
S3FS_PRN_EXIT("Could not get list multipart upload.");
result = EXIT_FAILURE;
}else{
// perse result(uncomplete multipart upload information)
// parse result(incomplete multipart upload information)
S3FS_PRN_DBG("response body = {\n%s\n}", body.c_str());

xmlDocPtr doc;

@@ -3714,13 +3714,13 @@ static int s3fs_utility_mode(void)
}

//
// If calling with wrong region, s3fs gets following error body as 400 erro code.
// If calling with wrong region, s3fs gets following error body as 400 error code.
// "<Error><Code>AuthorizationHeaderMalformed</Code><Message>The authorization header is
// malformed; the region 'us-east-1' is wrong; expecting 'ap-northeast-1'</Message>
// <Region>ap-northeast-1</Region><RequestId>...</RequestId><HostId>...</HostId>
// </Error>"
//
// So this is cheep codes but s3fs should get correct reagion automatically.
// So this is cheep codes but s3fs should get correct region automatically.
//
static bool check_region_error(const char* pbody, string& expectregion)
{
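A hedged sketch of the kind of extraction check_region_error has to do: pull the expected region out of the <Region> element of the 400 error body quoted above (plain string search; not the s3fs implementation):

    #include <cstdio>
    #include <string>

    static bool check_region_error_sketch(const char* pbody, std::string& expectregion)
    {
        if(!pbody){
            return false;
        }
        std::string body(pbody);
        if(std::string::npos == body.find("AuthorizationHeaderMalformed")){
            return false;
        }
        std::string::size_type spos = body.find("<Region>");
        std::string::size_type epos = body.find("</Region>");
        if(std::string::npos == spos || std::string::npos == epos || epos <= spos + 8){
            return false;
        }
        expectregion = body.substr(spos + 8, epos - (spos + 8));
        return true;
    }

    int main()
    {
        const char* body =
            "<Error><Code>AuthorizationHeaderMalformed</Code><Message>The authorization header is "
            "malformed; the region 'us-east-1' is wrong; expecting 'ap-northeast-1'</Message>"
            "<Region>ap-northeast-1</Region><RequestId>...</RequestId><HostId>...</HostId></Error>";
        std::string region;
        if(check_region_error_sketch(body, region)){
            printf("expected region: %s\n", region.c_str());
        }
        return 0;
    }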
@@ -3840,7 +3840,7 @@ static int s3fs_check_service(void)
// Return: 1 - OK(could read and set accesskey etc.)
// 0 - NG(could not read)
// -1 - Should shoutdown immidiatly
// -1 - Should shutdown immediately
static int check_for_aws_format(void)
{
size_t first_pos = string::npos;

@@ -4186,7 +4186,7 @@ static int get_access_keys(void)
//
// Check & Set attributes for mount point.
//
static int set_moutpoint_attribute(struct stat& mpst)
static int set_mountpoint_attribute(struct stat& mpst)
{
mp_uid = geteuid();
mp_gid = getegid();

@@ -4200,7 +4200,7 @@ static int set_moutpoint_attribute(struct stat& mpst)
return true;
}
// check group permission
if(mpst.st_gid == mp_gid || 1 == is_uid_inculde_group(mp_uid, mpst.st_gid)){
if(mpst.st_gid == mp_gid || 1 == is_uid_include_group(mp_uid, mpst.st_gid)){
if(S_IRWXG == (mpst.st_mode & S_IRWXG)){
return true;
}

@@ -4274,7 +4274,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
S3FS_PRN_EXIT("MOUNTPOINT: %s is not a directory.", mountpoint.c_str());
return -1;
}
if(!set_moutpoint_attribute(stbuf)){
if(!set_mountpoint_attribute(stbuf)){
S3FS_PRN_EXIT("MOUNTPOINT: %s permission denied.", mountpoint.c_str());
return -1;
}

@@ -4298,7 +4298,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
return 1;
}

// Unknow option
// Unknown option
if(0 == utility_mode){
S3FS_PRN_EXIT("specified unknown third optioni(%s).", arg);
}else{

@@ -4417,14 +4417,14 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
//
// load_sse_c=file Load Server Side Encrypting custom keys
//
// AWSSSECKEYS Loaing Environment for Server Side Encrypting custom keys
// AWSSSEKMSID Loaing Environment for Server Side Encrypting Key id
// AWSSSECKEYS Loading Environment for Server Side Encrypting custom keys
// AWSSSEKMSID Loading Environment for Server Side Encrypting Key id
//
if(0 == STR2NCMP(arg, "use_sse")){
if(0 == strcmp(arg, "use_sse") || 0 == strcmp(arg, "use_sse=1")){ // use_sse=1 is old type paraemter
if(0 == strcmp(arg, "use_sse") || 0 == strcmp(arg, "use_sse=1")){ // use_sse=1 is old type parameter
// sse type is SSE_S3
if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseS3Type()){
S3FS_PRN_EXIT("already set SSE another type, so confrict use_sse option or environment.");
S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment.");
return -1;
}
S3fsCurl::SetSseType(SSE_S3);

@@ -4432,11 +4432,11 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
}else if(0 == strcmp(arg, "use_sse=kmsid") || 0 == strcmp(arg, "use_sse=k")){
// sse type is SSE_KMS with out kmsid(expecting id is loaded by environment)
if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseKmsType()){
S3FS_PRN_EXIT("already set SSE another type, so confrict use_sse option or environment.");
S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment.");
return -1;
}
if(!S3fsCurl::IsSetSseKmsId()){
S3FS_PRN_EXIT("use_sse=kms but not loaded kms id by environemnt.");
S3FS_PRN_EXIT("use_sse=kms but not loaded kms id by environment.");
return -1;
}
S3fsCurl::SetSseType(SSE_KMS);

@@ -4444,7 +4444,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
}else if(0 == STR2NCMP(arg, "use_sse=kmsid:") || 0 == STR2NCMP(arg, "use_sse=k:")){
// sse type is SSE_KMS with kmsid
if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseKmsType()){
S3FS_PRN_EXIT("already set SSE another type, so confrict use_sse option or environment.");
S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment.");
return -1;
}
const char* kmsid;

@@ -4460,9 +4460,9 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
S3fsCurl::SetSseType(SSE_KMS);

}else if(0 == strcmp(arg, "use_sse=custom") || 0 == strcmp(arg, "use_sse=c")){
// sse type is SSE_C with out custom keys(expecting keays are loaded by environment or load_sse_c option)
// sse type is SSE_C with out custom keys(expecting keys are loaded by environment or load_sse_c option)
if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseCType()){
S3FS_PRN_EXIT("already set SSE another type, so confrict use_sse option or environment.");
S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment.");
return -1;
}
// [NOTE]

@@ -4473,7 +4473,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
}else if(0 == STR2NCMP(arg, "use_sse=custom:") || 0 == STR2NCMP(arg, "use_sse=c:")){
// sse type is SSE_C with custom keys
if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseCType()){
S3FS_PRN_EXIT("already set SSE another type, so confrict use_sse option or environment.");
S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment.");
return -1;
}
const char* ssecfile;

@@ -4488,7 +4488,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
}
S3fsCurl::SetSseType(SSE_C);

}else if(0 == strcmp(arg, "use_sse=")){ // this type is old style(paraemter is custom key file path)
}else if(0 == strcmp(arg, "use_sse=")){ // this type is old style(parameter is custom key file path)
// SSE_C with custom keys.
const char* ssecfile = &arg[strlen("use_sse=")];
if(!S3fsCurl::SetSseCKeys(ssecfile)){

@@ -4499,7 +4499,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
}else{
// never come here.
S3FS_PRN_EXIT("something wrong use_sse optino.");
S3FS_PRN_EXIT("something wrong use_sse option.");
return -1;
}
return 0;

@@ -4796,7 +4796,7 @@ int main(int argc, char* argv[])
xmlInitParser();
LIBXML_TEST_VERSION

// get progam name - emulate basename
// get program name - emulate basename
size_t found = string::npos;
program_name.assign(argv[0]);
found = program_name.find_last_of("/");
@@ -46,7 +46,7 @@
using namespace std;

//-------------------------------------------------------------------
// Global valiables
// Global variables
//-------------------------------------------------------------------
std::string mount_prefix = "";

@@ -110,7 +110,7 @@ bool S3ObjList::insert(const char* name, const char* etag, bool is_dir)
if(objects.end() != (iter = objects.find(chkname))){
// found "dir/" object --> not add new object.
// and add normalization
return insert_nomalized(orgname.c_str(), chkname.c_str(), true);
return insert_normalized(orgname.c_str(), chkname.c_str(), true);
}
}

@@ -135,10 +135,10 @@ bool S3ObjList::insert(const char* name, const char* etag, bool is_dir)
}

// add normalization
return insert_nomalized(orgname.c_str(), newname.c_str(), is_dir);
return insert_normalized(orgname.c_str(), newname.c_str(), is_dir);
}

bool S3ObjList::insert_nomalized(const char* name, const char* normalized, bool is_dir)
bool S3ObjList::insert_normalized(const char* name, const char* normalized, bool is_dir)
{
if(!name || '\0' == name[0] || !normalized || '\0' == normalized[0]){
return false;

@@ -441,7 +441,7 @@ AutoLock::~AutoLock()
// get user name from uid
string get_username(uid_t uid)
{
static size_t maxlen = 0; // set onece
static size_t maxlen = 0; // set once
char* pbuf;
struct passwd pwinfo;
struct passwd* ppwinfo = NULL;

@@ -476,9 +476,9 @@ string get_username(uid_t uid)
return name;
}

int is_uid_inculde_group(uid_t uid, gid_t gid)
int is_uid_include_group(uid_t uid, gid_t gid)
{
static size_t maxlen = 0; // set onece
static size_t maxlen = 0; // set once
int result;
char* pbuf;
struct group ginfo;

@@ -599,7 +599,7 @@ bool check_exist_dir_permission(const char* dirpath)
// could not access directory
return false;
}
// somthing error occured
// something error occurred
return false;
}

@@ -616,7 +616,7 @@ bool check_exist_dir_permission(const char* dirpath)
return false;
}
}else{
if(1 == is_uid_inculde_group(myuid, st.st_gid)){
if(1 == is_uid_include_group(myuid, st.st_gid)){
if(S_IRWXG != (st.st_mode & S_IRWXG)){
return false;
}

@@ -959,7 +959,7 @@ void show_help (void)
" with \":\" separator.) This option is used to decide the\n"
" SSE type. So that if you do not want to encrypt a object\n"
" object at uploading, but you need to decrypt encrypted\n"
" object at downloaing, you can use load_sse_c option instead\n"
" object at downloading, you can use load_sse_c option instead\n"
" of this option.\n"
" For setting SSE-KMS, specify \"use_sse=kmsid\" or\n"
" \"use_sse=kmsid:<kms id>\". You can use \"k\" for short \"kmsid\".\n"

@@ -971,9 +971,9 @@ void show_help (void)
" region.\n"
"\n"
" load_sse_c - specify SSE-C keys\n"
" Specify the custom-provided encription keys file path for decrypting\n"
" at duwnloading.\n"
" If you use the custom-provided encription key at uploading, you\n"
" Specify the custom-provided encryption keys file path for decrypting\n"
" at downloading.\n"
" If you use the custom-provided encryption key at uploading, you\n"
" specify with \"use_sse=custom\". The file has many lines, one line\n"
" means one custom key. So that you can keep all SSE-C keys in file,\n"
" that is SSE-C key history. AWSSSECKEYS environment is as same as this\n"

@@ -1131,7 +1131,7 @@ void show_help (void)
" nocopyapi, then s3fs ignores it.\n"
"\n"
" use_path_request_style (use legacy API calling style)\n"
" Enble compatibility with S3-like APIs which do not support\n"
" Enable compatibility with S3-like APIs which do not support\n"
" the virtual-host request style, by using the older path request\n"
" style.\n"
"\n"

@@ -1169,7 +1169,7 @@ void show_help (void)
" -d --debug Turn on DEBUG messages to syslog. Specifying -d\n"
" twice turns on FUSE debug messages to STDOUT.\n"
" -f FUSE foreground option - do not run as daemon.\n"
" -s FUSE singlethread option\n"
" -s FUSE singlethreaded option\n"
" disable multi-threaded operation\n"
"\n"
"\n"
@@ -27,7 +27,7 @@
// Struct
//
struct s3obj_entry{
std::string normalname; // normalized name: if empty, object is nomalized name.
std::string normalname; // normalized name: if empty, object is normalized name.
std::string orgname; // original name: if empty, object is original name.
std::string etag;
bool is_dir;

@@ -47,7 +47,7 @@ class S3ObjList
s3obj_t objects;

private:
bool insert_nomalized(const char* name, const char* normalized, bool is_dir);
bool insert_normalized(const char* name, const char* normalized, bool is_dir);
const s3obj_entry* GetS3Obj(const char* name) const;

s3obj_t::const_iterator begin(void) const {

@@ -104,7 +104,7 @@ MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const cha
void free_mvnodes(MVNODE *head);

std::string get_username(uid_t uid);
int is_uid_inculde_group(uid_t uid, gid_t gid);
int is_uid_include_group(uid_t uid, gid_t gid);

std::string mydirname(const char* path);
std::string mydirname(std::string path);
@@ -50,7 +50,7 @@ export S3_URL
export TEST_SCRIPT_DIR=`pwd`
export TEST_BUCKET_MOUNT_POINT_1=${TEST_BUCKET_1}

S3PROXY_VERSION="1.4.0"
S3PROXY_VERSION="1.5.1"
S3PROXY_BINARY=${S3PROXY_BINARY-"s3proxy-${S3PROXY_VERSION}"}

if [ ! -f "$S3FS_CREDENTIALS_FILE" ]

@@ -157,8 +157,6 @@ function start_s3fs {
#
# TODO: Allow all these options to be overriden with env variables
#
# sigv2
# Historically because S3Proxy only supports sigv2.
# use_path_request_style
# The test env doesn't have virtual hosts
# createbucket

@@ -181,7 +179,6 @@ function start_s3fs {
${VALGRIND_EXEC} ${S3FS} \
$TEST_BUCKET_1 \
$TEST_BUCKET_MOUNT_POINT_1 \
-o sigv2 \
-o use_path_request_style \
-o url=${S3_URL} \
-o createbucket \
@@ -42,7 +42,7 @@ function test_truncate_file {
}

function test_truncate_empty_file {
echo "Testing truncate empty file ..."
describe "Testing truncate empty file ..."
# Write an empty test file
touch ${TEST_TEXT_FILE}

@@ -412,11 +412,11 @@ function add_all_tests {
# TODO: broken: https://github.com/s3fs-fuse/s3fs-fuse/issues/145
#add_tests test_rename_before_close
add_tests test_multipart_upload
# TODO: test disabled until S3Proxy 1.5.0 is released
#add_tests test_multipart_copy
add_tests test_multipart_copy
add_tests test_special_characters
add_tests test_symlink
add_tests test_extended_attributes
add_tests test_mtime_file
add_tests test_rm_rf_dir
add_tests test_write_after_seek_ahead
}
@@ -1,5 +1,5 @@
s3proxy.endpoint=http://127.0.0.1:8080
s3proxy.authorization=aws-v2
s3proxy.authorization=aws-v4
s3proxy.identity=local-identity
s3proxy.credential=local-credential
@@ -1,4 +1,4 @@
# S3FS: Samlpe ahbe_conf parameter file.
# S3FS: Sample ahbe_conf parameter file.
#
# This file is configuration file for additional header by extension(ahbe).
# s3fs loads this file at starting.