From b5ffd419d82b00a1499248aa6a6669adf70d9cdf Mon Sep 17 00:00:00 2001 From: Takeshi Nakatani Date: Sat, 22 Aug 2020 12:40:53 +0000 Subject: [PATCH] Source file division and set 4 spaces and cleanup --- Makefile.am | 9 + autogen.sh | 8 + configure.ac | 8 + src/Makefile.am | 51 +- src/addhead.cpp | 383 +- src/addhead.h | 53 +- src/autolock.cpp | 82 + src/autolock.h | 60 + src/bodydata.cpp | 124 + src/bodydata.h | 72 + src/cache.cpp | 1096 ++-- src/cache.h | 219 +- src/common.h | 180 +- src/common_auth.cpp | 76 +- src/curl.cpp | 6794 ++++++++++------------ src/curl.h | 788 +-- src/curl_handlerpool.cpp | 129 + src/curl_handlerpool.h | 64 + src/curl_multi.cpp | 344 ++ src/curl_multi.h | 79 + src/curl_util.cpp | 397 ++ src/curl_util.h | 57 + src/fdcache.cpp | 3890 ++----------- src/fdcache.h | 302 +- src/fdcache_entity.cpp | 1536 +++++ src/fdcache_entity.h | 124 + src/fdcache_page.cpp | 925 +++ src/fdcache_page.h | 130 + src/fdcache_stat.cpp | 282 + src/fdcache_stat.h | 64 + src/gnutls_auth.cpp | 435 +- src/metaheader.cpp | 322 ++ src/metaheader.h | 72 + src/mpu_util.cpp | 161 + src/mpu_util.h | 64 + src/mvnode.cpp | 142 + src/mvnode.h | 53 + src/nss_auth.cpp | 261 +- src/openssl_auth.cpp | 379 +- src/psemaphore.h | 76 +- src/s3fs.cpp | 8687 +++++++++++++--------------- src/s3fs.h | 29 +- src/s3fs_auth.h | 9 +- src/s3fs_global.cpp | 51 + src/s3fs_help.cpp | 524 ++ src/s3fs_help.h | 40 + src/s3fs_logger.cpp | 36 + src/s3fs_logger.h | 154 + src/s3fs_util.cpp | 1692 +----- src/s3fs_util.h | 119 +- src/s3fs_xml.cpp | 499 ++ src/s3fs_xml.h | 53 + src/s3objlist.cpp | 286 + src/s3objlist.h | 79 + src/sighandlers.cpp | 323 +- src/sighandlers.h | 52 +- src/string_util.cpp | 803 +-- src/string_util.h | 67 +- src/test_string_util.cpp | 153 +- src/test_util.h | 90 +- src/types.h | 397 +- test/Makefile.am | 22 +- test/integration-test-common.sh | 28 + test/integration-test-main.sh | 28 + test/mergedir.sh | 27 +- test/require-root.sh | 28 + test/run_tests_using_sanitizers.sh | 28 + 
test/sample_delcache.sh | 117 +- test/small-integration-test.sh | 28 + test/test-utils.sh | 28 + test/ut_test.py | 27 + test/write_multiple_offsets.py | 28 + 72 files changed, 18206 insertions(+), 16617 deletions(-) create mode 100644 src/autolock.cpp create mode 100644 src/autolock.h create mode 100644 src/bodydata.cpp create mode 100644 src/bodydata.h create mode 100644 src/curl_handlerpool.cpp create mode 100644 src/curl_handlerpool.h create mode 100644 src/curl_multi.cpp create mode 100644 src/curl_multi.h create mode 100644 src/curl_util.cpp create mode 100644 src/curl_util.h create mode 100644 src/fdcache_entity.cpp create mode 100644 src/fdcache_entity.h create mode 100644 src/fdcache_page.cpp create mode 100644 src/fdcache_page.h create mode 100644 src/fdcache_stat.cpp create mode 100644 src/fdcache_stat.h create mode 100644 src/metaheader.cpp create mode 100644 src/metaheader.h create mode 100644 src/mpu_util.cpp create mode 100644 src/mpu_util.h create mode 100644 src/mvnode.cpp create mode 100644 src/mvnode.h create mode 100644 src/s3fs_global.cpp create mode 100644 src/s3fs_help.cpp create mode 100644 src/s3fs_help.h create mode 100644 src/s3fs_logger.cpp create mode 100644 src/s3fs_logger.h create mode 100644 src/s3fs_xml.cpp create mode 100644 src/s3fs_xml.h create mode 100644 src/s3objlist.cpp create mode 100644 src/s3objlist.h diff --git a/Makefile.am b/Makefile.am index bf2bf14..3877d0b 100644 --- a/Makefile.am +++ b/Makefile.am @@ -43,3 +43,12 @@ cppcheck: --suppress=missingIncludeSystem \ --suppress=unmatchedSuppression \ src/ test/ + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: expandtab sw=4 ts= fdm=marker +# vim<600: expandtab sw=4 ts=4 +# diff --git a/autogen.sh b/autogen.sh index ae16921..700fc7d 100755 --- a/autogen.sh +++ b/autogen.sh @@ -44,3 +44,11 @@ echo "--- Finished autotools ----------" exit 0 +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: expandtab sw=4 ts= 
fdm=marker +# vim<600: expandtab sw=4 ts=4 +# diff --git a/configure.ac b/configure.ac index 0cd4956..1cb5d69 100644 --- a/configure.ac +++ b/configure.ac @@ -341,3 +341,11 @@ dnl ---------------------------------------------- dnl end configuration dnl ---------------------------------------------- +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: expandtab sw=4 ts= fdm=marker +# vim<600: expandtab sw=4 ts=4 +# diff --git a/src/Makefile.am b/src/Makefile.am index 66e9490..237a82e 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -21,27 +21,43 @@ bin_PROGRAMS=s3fs AM_CPPFLAGS = $(DEPS_CFLAGS) if USE_GNUTLS_NETTLE - AM_CPPFLAGS += -DUSE_GNUTLS_NETTLE + AM_CPPFLAGS += -DUSE_GNUTLS_NETTLE endif s3fs_SOURCES = \ - s3fs.cpp \ - curl.cpp \ - cache.cpp \ - string_util.cpp \ - s3fs_util.cpp \ - fdcache.cpp \ - common_auth.cpp \ - addhead.cpp \ - sighandlers.cpp + s3fs.cpp \ + s3fs_global.cpp \ + s3fs_help.cpp \ + s3fs_logger.cpp \ + s3fs_xml.cpp \ + metaheader.cpp \ + mpu_util.cpp \ + mvnode.cpp \ + curl.cpp \ + curl_handlerpool.cpp \ + curl_multi.cpp \ + curl_util.cpp \ + bodydata.cpp \ + s3objlist.cpp \ + cache.cpp \ + string_util.cpp \ + s3fs_util.cpp \ + fdcache.cpp \ + fdcache_entity.cpp \ + fdcache_page.cpp \ + fdcache_stat.cpp \ + addhead.cpp \ + sighandlers.cpp \ + autolock.cpp \ + common_auth.cpp if USE_SSL_OPENSSL - s3fs_SOURCES += openssl_auth.cpp + s3fs_SOURCES += openssl_auth.cpp endif if USE_SSL_GNUTLS - s3fs_SOURCES += gnutls_auth.cpp + s3fs_SOURCES += gnutls_auth.cpp endif if USE_SSL_NSS - s3fs_SOURCES += nss_auth.cpp + s3fs_SOURCES += nss_auth.cpp endif s3fs_LDADD = $(DEPS_LIBS) @@ -54,3 +70,12 @@ TESTS = test_string_util clang-tidy: clang-tidy $(s3fs_SOURCES) -- $(DEPS_CFLAGS) $(CPPFLAGS) + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: expandtab sw=4 ts= fdm=marker +# vim<600: expandtab sw=4 ts=4 +# diff --git a/src/addhead.cpp b/src/addhead.cpp index 19a44cc..0712f07 100644 --- 
a/src/addhead.cpp +++ b/src/addhead.cpp @@ -20,20 +20,13 @@ #include #include -#include -#include -#include #include #include -#include -#include -#include -#include #include "common.h" -#include "addhead.h" -#include "curl.h" #include "s3fs.h" +#include "addhead.h" +#include "curl_util.h" using namespace std; @@ -52,236 +45,236 @@ AdditionalHeader AdditionalHeader::singleton; //------------------------------------------------------------------- AdditionalHeader::AdditionalHeader() { - if(this == AdditionalHeader::get()){ - is_enable = false; - }else{ - abort(); - } + if(this == AdditionalHeader::get()){ + is_enable = false; + }else{ + abort(); + } } AdditionalHeader::~AdditionalHeader() { - if(this == AdditionalHeader::get()){ - Unload(); - }else{ - abort(); - } + if(this == AdditionalHeader::get()){ + Unload(); + }else{ + abort(); + } } bool AdditionalHeader::Load(const char* file) { - if(!file){ - S3FS_PRN_WARN("file is NULL."); - return false; - } - Unload(); + if(!file){ + S3FS_PRN_WARN("file is NULL."); + return false; + } + Unload(); - ifstream AH(file); - if(!AH.good()){ - S3FS_PRN_WARN("Could not open file(%s).", file); - return false; - } - - // read file - string line; - ADDHEAD *paddhead; - while(getline(AH, line)){ - if('#' == line[0]){ - continue; - } - if(line.empty()){ - continue; - } - // load a line - istringstream ss(line); - string key; // suffix(key) - string head; // additional HTTP header - string value; // header value - if(0 == isblank(line[0])){ - ss >> key; - } - if(ss){ - ss >> head; - if(ss && static_cast(ss.tellg()) < line.size()){ - value = line.substr(static_cast(ss.tellg()) + 1); - } + ifstream AH(file); + if(!AH.good()){ + S3FS_PRN_WARN("Could not open file(%s).", file); + return false; } - // check it - if(head.empty()){ - if(key.empty()){ - continue; - } - S3FS_PRN_ERR("file format error: %s key(suffix) is no HTTP header value.", key.c_str()); - Unload(); - return false; + // read file + string line; + ADDHEAD *paddhead; + 
while(getline(AH, line)){ + if('#' == line[0]){ + continue; + } + if(line.empty()){ + continue; + } + // load a line + istringstream ss(line); + string key; // suffix(key) + string head; // additional HTTP header + string value; // header value + if(0 == isblank(line[0])){ + ss >> key; + } + if(ss){ + ss >> head; + if(ss && static_cast(ss.tellg()) < line.size()){ + value = line.substr(static_cast(ss.tellg()) + 1); + } + } + + // check it + if(head.empty()){ + if(key.empty()){ + continue; + } + S3FS_PRN_ERR("file format error: %s key(suffix) is no HTTP header value.", key.c_str()); + Unload(); + return false; + } + + paddhead = new ADDHEAD; + if(0 == strncasecmp(key.c_str(), ADD_HEAD_REGEX, strlen(ADD_HEAD_REGEX))){ + // regex + if(key.size() <= strlen(ADD_HEAD_REGEX)){ + S3FS_PRN_ERR("file format error: %s key(suffix) does not have key string.", key.c_str()); + delete paddhead; + continue; + } + key = key.substr(strlen(ADD_HEAD_REGEX)); + + // compile + regex_t* preg = new regex_t; + int result; + if(0 != (result = regcomp(preg, key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info + char errbuf[256]; + regerror(result, preg, errbuf, sizeof(errbuf)); + S3FS_PRN_ERR("failed to compile regex from %s key by %s.", key.c_str(), errbuf); + delete preg; + delete paddhead; + continue; + } + + // set + paddhead->pregex = preg; + paddhead->basestring = key; + paddhead->headkey = head; + paddhead->headvalue = value; + + }else{ + // not regex, directly comparing + paddhead->pregex = NULL; + paddhead->basestring = key; + paddhead->headkey = head; + paddhead->headvalue = value; + } + + // add list + addheadlist.push_back(paddhead); + + // set flag + if(!is_enable){ + is_enable = true; + } } - - paddhead = new ADDHEAD; - if(0 == strncasecmp(key.c_str(), ADD_HEAD_REGEX, strlen(ADD_HEAD_REGEX))){ - // regex - if(key.size() <= strlen(ADD_HEAD_REGEX)){ - S3FS_PRN_ERR("file format error: %s key(suffix) does not have key string.", key.c_str()); - delete paddhead; - 
continue; - } - key = key.substr(strlen(ADD_HEAD_REGEX)); - - // compile - regex_t* preg = new regex_t; - int result; - if(0 != (result = regcomp(preg, key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info - char errbuf[256]; - regerror(result, preg, errbuf, sizeof(errbuf)); - S3FS_PRN_ERR("failed to compile regex from %s key by %s.", key.c_str(), errbuf); - delete preg; - delete paddhead; - continue; - } - - // set - paddhead->pregex = preg; - paddhead->basestring = key; - paddhead->headkey = head; - paddhead->headvalue = value; - - }else{ - // not regex, directly comparing - paddhead->pregex = NULL; - paddhead->basestring = key; - paddhead->headkey = head; - paddhead->headvalue = value; - } - - // add list - addheadlist.push_back(paddhead); - - // set flag - if(!is_enable){ - is_enable = true; - } - } - return true; + return true; } void AdditionalHeader::Unload() { - is_enable = false; + is_enable = false; - for(addheadlist_t::iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){ - ADDHEAD *paddhead = *iter; - if(paddhead){ - if(paddhead->pregex){ - regfree(paddhead->pregex); - delete paddhead->pregex; - } - delete paddhead; + for(addheadlist_t::iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){ + ADDHEAD *paddhead = *iter; + if(paddhead){ + if(paddhead->pregex){ + regfree(paddhead->pregex); + delete paddhead->pregex; + } + delete paddhead; + } } - } - addheadlist.clear(); + addheadlist.clear(); } bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const { - if(!is_enable){ - return true; - } - if(!path){ - S3FS_PRN_WARN("path is NULL."); - return false; - } - - size_t pathlength = strlen(path); - - // loop - // - // [NOTE] - // Because to allow duplicate key, and then scanning the entire table. 
- // - for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){ - const ADDHEAD *paddhead = *iter; - if(!paddhead){ - continue; + if(!is_enable){ + return true; + } + if(!path){ + S3FS_PRN_WARN("path is NULL."); + return false; } - if(paddhead->pregex){ - // regex - regmatch_t match; // not use - if(0 == regexec(paddhead->pregex, path, 1, &match, 0)){ - // match -> adding header - meta[paddhead->headkey] = paddhead->headvalue; - } - }else{ - // directly comparing - if(paddhead->basestring.length() < pathlength){ - if(0 == paddhead->basestring.length() || 0 == strcmp(&path[pathlength - paddhead->basestring.length()], paddhead->basestring.c_str())){ - // match -> adding header - meta[paddhead->headkey] = paddhead->headvalue; + size_t pathlength = strlen(path); + + // loop + // + // [NOTE] + // Because to allow duplicate key, and then scanning the entire table. + // + for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){ + const ADDHEAD *paddhead = *iter; + if(!paddhead){ + continue; + } + + if(paddhead->pregex){ + // regex + regmatch_t match; // not use + if(0 == regexec(paddhead->pregex, path, 1, &match, 0)){ + // match -> adding header + meta[paddhead->headkey] = paddhead->headvalue; + } + }else{ + // directly comparing + if(paddhead->basestring.length() < pathlength){ + if(0 == paddhead->basestring.length() || 0 == strcmp(&path[pathlength - paddhead->basestring.length()], paddhead->basestring.c_str())){ + // match -> adding header + meta[paddhead->headkey] = paddhead->headvalue; + } + } } - } } - } - return true; + return true; } struct curl_slist* AdditionalHeader::AddHeader(struct curl_slist* list, const char* path) const { - headers_t meta; + headers_t meta; - if(!AddHeader(meta, path)){ + if(!AddHeader(meta, path)){ + return list; + } + for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ + // Adding header + list = curl_slist_sort_insert(list, 
iter->first.c_str(), iter->second.c_str()); + } + meta.clear(); + S3FS_MALLOCTRIM(0); return list; - } - for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ - // Adding header - list = curl_slist_sort_insert(list, iter->first.c_str(), iter->second.c_str()); - } - meta.clear(); - S3FS_MALLOCTRIM(0); - return list; } bool AdditionalHeader::Dump() const { - if(!IS_S3FS_LOG_DBG()){ - return true; - } - - ostringstream ssdbg; - int cnt = 1; - - ssdbg << "Additional Header list[" << addheadlist.size() << "] = {" << endl; - - for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter, ++cnt){ - const ADDHEAD *paddhead = *iter; - - ssdbg << " [" << cnt << "] = {" << endl; - - if(paddhead){ - if(paddhead->pregex){ - ssdbg << " type\t\t--->\tregex" << endl; - }else{ - ssdbg << " type\t\t--->\tsuffix matching" << endl; - } - ssdbg << " base string\t--->\t" << paddhead->basestring << endl; - ssdbg << " add header\t--->\t" << paddhead->headkey << ": " << paddhead->headvalue << endl; + if(!IS_S3FS_LOG_DBG()){ + return true; + } + + ostringstream ssdbg; + int cnt = 1; + + ssdbg << "Additional Header list[" << addheadlist.size() << "] = {" << endl; + + for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter, ++cnt){ + const ADDHEAD *paddhead = *iter; + + ssdbg << " [" << cnt << "] = {" << endl; + + if(paddhead){ + if(paddhead->pregex){ + ssdbg << " type\t\t--->\tregex" << endl; + }else{ + ssdbg << " type\t\t--->\tsuffix matching" << endl; + } + ssdbg << " base string\t--->\t" << paddhead->basestring << endl; + ssdbg << " add header\t--->\t" << paddhead->headkey << ": " << paddhead->headvalue << endl; + } + ssdbg << " }" << endl; } - ssdbg << " }" << endl; - } - ssdbg << "}" << endl; + ssdbg << "}" << endl; - // print all - S3FS_PRN_DBG("%s", ssdbg.str().c_str()); + // print all + S3FS_PRN_DBG("%s", ssdbg.str().c_str()); - return true; + return true; } /* * Local variables: -* 
tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/addhead.h b/src/addhead.h index 350ef79..693c795 100644 --- a/src/addhead.h +++ b/src/addhead.h @@ -23,48 +23,53 @@ #include +#include "metaheader.h" + //---------------------------------------------- -// class AdditionalHeader +// Structure / Typedef //---------------------------------------------- typedef struct add_header{ - regex_t* pregex; // not NULL means using regex, NULL means comparing suffix directly. - std::string basestring; - std::string headkey; - std::string headvalue; + regex_t* pregex; // not NULL means using regex, NULL means comparing suffix directly. + std::string basestring; + std::string headkey; + std::string headvalue; }ADDHEAD; typedef std::vector addheadlist_t; +//---------------------------------------------- +// Class AdditionalHeader +//---------------------------------------------- class AdditionalHeader { - private: - static AdditionalHeader singleton; - bool is_enable; - addheadlist_t addheadlist; + private: + static AdditionalHeader singleton; + bool is_enable; + addheadlist_t addheadlist; - protected: - AdditionalHeader(); - ~AdditionalHeader(); + protected: + AdditionalHeader(); + ~AdditionalHeader(); - public: - // Reference singleton - static AdditionalHeader* get(void) { return &singleton; } + public: + // Reference singleton + static AdditionalHeader* get(void) { return &singleton; } - bool Load(const char* file); - void Unload(void); + bool Load(const char* file); + void Unload(void); - bool AddHeader(headers_t& meta, const char* path) const; - struct curl_slist* AddHeader(struct curl_slist* list, const char* path) const; - bool Dump(void) const; + bool AddHeader(headers_t& meta, const char* path) const; + struct curl_slist* AddHeader(struct curl_slist* list, const char* path) const; 
+ bool Dump(void) const; }; #endif // S3FS_ADDHEAD_H_ /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/autolock.cpp b/src/autolock.cpp new file mode 100644 index 0000000..4152845 --- /dev/null +++ b/src/autolock.cpp @@ -0,0 +1,82 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Takeshi Nakatani + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include +#include +#include + +#include "common.h" +#include "s3fs.h" +#include "autolock.h" + +using namespace std; + +//------------------------------------------------------------------- +// Class AutoLock +//------------------------------------------------------------------- +AutoLock::AutoLock(pthread_mutex_t* pmutex, Type type) : auto_mutex(pmutex) +{ + if (type == ALREADY_LOCKED) { + is_lock_acquired = false; + } else if (type == NO_WAIT) { + int res = pthread_mutex_trylock(auto_mutex); + if(res == 0){ + is_lock_acquired = true; + }else if(res == EBUSY){ + is_lock_acquired = false; + }else{ + S3FS_PRN_CRIT("pthread_mutex_trylock returned: %d", res); + abort(); + } + } else { + int res = pthread_mutex_lock(auto_mutex); + if(res == 0){ + is_lock_acquired = true; + }else{ + S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res); + abort(); + } + } +} + +bool AutoLock::isLockAcquired() const +{ + return is_lock_acquired; +} + +AutoLock::~AutoLock() +{ + if (is_lock_acquired) { + int res = pthread_mutex_unlock(auto_mutex); + if(res != 0){ + S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res); + abort(); + } + } +} + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/autolock.h b/src/autolock.h new file mode 100644 index 0000000..8a61d17 --- /dev/null +++ b/src/autolock.h @@ -0,0 +1,60 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Randy Rizun + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef S3FS_AUTOLOCK_H_ +#define S3FS_AUTOLOCK_H_ + +#include + +//------------------------------------------------------------------- +// AutoLock Class +//------------------------------------------------------------------- +class AutoLock +{ + public: + enum Type { + NO_WAIT = 1, + ALREADY_LOCKED = 2, + NONE = 0 + }; + + private: + pthread_mutex_t* const auto_mutex; + bool is_lock_acquired; + + private: + AutoLock(const AutoLock&); + + public: + explicit AutoLock(pthread_mutex_t* pmutex, Type type = NONE); + ~AutoLock(); + bool isLockAcquired() const; +}; + +#endif // S3FS_AUTOLOCK_H_ + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/bodydata.cpp b/src/bodydata.cpp new file mode 100644 index 0000000..3fb4faf --- /dev/null +++ b/src/bodydata.cpp @@ -0,0 +1,124 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Randy Rizun + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include +#include + +#include "common.h" +#include "s3fs.h" +#include "bodydata.h" + +using namespace std; + +//------------------------------------------------------------------- +// Variables +//------------------------------------------------------------------- +static const int BODYDATA_RESIZE_APPEND_MIN = 1024; +static const int BODYDATA_RESIZE_APPEND_MID = 1024 * 1024; +static const int BODYDATA_RESIZE_APPEND_MAX = 10 * 1024 * 1024; + +//------------------------------------------------------------------- +// Utility Functions +//------------------------------------------------------------------- +static size_t adjust_block(size_t bytes, size_t block) +{ + return ((bytes / block) + ((bytes % block) ? 1 : 0)) * block; +} + +//------------------------------------------------------------------- +// Class BodyData +//------------------------------------------------------------------- +bool BodyData::Resize(size_t addbytes) +{ + if(IsSafeSize(addbytes)){ + return true; + } + + // New size + size_t need_size = adjust_block((lastpos + addbytes + 1) - bufsize, sizeof(off_t)); + + if(BODYDATA_RESIZE_APPEND_MAX < bufsize){ + need_size = (BODYDATA_RESIZE_APPEND_MAX < need_size ? need_size : BODYDATA_RESIZE_APPEND_MAX); + }else if(BODYDATA_RESIZE_APPEND_MID < bufsize){ + need_size = (BODYDATA_RESIZE_APPEND_MID < need_size ? need_size : BODYDATA_RESIZE_APPEND_MID); + }else if(BODYDATA_RESIZE_APPEND_MIN < bufsize){ + need_size = ((bufsize * 2) < need_size ? need_size : (bufsize * 2)); + }else{ + need_size = (BODYDATA_RESIZE_APPEND_MIN < need_size ? 
need_size : BODYDATA_RESIZE_APPEND_MIN); + } + // realloc + char* newtext; + if(NULL == (newtext = (char*)realloc(text, (bufsize + need_size)))){ + S3FS_PRN_CRIT("not enough memory (realloc returned NULL)"); + free(text); + text = NULL; + return false; + } + text = newtext; + bufsize += need_size; + + return true; +} + +void BodyData::Clear() +{ + if(text){ + free(text); + text = NULL; + } + lastpos = 0; + bufsize = 0; +} + +bool BodyData::Append(void* ptr, size_t bytes) +{ + if(!ptr){ + return false; + } + if(0 == bytes){ + return true; + } + if(!Resize(bytes)){ + return false; + } + memcpy(&text[lastpos], ptr, bytes); + lastpos += bytes; + text[lastpos] = '\0'; + + return true; +} + +const char* BodyData::str() const +{ + if(!text){ + static const char* strnull = ""; + return strnull; + } + return text; +} + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/bodydata.h b/src/bodydata.h new file mode 100644 index 0000000..1a54b7d --- /dev/null +++ b/src/bodydata.h @@ -0,0 +1,72 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Randy Rizun + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef S3FS_BODYDATA_H_ +#define S3FS_BODYDATA_H_ + +//---------------------------------------------- +// Class BodyData +//---------------------------------------------- +// memory class for curl write memory callback +// +class BodyData +{ + private: + char* text; + size_t lastpos; + size_t bufsize; + + private: + bool IsSafeSize(size_t addbytes) const + { + return ((lastpos + addbytes + 1) > bufsize ? false : true); + } + bool Resize(size_t addbytes); + + public: + BodyData() : text(NULL), lastpos(0), bufsize(0) {} + ~BodyData() + { + Clear(); + } + + void Clear(void); + bool Append(void* ptr, size_t bytes); + bool Append(void* ptr, size_t blockSize, size_t numBlocks) + { + return Append(ptr, (blockSize * numBlocks)); + } + const char* str() const; + size_t size() const + { + return lastpos; + } +}; + +#endif // S3FS_BODYDATA_H_ + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/cache.cpp b/src/cache.cpp index 51c956c..8933567 100644 --- a/src/cache.cpp +++ b/src/cache.cpp @@ -19,25 +19,17 @@ */ #include -#include -#include +#include #ifndef HAVE_CLOCK_GETTIME #include #endif -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "cache.h" +#include + +#include "common.h" #include "s3fs.h" -#include "s3fs_util.h" +#include "cache.h" +#include "autolock.h" #include "string_util.h" using namespace std; @@ -58,59 +50,59 @@ using namespace std; #ifdef HAVE_CLOCK_GETTIME static int s3fs_clock_gettime(int clk_id, struct timespec* ts) { - return clock_gettime(static_cast(clk_id), ts); + return clock_gettime(static_cast(clk_id), ts); } #else static int s3fs_clock_gettime(int clk_id, struct timespec* ts) { - struct timeval now; - if(0 != gettimeofday(&now, NULL)){ - return -1; - } - ts->tv_sec = now.tv_sec; - ts->tv_nsec = now.tv_usec * 1000; - return 0; + struct timeval now; + if(0 != 
gettimeofday(&now, NULL)){ + return -1; + } + ts->tv_sec = now.tv_sec; + ts->tv_nsec = now.tv_usec * 1000; + return 0; } #endif inline void SetStatCacheTime(struct timespec& ts) { - if(-1 == s3fs_clock_gettime(CLOCK_MONOTONIC_COARSE, &ts)){ - ts.tv_sec = time(NULL); - ts.tv_nsec = 0; - } + if(-1 == s3fs_clock_gettime(CLOCK_MONOTONIC_COARSE, &ts)){ + ts.tv_sec = time(NULL); + ts.tv_nsec = 0; + } } inline void InitStatCacheTime(struct timespec& ts) { - ts.tv_sec = 0; - ts.tv_nsec = 0; + ts.tv_sec = 0; + ts.tv_nsec = 0; } inline int CompareStatCacheTime(const struct timespec& ts1, const struct timespec& ts2) { - // return -1: ts1 < ts2 - // 0: ts1 == ts2 - // 1: ts1 > ts2 - if(ts1.tv_sec < ts2.tv_sec){ - return -1; - }else if(ts1.tv_sec > ts2.tv_sec){ - return 1; - }else{ - if(ts1.tv_nsec < ts2.tv_nsec){ - return -1; - }else if(ts1.tv_nsec > ts2.tv_nsec){ - return 1; + // return -1: ts1 < ts2 + // 0: ts1 == ts2 + // 1: ts1 > ts2 + if(ts1.tv_sec < ts2.tv_sec){ + return -1; + }else if(ts1.tv_sec > ts2.tv_sec){ + return 1; + }else{ + if(ts1.tv_nsec < ts2.tv_nsec){ + return -1; + }else if(ts1.tv_nsec > ts2.tv_nsec){ + return 1; + } } - } - return 0; + return 0; } inline bool IsExpireStatCacheTime(const struct timespec& ts, const time_t& expire) { - struct timespec nowts; - SetStatCacheTime(nowts); - return ((ts.tv_sec + expire) < nowts.tv_sec); + struct timespec nowts; + SetStatCacheTime(nowts); + return ((ts.tv_sec + expire) < nowts.tv_sec); } // @@ -119,17 +111,17 @@ inline bool IsExpireStatCacheTime(const struct timespec& ts, const time_t& expir typedef std::vector statiterlist_t; struct sort_statiterlist{ - // ascending order - bool operator()(const stat_cache_t::iterator& src1, const stat_cache_t::iterator& src2) const - { - int result = CompareStatCacheTime(src1->second->cache_date, src2->second->cache_date); - if(0 == result){ - if(src1->second->hit_count < src2->second->hit_count){ - result = -1; - } + // ascending order + bool operator()(const 
stat_cache_t::iterator& src1, const stat_cache_t::iterator& src2) const + { + int result = CompareStatCacheTime(src1->second->cache_date, src2->second->cache_date); + if(0 == result){ + if(src1->second->hit_count < src2->second->hit_count){ + result = -1; + } + } + return (result < 0); } - return (result < 0); - } }; // @@ -138,17 +130,17 @@ struct sort_statiterlist{ typedef std::vector symlinkiterlist_t; struct sort_symlinkiterlist{ - // ascending order - bool operator()(const symlink_cache_t::iterator& src1, const symlink_cache_t::iterator& src2) const - { - int result = CompareStatCacheTime(src1->second->cache_date, src2->second->cache_date); // use the same as Stats - if(0 == result){ - if(src1->second->hit_count < src2->second->hit_count){ - result = -1; - } + // ascending order + bool operator()(const symlink_cache_t::iterator& src1, const symlink_cache_t::iterator& src2) const + { + int result = CompareStatCacheTime(src1->second->cache_date, src2->second->cache_date); // use the same as Stats + if(0 == result){ + if(src1->second->hit_count < src2->second->hit_count){ + result = -1; + } + } + return (result < 0); } - return (result < 0); - } }; //------------------------------------------------------------------- @@ -162,35 +154,35 @@ pthread_mutex_t StatCache::stat_cache_lock; //------------------------------------------------------------------- StatCache::StatCache() : IsExpireTime(false), IsExpireIntervalType(false), ExpireTime(15 * 60), CacheSize(100000), IsCacheNoObject(false) { - if(this == StatCache::getStatCacheData()){ - stat_cache.clear(); - pthread_mutexattr_t attr; - pthread_mutexattr_init(&attr); + if(this == StatCache::getStatCacheData()){ + stat_cache.clear(); + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); #if S3FS_PTHREAD_ERRORCHECK - pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); #endif - int res; - if(0 != (res = 
pthread_mutex_init(&StatCache::stat_cache_lock, &attr))){ - S3FS_PRN_CRIT("failed to init stat_cache_lock: %d", res); - abort(); + int res; + if(0 != (res = pthread_mutex_init(&StatCache::stat_cache_lock, &attr))){ + S3FS_PRN_CRIT("failed to init stat_cache_lock: %d", res); + abort(); + } + }else{ + abort(); } - }else{ - abort(); - } } StatCache::~StatCache() { - if(this == StatCache::getStatCacheData()){ - Clear(); - int res = pthread_mutex_destroy(&StatCache::stat_cache_lock); - if(res != 0){ - S3FS_PRN_CRIT("failed to destroy stat_cache_lock: %d", res); - abort(); + if(this == StatCache::getStatCacheData()){ + Clear(); + int res = pthread_mutex_destroy(&StatCache::stat_cache_lock); + if(res != 0){ + S3FS_PRN_CRIT("failed to destroy stat_cache_lock: %d", res); + abort(); + } + }else{ + abort(); } - }else{ - abort(); - } } //------------------------------------------------------------------- @@ -198,566 +190,564 @@ StatCache::~StatCache() //------------------------------------------------------------------- unsigned long StatCache::GetCacheSize() const { - return CacheSize; + return CacheSize; } unsigned long StatCache::SetCacheSize(unsigned long size) { - unsigned long old = CacheSize; - CacheSize = size; - return old; + unsigned long old = CacheSize; + CacheSize = size; + return old; } time_t StatCache::GetExpireTime() const { - return (IsExpireTime ? ExpireTime : (-1)); + return (IsExpireTime ? ExpireTime : (-1)); } time_t StatCache::SetExpireTime(time_t expire, bool is_interval) { - time_t old = ExpireTime; - ExpireTime = expire; - IsExpireTime = true; - IsExpireIntervalType = is_interval; - return old; + time_t old = ExpireTime; + ExpireTime = expire; + IsExpireTime = true; + IsExpireIntervalType = is_interval; + return old; } time_t StatCache::UnsetExpireTime() { - time_t old = IsExpireTime ? ExpireTime : (-1); - ExpireTime = 0; - IsExpireTime = false; - IsExpireIntervalType = false; - return old; + time_t old = IsExpireTime ? 
ExpireTime : (-1); + ExpireTime = 0; + IsExpireTime = false; + IsExpireIntervalType = false; + return old; } bool StatCache::SetCacheNoObject(bool flag) { - bool old = IsCacheNoObject; - IsCacheNoObject = flag; - return old; + bool old = IsCacheNoObject; + IsCacheNoObject = flag; + return old; } void StatCache::Clear() { - AutoLock lock(&StatCache::stat_cache_lock); + AutoLock lock(&StatCache::stat_cache_lock); - for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ++iter){ - delete (*iter).second; - } - stat_cache.clear(); - S3FS_MALLOCTRIM(0); + for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ++iter){ + delete (*iter).second; + } + stat_cache.clear(); + S3FS_MALLOCTRIM(0); } bool StatCache::GetStat(const string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce) { - bool is_delete_cache = false; - string strpath = key; + bool is_delete_cache = false; + string strpath = key; - AutoLock lock(&StatCache::stat_cache_lock); + AutoLock lock(&StatCache::stat_cache_lock); - stat_cache_t::iterator iter = stat_cache.end(); - if(overcheck && '/' != strpath[strpath.length() - 1]){ - strpath += "/"; - iter = stat_cache.find(strpath); - } - if(iter == stat_cache.end()){ - strpath = key; - iter = stat_cache.find(strpath); - } - - if(iter != stat_cache.end() && (*iter).second){ - stat_cache_entry* ent = (*iter).second; - if(!IsExpireTime || !IsExpireStatCacheTime(ent->cache_date, ExpireTime)){ - if(ent->noobjcache){ - if(!IsCacheNoObject){ - // need to delete this cache. - DelStat(strpath, /*lock_already_held=*/ true); - }else{ - // noobjcache = true means no object. 
- } - return false; - } - // hit without checking etag - string stretag; - if(petag){ - // find & check ETag - for(headers_t::iterator hiter = ent->meta.begin(); hiter != ent->meta.end(); ++hiter){ - string tag = lower(hiter->first); - if(tag == "etag"){ - stretag = hiter->second; - if('\0' != petag[0] && 0 != strcmp(petag, stretag.c_str())){ - is_delete_cache = true; - } - break; - } - } - } - if(is_delete_cache){ - // not hit by different ETag - S3FS_PRN_DBG("stat cache not hit by ETag[path=%s][time=%lld.%09ld][hit count=%lu][ETag(%s)!=(%s)]", - strpath.c_str(), static_cast(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count, petag ? petag : "null", stretag.c_str()); - }else{ - // hit - S3FS_PRN_DBG("stat cache hit [path=%s][time=%lld.%09ld][hit count=%lu]", - strpath.c_str(), static_cast(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count); - - if(pst!= NULL){ - *pst= ent->stbuf; - } - if(meta != NULL){ - *meta = ent->meta; - } - if(pisforce != NULL){ - (*pisforce) = ent->isforce; - } - ent->hit_count++; - - if(IsExpireIntervalType){ - SetStatCacheTime(ent->cache_date); - } - return true; - } - - }else{ - // timeout - is_delete_cache = true; + stat_cache_t::iterator iter = stat_cache.end(); + if(overcheck && '/' != strpath[strpath.length() - 1]){ + strpath += "/"; + iter = stat_cache.find(strpath); + } + if(iter == stat_cache.end()){ + strpath = key; + iter = stat_cache.find(strpath); } - } - if(is_delete_cache){ - DelStat(strpath, /*lock_already_held=*/ true); - } - return false; + if(iter != stat_cache.end() && (*iter).second){ + stat_cache_entry* ent = (*iter).second; + if(!IsExpireTime || !IsExpireStatCacheTime(ent->cache_date, ExpireTime)){ + if(ent->noobjcache){ + if(!IsCacheNoObject){ + // need to delete this cache. + DelStat(strpath, /*lock_already_held=*/ true); + }else{ + // noobjcache = true means no object. 
+ } + return false; + } + // hit without checking etag + string stretag; + if(petag){ + // find & check ETag + for(headers_t::iterator hiter = ent->meta.begin(); hiter != ent->meta.end(); ++hiter){ + string tag = lower(hiter->first); + if(tag == "etag"){ + stretag = hiter->second; + if('\0' != petag[0] && 0 != strcmp(petag, stretag.c_str())){ + is_delete_cache = true; + } + break; + } + } + } + if(is_delete_cache){ + // not hit by different ETag + S3FS_PRN_DBG("stat cache not hit by ETag[path=%s][time=%lld.%09ld][hit count=%lu][ETag(%s)!=(%s)]", + strpath.c_str(), static_cast(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count, petag ? petag : "null", stretag.c_str()); + }else{ + // hit + S3FS_PRN_DBG("stat cache hit [path=%s][time=%lld.%09ld][hit count=%lu]", + strpath.c_str(), static_cast(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count); + + if(pst!= NULL){ + *pst= ent->stbuf; + } + if(meta != NULL){ + *meta = ent->meta; + } + if(pisforce != NULL){ + (*pisforce) = ent->isforce; + } + ent->hit_count++; + + if(IsExpireIntervalType){ + SetStatCacheTime(ent->cache_date); + } + return true; + } + + }else{ + // timeout + is_delete_cache = true; + } + } + + if(is_delete_cache){ + DelStat(strpath, /*lock_already_held=*/ true); + } + return false; } bool StatCache::IsNoObjectCache(const string& key, bool overcheck) { - bool is_delete_cache = false; - string strpath = key; + bool is_delete_cache = false; + string strpath = key; - if(!IsCacheNoObject){ - return false; - } - - AutoLock lock(&StatCache::stat_cache_lock); - - stat_cache_t::iterator iter = stat_cache.end(); - if(overcheck && '/' != strpath[strpath.length() - 1]){ - strpath += "/"; - iter = stat_cache.find(strpath); - } - if(iter == stat_cache.end()){ - strpath = key; - iter = stat_cache.find(strpath); - } - - if(iter != stat_cache.end() && (*iter).second) { - if(!IsExpireTime || !IsExpireStatCacheTime((*iter).second->cache_date, ExpireTime)){ - if((*iter).second->noobjcache){ - // 
noobjcache = true means no object. - SetStatCacheTime((*iter).second->cache_date); - return true; - } - }else{ - // timeout - is_delete_cache = true; + if(!IsCacheNoObject){ + return false; } - } - if(is_delete_cache){ - DelStat(strpath, /*lock_already_held=*/ true); - } - return false; + AutoLock lock(&StatCache::stat_cache_lock); + + stat_cache_t::iterator iter = stat_cache.end(); + if(overcheck && '/' != strpath[strpath.length() - 1]){ + strpath += "/"; + iter = stat_cache.find(strpath); + } + if(iter == stat_cache.end()){ + strpath = key; + iter = stat_cache.find(strpath); + } + + if(iter != stat_cache.end() && (*iter).second) { + if(!IsExpireTime || !IsExpireStatCacheTime((*iter).second->cache_date, ExpireTime)){ + if((*iter).second->noobjcache){ + // noobjcache = true means no object. + SetStatCacheTime((*iter).second->cache_date); + return true; + } + }else{ + // timeout + is_delete_cache = true; + } + } + + if(is_delete_cache){ + DelStat(strpath, /*lock_already_held=*/ true); + } + return false; } bool StatCache::AddStat(const std::string& key, headers_t& meta, bool forcedir, bool no_truncate) { - if(!no_truncate && CacheSize< 1){ - return true; - } - S3FS_PRN_INFO3("add stat cache entry[path=%s]", key.c_str()); + if(!no_truncate && CacheSize< 1){ + return true; + } + S3FS_PRN_INFO3("add stat cache entry[path=%s]", key.c_str()); - bool found; - bool do_truncate; - { - AutoLock lock(&StatCache::stat_cache_lock); - found = stat_cache.end() != stat_cache.find(key); - do_truncate = stat_cache.size() > CacheSize; - } + bool found; + bool do_truncate; + { + AutoLock lock(&StatCache::stat_cache_lock); + found = stat_cache.end() != stat_cache.find(key); + do_truncate = stat_cache.size() > CacheSize; + } - if(found){ - DelStat(key.c_str()); - }else{ - if(do_truncate){ - if(!TruncateCache()){ + if(found){ + DelStat(key.c_str()); + }else{ + if(do_truncate){ + if(!TruncateCache()){ + return false; + } + } + } + + // make new + stat_cache_entry* ent = new 
stat_cache_entry(); + if(!convert_header_to_stat(key.c_str(), meta, &(ent->stbuf), forcedir)){ + delete ent; return false; - } } - } - - // make new - stat_cache_entry* ent = new stat_cache_entry(); - if(!convert_header_to_stat(key.c_str(), meta, &(ent->stbuf), forcedir)){ - delete ent; - return false; - } - ent->hit_count = 0; - ent->isforce = forcedir; - ent->noobjcache = false; - ent->notruncate = (no_truncate ? 1L : 0L); - ent->meta.clear(); - SetStatCacheTime(ent->cache_date); // Set time. - //copy only some keys - for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ - string tag = lower(iter->first); - string value = iter->second; - if(tag == "content-type"){ - ent->meta[iter->first] = value; - }else if(tag == "content-length"){ - ent->meta[iter->first] = value; - }else if(tag == "etag"){ - ent->meta[iter->first] = value; - }else if(tag == "last-modified"){ - ent->meta[iter->first] = value; - }else if(tag.substr(0, 5) == "x-amz"){ - ent->meta[tag] = value; // key is lower case for "x-amz" + ent->hit_count = 0; + ent->isforce = forcedir; + ent->noobjcache = false; + ent->notruncate = (no_truncate ? 1L : 0L); + ent->meta.clear(); + SetStatCacheTime(ent->cache_date); // Set time. 
+ //copy only some keys + for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ + string tag = lower(iter->first); + string value = iter->second; + if(tag == "content-type"){ + ent->meta[iter->first] = value; + }else if(tag == "content-length"){ + ent->meta[iter->first] = value; + }else if(tag == "etag"){ + ent->meta[iter->first] = value; + }else if(tag == "last-modified"){ + ent->meta[iter->first] = value; + }else if(tag.substr(0, 5) == "x-amz"){ + ent->meta[tag] = value; // key is lower case for "x-amz" + } } - } - // add - AutoLock lock(&StatCache::stat_cache_lock); + // add + AutoLock lock(&StatCache::stat_cache_lock); - stat_cache_t::iterator iter = stat_cache.find(key); // recheck for same key exists - if(stat_cache.end() != iter){ - delete iter->second; - stat_cache.erase(iter); - } - stat_cache[key] = ent; - - // check symbolic link cache - if(!S_ISLNK(ent->stbuf.st_mode)){ - if(symlink_cache.end() != symlink_cache.find(key)){ - // if symbolic link cache has key, thus remove it. - DelSymlink(key.c_str(), true); + stat_cache_t::iterator iter = stat_cache.find(key); // recheck for same key exists + if(stat_cache.end() != iter){ + delete iter->second; + stat_cache.erase(iter); } - } + stat_cache[key] = ent; - return true; + // check symbolic link cache + if(!S_ISLNK(ent->stbuf.st_mode)){ + if(symlink_cache.end() != symlink_cache.find(key)){ + // if symbolic link cache has key, thus remove it. 
+ DelSymlink(key.c_str(), true); + } + } + return true; } bool StatCache::AddNoObjectCache(const string& key) { - if(!IsCacheNoObject){ - return true; // pretend successful - } - if(CacheSize < 1){ - return true; - } - S3FS_PRN_INFO3("add no object cache entry[path=%s]", key.c_str()); - - bool found; - bool do_truncate; - { - AutoLock lock(&StatCache::stat_cache_lock); - found = stat_cache.end() != stat_cache.find(key); - do_truncate = stat_cache.size() > CacheSize; - } - - if(found){ - DelStat(key.c_str()); - }else{ - if(do_truncate){ - if(!TruncateCache()){ - return false; - } + if(!IsCacheNoObject){ + return true; // pretend successful } - } + if(CacheSize < 1){ + return true; + } + S3FS_PRN_INFO3("add no object cache entry[path=%s]", key.c_str()); - // make new - stat_cache_entry* ent = new stat_cache_entry(); - memset(&(ent->stbuf), 0, sizeof(struct stat)); - ent->hit_count = 0; - ent->isforce = false; - ent->noobjcache = true; - ent->notruncate = 0L; - ent->meta.clear(); - SetStatCacheTime(ent->cache_date); // Set time. + bool found; + bool do_truncate; + { + AutoLock lock(&StatCache::stat_cache_lock); + found = stat_cache.end() != stat_cache.find(key); + do_truncate = stat_cache.size() > CacheSize; + } - // add - AutoLock lock(&StatCache::stat_cache_lock); + if(found){ + DelStat(key.c_str()); + }else{ + if(do_truncate){ + if(!TruncateCache()){ + return false; + } + } + } - stat_cache_t::iterator iter = stat_cache.find(key); // recheck for same key exists - if(stat_cache.end() != iter){ - delete iter->second; - stat_cache.erase(iter); - } - stat_cache[key] = ent; + // make new + stat_cache_entry* ent = new stat_cache_entry(); + memset(&(ent->stbuf), 0, sizeof(struct stat)); + ent->hit_count = 0; + ent->isforce = false; + ent->noobjcache = true; + ent->notruncate = 0L; + ent->meta.clear(); + SetStatCacheTime(ent->cache_date); // Set time. 
- // check symbolic link cache - if(symlink_cache.end() != symlink_cache.find(key)){ - // if symbolic link cache has key, thus remove it. - DelSymlink(key.c_str(), true); - } + // add + AutoLock lock(&StatCache::stat_cache_lock); - return true; + stat_cache_t::iterator iter = stat_cache.find(key); // recheck for same key exists + if(stat_cache.end() != iter){ + delete iter->second; + stat_cache.erase(iter); + } + stat_cache[key] = ent; + + // check symbolic link cache + if(symlink_cache.end() != symlink_cache.find(key)){ + // if symbolic link cache has key, thus remove it. + DelSymlink(key.c_str(), true); + } + return true; } void StatCache::ChangeNoTruncateFlag(const std::string& key, bool no_truncate) { - AutoLock lock(&StatCache::stat_cache_lock); - stat_cache_t::iterator iter = stat_cache.find(key); + AutoLock lock(&StatCache::stat_cache_lock); + stat_cache_t::iterator iter = stat_cache.find(key); - if(stat_cache.end() != iter){ - stat_cache_entry* ent = iter->second; - if(ent){ - if(no_truncate){ - ++(ent->notruncate); - }else{ - if(0L < ent->notruncate){ - --(ent->notruncate); + if(stat_cache.end() != iter){ + stat_cache_entry* ent = iter->second; + if(ent){ + if(no_truncate){ + ++(ent->notruncate); + }else{ + if(0L < ent->notruncate){ + --(ent->notruncate); + } + } } - } } - } } bool StatCache::TruncateCache() { - AutoLock lock(&StatCache::stat_cache_lock); + AutoLock lock(&StatCache::stat_cache_lock); + + if(stat_cache.empty()){ + return true; + } + + // 1) erase over expire time + if(IsExpireTime){ + for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ){ + stat_cache_entry* entry = iter->second; + if(!entry || (0L == entry->notruncate && IsExpireStatCacheTime(entry->cache_date, ExpireTime))){ + delete entry; + stat_cache.erase(iter++); + }else{ + ++iter; + } + } + } + + // 2) check stat cache count + if(stat_cache.size() < CacheSize){ + return true; + } + + // 3) erase from the old cache in order + size_t erase_count= 
stat_cache.size() - CacheSize + 1; + statiterlist_t erase_iters; + for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end() && 0 < erase_count; ++iter){ + // check no truncate + stat_cache_entry* ent = iter->second; + if(ent && 0L < ent->notruncate){ + // skip for no truncate entry and keep extra counts for this entity. + if(0 < erase_count){ + --erase_count; // decrement + } + }else{ + // iter is not have notruncate flag + erase_iters.push_back(iter); + } + if(erase_count < erase_iters.size()){ + sort(erase_iters.begin(), erase_iters.end(), sort_statiterlist()); + while(erase_count < erase_iters.size()){ + erase_iters.pop_back(); + } + } + } + for(statiterlist_t::iterator iiter = erase_iters.begin(); iiter != erase_iters.end(); ++iiter){ + stat_cache_t::iterator siter = *iiter; + + S3FS_PRN_DBG("truncate stat cache[path=%s]", siter->first.c_str()); + delete siter->second; + stat_cache.erase(siter); + } + S3FS_MALLOCTRIM(0); - if(stat_cache.empty()){ return true; - } - - // 1) erase over expire time - if(IsExpireTime){ - for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ){ - stat_cache_entry* entry = iter->second; - if(!entry || (0L == entry->notruncate && IsExpireStatCacheTime(entry->cache_date, ExpireTime))){ - delete entry; - stat_cache.erase(iter++); - }else{ - ++iter; - } - } - } - - // 2) check stat cache count - if(stat_cache.size() < CacheSize){ - return true; - } - - // 3) erase from the old cache in order - size_t erase_count= stat_cache.size() - CacheSize + 1; - statiterlist_t erase_iters; - for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end() && 0 < erase_count; ++iter){ - // check no truncate - stat_cache_entry* ent = iter->second; - if(ent && 0L < ent->notruncate){ - // skip for no truncate entry and keep extra counts for this entity. 
- if(0 < erase_count){ - --erase_count; // decrement - } - }else{ - // iter is not have notruncate flag - erase_iters.push_back(iter); - } - if(erase_count < erase_iters.size()){ - sort(erase_iters.begin(), erase_iters.end(), sort_statiterlist()); - while(erase_count < erase_iters.size()){ - erase_iters.pop_back(); - } - } - } - for(statiterlist_t::iterator iiter = erase_iters.begin(); iiter != erase_iters.end(); ++iiter){ - stat_cache_t::iterator siter = *iiter; - - S3FS_PRN_DBG("truncate stat cache[path=%s]", siter->first.c_str()); - delete siter->second; - stat_cache.erase(siter); - } - S3FS_MALLOCTRIM(0); - - return true; } bool StatCache::DelStat(const char* key, bool lock_already_held) { - if(!key){ - return false; - } - S3FS_PRN_INFO3("delete stat cache entry[path=%s]", key); - - AutoLock lock(&StatCache::stat_cache_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE); - - stat_cache_t::iterator iter; - if(stat_cache.end() != (iter = stat_cache.find(string(key)))){ - delete (*iter).second; - stat_cache.erase(iter); - } - if(0 < strlen(key) && 0 != strcmp(key, "/")){ - string strpath = key; - if('/' == strpath[strpath.length() - 1]){ - // If there is "path" cache, delete it. - strpath = strpath.substr(0, strpath.length() - 1); - }else{ - // If there is "path/" cache, delete it. - strpath += "/"; + if(!key){ + return false; } - if(stat_cache.end() != (iter = stat_cache.find(strpath))){ - delete (*iter).second; - stat_cache.erase(iter); - } - } - S3FS_MALLOCTRIM(0); + S3FS_PRN_INFO3("delete stat cache entry[path=%s]", key); - return true; + AutoLock lock(&StatCache::stat_cache_lock, lock_already_held ? 
AutoLock::ALREADY_LOCKED : AutoLock::NONE); + + stat_cache_t::iterator iter; + if(stat_cache.end() != (iter = stat_cache.find(string(key)))){ + delete (*iter).second; + stat_cache.erase(iter); + } + if(0 < strlen(key) && 0 != strcmp(key, "/")){ + string strpath = key; + if('/' == strpath[strpath.length() - 1]){ + // If there is "path" cache, delete it. + strpath = strpath.substr(0, strpath.length() - 1); + }else{ + // If there is "path/" cache, delete it. + strpath += "/"; + } + if(stat_cache.end() != (iter = stat_cache.find(strpath))){ + delete (*iter).second; + stat_cache.erase(iter); + } + } + S3FS_MALLOCTRIM(0); + + return true; } bool StatCache::GetSymlink(const string& key, string& value) { - bool is_delete_cache = false; - const string& strpath = key; + bool is_delete_cache = false; + const string& strpath = key; - AutoLock lock(&StatCache::stat_cache_lock); + AutoLock lock(&StatCache::stat_cache_lock); - symlink_cache_t::iterator iter = symlink_cache.find(strpath); - if(iter != symlink_cache.end() && iter->second){ - symlink_cache_entry* ent = iter->second; - if(!IsExpireTime || !IsExpireStatCacheTime(ent->cache_date, ExpireTime)){ // use the same as Stats - // found - S3FS_PRN_DBG("symbolic link cache hit [path=%s][time=%lld.%09ld][hit count=%lu]", - strpath.c_str(), static_cast(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count); + symlink_cache_t::iterator iter = symlink_cache.find(strpath); + if(iter != symlink_cache.end() && iter->second){ + symlink_cache_entry* ent = iter->second; + if(!IsExpireTime || !IsExpireStatCacheTime(ent->cache_date, ExpireTime)){ // use the same as Stats + // found + S3FS_PRN_DBG("symbolic link cache hit [path=%s][time=%lld.%09ld][hit count=%lu]", + strpath.c_str(), static_cast(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count); - value = ent->link; + value = ent->link; - ent->hit_count++; - if(IsExpireIntervalType){ - SetStatCacheTime(ent->cache_date); + ent->hit_count++; + 
if(IsExpireIntervalType){ + SetStatCacheTime(ent->cache_date); + } + return true; + }else{ + // timeout + is_delete_cache = true; } - return true; - }else{ - // timeout - is_delete_cache = true; } - } - if(is_delete_cache){ - DelSymlink(strpath.c_str(), /*lock_already_held=*/ true); - } - return false; + if(is_delete_cache){ + DelSymlink(strpath.c_str(), /*lock_already_held=*/ true); + } + return false; } bool StatCache::AddSymlink(const string& key, const string& value) { - if(CacheSize< 1){ - return true; - } - S3FS_PRN_INFO3("add symbolic link cache entry[path=%s, value=%s]", key.c_str(), value.c_str()); - - bool found; - bool do_truncate; - { - AutoLock lock(&StatCache::stat_cache_lock); - found = symlink_cache.end() != symlink_cache.find(key); - do_truncate = symlink_cache.size() > CacheSize; - } - - if(found){ - DelSymlink(key.c_str()); - }else{ - if(do_truncate){ - if(!TruncateSymlink()){ - return false; - } + if(CacheSize< 1){ + return true; } - } + S3FS_PRN_INFO3("add symbolic link cache entry[path=%s, value=%s]", key.c_str(), value.c_str()); - // make new - symlink_cache_entry* ent = new symlink_cache_entry(); - ent->link = value; - ent->hit_count = 0; - SetStatCacheTime(ent->cache_date); // Set time(use the same as Stats). 
+ bool found; + bool do_truncate; + { + AutoLock lock(&StatCache::stat_cache_lock); + found = symlink_cache.end() != symlink_cache.find(key); + do_truncate = symlink_cache.size() > CacheSize; + } - // add - AutoLock lock(&StatCache::stat_cache_lock); + if(found){ + DelSymlink(key.c_str()); + }else{ + if(do_truncate){ + if(!TruncateSymlink()){ + return false; + } + } + } - symlink_cache_t::iterator iter = symlink_cache.find(key); // recheck for same key exists - if(symlink_cache.end() != iter){ - delete iter->second; - symlink_cache.erase(iter); - } - symlink_cache[key] = ent; + // make new + symlink_cache_entry* ent = new symlink_cache_entry(); + ent->link = value; + ent->hit_count = 0; + SetStatCacheTime(ent->cache_date); // Set time(use the same as Stats). - return true; + // add + AutoLock lock(&StatCache::stat_cache_lock); + + symlink_cache_t::iterator iter = symlink_cache.find(key); // recheck for same key exists + if(symlink_cache.end() != iter){ + delete iter->second; + symlink_cache.erase(iter); + } + symlink_cache[key] = ent; + + return true; } bool StatCache::TruncateSymlink() { - AutoLock lock(&StatCache::stat_cache_lock); + AutoLock lock(&StatCache::stat_cache_lock); - if(symlink_cache.empty()){ - return true; - } - - // 1) erase over expire time - if(IsExpireTime){ - for(symlink_cache_t::iterator iter = symlink_cache.begin(); iter != symlink_cache.end(); ){ - symlink_cache_entry* entry = iter->second; - if(!entry || IsExpireStatCacheTime(entry->cache_date, ExpireTime)){ // use the same as Stats - delete entry; - symlink_cache.erase(iter++); - }else{ - ++iter; - } + if(symlink_cache.empty()){ + return true; } - } - // 2) check stat cache count - if(symlink_cache.size() < CacheSize){ - return true; - } - - // 3) erase from the old cache in order - size_t erase_count= symlink_cache.size() - CacheSize + 1; - symlinkiterlist_t erase_iters; - for(symlink_cache_t::iterator iter = symlink_cache.begin(); iter != symlink_cache.end(); ++iter){ - 
erase_iters.push_back(iter); - sort(erase_iters.begin(), erase_iters.end(), sort_symlinkiterlist()); - if(erase_count < erase_iters.size()){ - erase_iters.pop_back(); + // 1) erase over expire time + if(IsExpireTime){ + for(symlink_cache_t::iterator iter = symlink_cache.begin(); iter != symlink_cache.end(); ){ + symlink_cache_entry* entry = iter->second; + if(!entry || IsExpireStatCacheTime(entry->cache_date, ExpireTime)){ // use the same as Stats + delete entry; + symlink_cache.erase(iter++); + }else{ + ++iter; + } + } } - } - for(symlinkiterlist_t::iterator iiter = erase_iters.begin(); iiter != erase_iters.end(); ++iiter){ - symlink_cache_t::iterator siter = *iiter; - S3FS_PRN_DBG("truncate symbolic link cache[path=%s]", siter->first.c_str()); - delete siter->second; - symlink_cache.erase(siter); - } - S3FS_MALLOCTRIM(0); + // 2) check stat cache count + if(symlink_cache.size() < CacheSize){ + return true; + } - return true; + // 3) erase from the old cache in order + size_t erase_count= symlink_cache.size() - CacheSize + 1; + symlinkiterlist_t erase_iters; + for(symlink_cache_t::iterator iter = symlink_cache.begin(); iter != symlink_cache.end(); ++iter){ + erase_iters.push_back(iter); + sort(erase_iters.begin(), erase_iters.end(), sort_symlinkiterlist()); + if(erase_count < erase_iters.size()){ + erase_iters.pop_back(); + } + } + for(symlinkiterlist_t::iterator iiter = erase_iters.begin(); iiter != erase_iters.end(); ++iiter){ + symlink_cache_t::iterator siter = *iiter; + + S3FS_PRN_DBG("truncate symbolic link cache[path=%s]", siter->first.c_str()); + delete siter->second; + symlink_cache.erase(siter); + } + S3FS_MALLOCTRIM(0); + + return true; } bool StatCache::DelSymlink(const char* key, bool lock_already_held) { - if(!key){ - return false; - } - S3FS_PRN_INFO3("delete symbolic link cache entry[path=%s]", key); + if(!key){ + return false; + } + S3FS_PRN_INFO3("delete symbolic link cache entry[path=%s]", key); - AutoLock lock(&StatCache::stat_cache_lock, 
lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE); + AutoLock lock(&StatCache::stat_cache_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE); - symlink_cache_t::iterator iter; - if(symlink_cache.end() != (iter = symlink_cache.find(string(key)))){ - delete iter->second; - symlink_cache.erase(iter); - } - S3FS_MALLOCTRIM(0); + symlink_cache_t::iterator iter; + if(symlink_cache.end() != (iter = symlink_cache.find(string(key)))){ + delete iter->second; + symlink_cache.erase(iter); + } + S3FS_MALLOCTRIM(0); - return true; + return true; } //------------------------------------------------------------------- @@ -765,43 +755,43 @@ bool StatCache::DelSymlink(const char* key, bool lock_already_held) //------------------------------------------------------------------- bool convert_header_to_stat(const char* path, const headers_t& meta, struct stat* pst, bool forcedir) { - if(!path || !pst){ - return false; - } - memset(pst, 0, sizeof(struct stat)); + if(!path || !pst){ + return false; + } + memset(pst, 0, sizeof(struct stat)); - pst->st_nlink = 1; // see fuse FAQ + pst->st_nlink = 1; // see fuse FAQ - // mode - pst->st_mode = get_mode(meta, path, true, forcedir); + // mode + pst->st_mode = get_mode(meta, path, true, forcedir); - // blocks - if(S_ISREG(pst->st_mode)){ - pst->st_blocks = get_blocks(pst->st_size); - } - pst->st_blksize = 4096; + // blocks + if(S_ISREG(pst->st_mode)){ + pst->st_blocks = get_blocks(pst->st_size); + } + pst->st_blksize = 4096; - // mtime - pst->st_mtime = get_mtime(meta); + // mtime + pst->st_mtime = get_mtime(meta); - // ctime - pst->st_ctime = get_ctime(meta); + // ctime + pst->st_ctime = get_ctime(meta); - // size - pst->st_size = get_size(meta); + // size + pst->st_size = get_size(meta); - // uid/gid - pst->st_uid = get_uid(meta); - pst->st_gid = get_gid(meta); + // uid/gid + pst->st_uid = get_uid(meta); + pst->st_gid = get_gid(meta); - return true; + return true; } /* * Local variables: -* tab-width: 2 -* 
c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/cache.h b/src/cache.h index 19ca563..7c655a7 100644 --- a/src/cache.h +++ b/src/cache.h @@ -21,26 +21,30 @@ #ifndef S3FS_CACHE_H_ #define S3FS_CACHE_H_ -#include "common.h" +#include "metaheader.h" +//------------------------------------------------------------------- +// Structure +//------------------------------------------------------------------- // // Struct for stats cache // struct stat_cache_entry { - struct stat stbuf; - unsigned long hit_count; - struct timespec cache_date; - headers_t meta; - bool isforce; - bool noobjcache; // Flag: cache is no object for no listing. - unsigned long notruncate; // 0<: not remove automatically at checking truncate + struct stat stbuf; + unsigned long hit_count; + struct timespec cache_date; + headers_t meta; + bool isforce; + bool noobjcache; // Flag: cache is no object for no listing. 
+ unsigned long notruncate; // 0<: not remove automatically at checking truncate - stat_cache_entry() : hit_count(0), isforce(false), noobjcache(false), notruncate(0L) { - memset(&stbuf, 0, sizeof(struct stat)); - cache_date.tv_sec = 0; - cache_date.tv_nsec = 0; - meta.clear(); - } + stat_cache_entry() : hit_count(0), isforce(false), noobjcache(false), notruncate(0L) + { + memset(&stbuf, 0, sizeof(struct stat)); + cache_date.tv_sec = 0; + cache_date.tv_nsec = 0; + meta.clear(); + } }; typedef std::map stat_cache_t; // key=path @@ -49,21 +53,22 @@ typedef std::map stat_cache_t; // key=path // Struct for symbolic link cache // struct symlink_cache_entry { - std::string link; - unsigned long hit_count; - struct timespec cache_date; // The function that operates timespec uses the same as Stats + std::string link; + unsigned long hit_count; + struct timespec cache_date; // The function that operates timespec uses the same as Stats - symlink_cache_entry() : link(""), hit_count(0) { - cache_date.tv_sec = 0; - cache_date.tv_nsec = 0; - } + symlink_cache_entry() : link(""), hit_count(0) + { + cache_date.tv_sec = 0; + cache_date.tv_nsec = 0; + } }; typedef std::map symlink_cache_t; -// -// Class -// +//------------------------------------------------------------------- +// Class StatCache +//------------------------------------------------------------------- // [NOTE] About Symbolic link cache // The Stats cache class now also has a symbolic link cache. // It is possible to take out the Symbolic link cache in another class, @@ -75,102 +80,112 @@ typedef std::map symlink_cache_t; // class StatCache { - private: - static StatCache singleton; - static pthread_mutex_t stat_cache_lock; - stat_cache_t stat_cache; - bool IsExpireTime; - bool IsExpireIntervalType; // if this flag is true, cache data is updated at last access time. 
- time_t ExpireTime; - unsigned long CacheSize; - bool IsCacheNoObject; - symlink_cache_t symlink_cache; + private: + static StatCache singleton; + static pthread_mutex_t stat_cache_lock; + stat_cache_t stat_cache; + bool IsExpireTime; + bool IsExpireIntervalType; // if this flag is true, cache data is updated at last access time. + time_t ExpireTime; + unsigned long CacheSize; + bool IsCacheNoObject; + symlink_cache_t symlink_cache; - private: - StatCache(); - ~StatCache(); + private: + StatCache(); + ~StatCache(); - void Clear(void); - bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce); - // Truncate stat cache - bool TruncateCache(void); - // Truncate symbolic link cache - bool TruncateSymlink(void); + void Clear(void); + bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce); + // Truncate stat cache + bool TruncateCache(void); + // Truncate symbolic link cache + bool TruncateSymlink(void); - public: - // Reference singleton - static StatCache* getStatCacheData(void) { - return &singleton; - } + public: + // Reference singleton + static StatCache* getStatCacheData(void) + { + return &singleton; + } - // Attribute - unsigned long GetCacheSize(void) const; - unsigned long SetCacheSize(unsigned long size); - time_t GetExpireTime(void) const; - time_t SetExpireTime(time_t expire, bool is_interval = false); - time_t UnsetExpireTime(void); - bool SetCacheNoObject(bool flag); - bool EnableCacheNoObject(void) { - return SetCacheNoObject(true); - } - bool DisableCacheNoObject(void) { - return SetCacheNoObject(false); - } - bool GetCacheNoObject(void) const { - return IsCacheNoObject; - } + // Attribute + unsigned long GetCacheSize(void) const; + unsigned long SetCacheSize(unsigned long size); + time_t GetExpireTime(void) const; + time_t SetExpireTime(time_t expire, bool is_interval = false); + time_t UnsetExpireTime(void); + bool 
SetCacheNoObject(bool flag); + bool EnableCacheNoObject(void) + { + return SetCacheNoObject(true); + } + bool DisableCacheNoObject(void) + { + return SetCacheNoObject(false); + } + bool GetCacheNoObject(void) const + { + return IsCacheNoObject; + } - // Get stat cache - bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck = true, bool* pisforce = NULL) { - return GetStat(key, pst, meta, overcheck, NULL, pisforce); - } - bool GetStat(const std::string& key, struct stat* pst, bool overcheck = true) { - return GetStat(key, pst, NULL, overcheck, NULL, NULL); - } - bool GetStat(const std::string& key, headers_t* meta, bool overcheck = true) { - return GetStat(key, NULL, meta, overcheck, NULL, NULL); - } - bool HasStat(const std::string& key, bool overcheck = true) { - return GetStat(key, NULL, NULL, overcheck, NULL, NULL); - } - bool HasStat(const std::string& key, const char* etag, bool overcheck = true) { - return GetStat(key, NULL, NULL, overcheck, etag, NULL); - } + // Get stat cache + bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck = true, bool* pisforce = NULL) + { + return GetStat(key, pst, meta, overcheck, NULL, pisforce); + } + bool GetStat(const std::string& key, struct stat* pst, bool overcheck = true) + { + return GetStat(key, pst, NULL, overcheck, NULL, NULL); + } + bool GetStat(const std::string& key, headers_t* meta, bool overcheck = true) + { + return GetStat(key, NULL, meta, overcheck, NULL, NULL); + } + bool HasStat(const std::string& key, bool overcheck = true) + { + return GetStat(key, NULL, NULL, overcheck, NULL, NULL); + } + bool HasStat(const std::string& key, const char* etag, bool overcheck = true) + { + return GetStat(key, NULL, NULL, overcheck, etag, NULL); + } - // Cache For no object - bool IsNoObjectCache(const std::string& key, bool overcheck = true); - bool AddNoObjectCache(const std::string& key); + // Cache For no object + bool IsNoObjectCache(const std::string& 
key, bool overcheck = true); + bool AddNoObjectCache(const std::string& key); - // Add stat cache - bool AddStat(const std::string& key, headers_t& meta, bool forcedir = false, bool no_truncate = false); + // Add stat cache + bool AddStat(const std::string& key, headers_t& meta, bool forcedir = false, bool no_truncate = false); - // Change no truncate flag - void ChangeNoTruncateFlag(const std::string& key, bool no_truncate); + // Change no truncate flag + void ChangeNoTruncateFlag(const std::string& key, bool no_truncate); - // Delete stat cache - bool DelStat(const char* key, bool lock_already_held = false); - bool DelStat(std::string& key, bool lock_already_held = false) { - return DelStat(key.c_str(), lock_already_held); - } + // Delete stat cache + bool DelStat(const char* key, bool lock_already_held = false); + bool DelStat(std::string& key, bool lock_already_held = false) + { + return DelStat(key.c_str(), lock_already_held); + } - // Cache for symbolic link - bool GetSymlink(const std::string& key, std::string& value); - bool AddSymlink(const std::string& key, const std::string& value); - bool DelSymlink(const char* key, bool lock_already_held = false); + // Cache for symbolic link + bool GetSymlink(const std::string& key, std::string& value); + bool AddSymlink(const std::string& key, const std::string& value); + bool DelSymlink(const char* key, bool lock_already_held = false); }; -// +//------------------------------------------------------------------- // Functions -// +//------------------------------------------------------------------- bool convert_header_to_stat(const char* path, const headers_t& meta, struct stat* pst, bool forcedir = false); #endif // S3FS_CACHE_H_ /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/common.h 
b/src/common.h index 421caca..ba8a265 100644 --- a/src/common.h +++ b/src/common.h @@ -21,192 +21,38 @@ #ifndef S3FS_COMMON_H_ #define S3FS_COMMON_H_ -#include #include "../config.h" +#include "types.h" +#include "s3fs_logger.h" -// -// Extended attribute -// -#ifdef HAVE_SYS_EXTATTR_H -#include -#elif HAVE_ATTR_XATTR_H -#include -#elif HAVE_SYS_XATTR_H -#include -#endif - -// -// Macro -// -static inline const char *SAFESTRPTR(const char *strptr) { return strptr ? strptr : ""; } - -// -// Debug level -// -enum s3fs_log_level{ - S3FS_LOG_CRIT = 0, // LOG_CRIT - S3FS_LOG_ERR = 1, // LOG_ERR - S3FS_LOG_WARN = 3, // LOG_WARNING - S3FS_LOG_INFO = 7, // LOG_INFO - S3FS_LOG_DBG = 15 // LOG_DEBUG -}; - -// -// Debug macros -// -#define IS_S3FS_LOG_CRIT() (S3FS_LOG_CRIT == debug_level) -#define IS_S3FS_LOG_ERR() (S3FS_LOG_ERR == (debug_level & S3FS_LOG_DBG)) -#define IS_S3FS_LOG_WARN() (S3FS_LOG_WARN == (debug_level & S3FS_LOG_DBG)) -#define IS_S3FS_LOG_INFO() (S3FS_LOG_INFO == (debug_level & S3FS_LOG_DBG)) -#define IS_S3FS_LOG_DBG() (S3FS_LOG_DBG == (debug_level & S3FS_LOG_DBG)) - -#define S3FS_LOG_LEVEL_TO_SYSLOG(level) \ - ( S3FS_LOG_DBG == (level & S3FS_LOG_DBG) ? LOG_DEBUG : \ - S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? LOG_INFO : \ - S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? LOG_WARNING : \ - S3FS_LOG_ERR == (level & S3FS_LOG_DBG) ? LOG_ERR : LOG_CRIT ) - -#define S3FS_LOG_LEVEL_STRING(level) \ - ( S3FS_LOG_DBG == (level & S3FS_LOG_DBG) ? "[DBG] " : \ - S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? "[INF] " : \ - S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? "[WAN] " : \ - S3FS_LOG_ERR == (level & S3FS_LOG_DBG) ? "[ERR] " : "[CRT] " ) - -#define S3FS_LOG_NEST_MAX 4 -#define S3FS_LOG_NEST(nest) (nest < S3FS_LOG_NEST_MAX ? s3fs_log_nest[nest] : s3fs_log_nest[S3FS_LOG_NEST_MAX - 1]) - -#define S3FS_LOW_LOGPRN(level, fmt, ...) 
\ - do{ \ - if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \ - if(foreground){ \ - fprintf(stdout, "%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), __FILE__, __func__, __LINE__, __VA_ARGS__); \ - }else{ \ - syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s%s:%s(%d): " fmt "%s", instance_name.c_str(), __FILE__, __func__, __LINE__, __VA_ARGS__); \ - } \ - } \ - }while(0) - -#define S3FS_LOW_LOGPRN2(level, nest, fmt, ...) \ - do{ \ - if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \ - if(foreground){ \ - fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), S3FS_LOG_NEST(nest), __FILE__, __func__, __LINE__, __VA_ARGS__); \ - }else{ \ - syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s%s" fmt "%s", instance_name.c_str(), S3FS_LOG_NEST(nest), __VA_ARGS__); \ - } \ - } \ - }while(0) - -#define S3FS_LOW_CURLDBG(fmt, ...) \ - do{ \ - if(foreground){ \ - fprintf(stdout, "[CURL DBG] " fmt "%s\n", __VA_ARGS__); \ - }else{ \ - syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "%s" fmt "%s", instance_name.c_str(), __VA_ARGS__); \ - } \ - }while(0) - -#define S3FS_LOW_LOGPRN_EXIT(fmt, ...) \ - do{ \ - if(foreground){ \ - fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \ - }else{ \ - fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \ - syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "%ss3fs: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \ - } \ - }while(0) - -// Special macro for init message -#define S3FS_PRN_INIT_INFO(fmt, ...) \ - do{ \ - if(foreground){ \ - fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(S3FS_LOG_INFO), S3FS_LOG_NEST(0), __FILE__, __func__, __LINE__, __VA_ARGS__, ""); \ - }else{ \ - syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_INFO), "%s%s" fmt "%s", instance_name.c_str(), S3FS_LOG_NEST(0), __VA_ARGS__, ""); \ - } \ - }while(0) - -// Special macro for checking cache files -#define S3FS_LOW_CACHE(fp, fmt, ...) 
\ - do{ \ - if(foreground){ \ - fprintf(fp, fmt "%s\n", __VA_ARGS__); \ - }else{ \ - syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_INFO), "%s: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \ - } \ - }while(0) - -// [NOTE] -// small trick for VA_ARGS -// -#define S3FS_PRN_EXIT(fmt, ...) S3FS_LOW_LOGPRN_EXIT(fmt, ##__VA_ARGS__, "") -#define S3FS_PRN_CRIT(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_CRIT, fmt, ##__VA_ARGS__, "") -#define S3FS_PRN_ERR(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_ERR, fmt, ##__VA_ARGS__, "") -#define S3FS_PRN_WARN(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_WARN, fmt, ##__VA_ARGS__, "") -#define S3FS_PRN_DBG(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_DBG, fmt, ##__VA_ARGS__, "") -#define S3FS_PRN_INFO(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 0, fmt, ##__VA_ARGS__, "") -#define S3FS_PRN_INFO0(fmt, ...) S3FS_LOG_INFO(fmt, __VA_ARGS__) -#define S3FS_PRN_INFO1(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 1, fmt, ##__VA_ARGS__, "") -#define S3FS_PRN_INFO2(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 2, fmt, ##__VA_ARGS__, "") -#define S3FS_PRN_INFO3(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 3, fmt, ##__VA_ARGS__, "") -#define S3FS_PRN_CURL(fmt, ...) S3FS_LOW_CURLDBG(fmt, ##__VA_ARGS__, "") -#define S3FS_PRN_CACHE(fp, ...) S3FS_LOW_CACHE(fp, ##__VA_ARGS__, "") - -// -// Typedef -// -struct header_nocase_cmp : public std::binary_function{ - bool operator()(const std::string &strleft, const std::string &strright) const - { - return (strcasecmp(strleft.c_str(), strright.c_str()) < 0); - } -}; -typedef std::map headers_t; - -// -// Header "x-amz-meta-xattr" is for extended attributes. -// This header is url encoded string which is json formatted. 
-// x-amz-meta-xattr:urlencode({"xattr-1":"base64(value-1)","xattr-2":"base64(value-2)","xattr-3":"base64(value-3)"}) -// -typedef struct xattr_value{ - unsigned char* pvalue; - size_t length; - - explicit xattr_value(unsigned char* pval = NULL, size_t len = 0) : pvalue(pval), length(len) {} - ~xattr_value() - { - delete[] pvalue; - } -}XATTRVAL, *PXATTRVAL; - -typedef std::map xattrs_t; - -// +//------------------------------------------------------------------- // Global variables -// +//------------------------------------------------------------------- // TODO: namespace these +extern int64_t FIVE_GB; +extern off_t MIN_MULTIPART_SIZE; extern bool foreground; extern bool nomultipart; extern bool pathrequeststyle; extern bool complement_stat; +extern bool noxmlns; extern std::string program_name; extern std::string service_path; -extern std::string host; +extern std::string s3host; extern std::string bucket; extern std::string mount_prefix; extern std::string endpoint; extern std::string cipher_suites; extern std::string instance_name; -extern s3fs_log_level debug_level; -extern const char* s3fs_log_nest[S3FS_LOG_NEST_MAX]; +extern std::string aws_profile; #endif // S3FS_COMMON_H_ /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/common_auth.cpp b/src/common_auth.cpp index 4cd0d3e..38e6a64 100644 --- a/src/common_auth.cpp +++ b/src/common_auth.cpp @@ -24,6 +24,8 @@ #include #include +#include "common.h" +#include "s3fs.h" #include "s3fs_auth.h" #include "string_util.h" @@ -34,63 +36,63 @@ using namespace std; //------------------------------------------------------------------- string s3fs_get_content_md5(int fd) { - unsigned char* md5hex; - char* base64; - string Signature; + unsigned char* md5hex; + char* base64; + string Signature; - if(NULL 
== (md5hex = s3fs_md5hexsum(fd, 0, -1))){ - return string(""); - } - if(NULL == (base64 = s3fs_base64(md5hex, get_md5_digest_length()))){ - return string(""); // ENOMEM - } - delete[] md5hex; + if(NULL == (md5hex = s3fs_md5hexsum(fd, 0, -1))){ + return string(""); + } + if(NULL == (base64 = s3fs_base64(md5hex, get_md5_digest_length()))){ + return string(""); // ENOMEM + } + delete[] md5hex; - Signature = base64; - delete[] base64; + Signature = base64; + delete[] base64; - return Signature; + return Signature; } string s3fs_md5sum(int fd, off_t start, ssize_t size) { - size_t digestlen = get_md5_digest_length(); - unsigned char* md5hex; + size_t digestlen = get_md5_digest_length(); + unsigned char* md5hex; - if(NULL == (md5hex = s3fs_md5hexsum(fd, start, size))){ - return string(""); - } + if(NULL == (md5hex = s3fs_md5hexsum(fd, start, size))){ + return string(""); + } - std::string md5 = s3fs_hex(md5hex, digestlen); - delete[] md5hex; + std::string md5 = s3fs_hex(md5hex, digestlen); + delete[] md5hex; - return md5; + return md5; } string s3fs_sha256sum(int fd, off_t start, ssize_t size) { - size_t digestlen = get_sha256_digest_length(); - char sha256[2 * digestlen + 1]; - unsigned char* sha256hex; + size_t digestlen = get_sha256_digest_length(); + char sha256[2 * digestlen + 1]; + unsigned char* sha256hex; - if(NULL == (sha256hex = s3fs_sha256hexsum(fd, start, size))){ - return string(""); - } + if(NULL == (sha256hex = s3fs_sha256hexsum(fd, start, size))){ + return string(""); + } - memset(sha256, 0, 2 * digestlen + 1); - for(size_t pos = 0; pos < digestlen; pos++){ - snprintf(sha256 + 2 * pos, 3, "%02x", sha256hex[pos]); - } - delete[] sha256hex; + memset(sha256, 0, 2 * digestlen + 1); + for(size_t pos = 0; pos < digestlen; pos++){ + snprintf(sha256 + 2 * pos, 3, "%02x", sha256hex[pos]); + } + delete[] sha256hex; - return string(sha256); + return string(sha256); } /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * 
End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/curl.cpp b/src/curl.cpp index 0b40980..42db8bd 100644 --- a/src/curl.cpp +++ b/src/curl.cpp @@ -21,350 +21,43 @@ #include #include #include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include #include -#include -#include #include -#include -#include #include "common.h" -#include "curl.h" -#include "string_util.h" #include "s3fs.h" -#include "s3fs_util.h" +#include "curl.h" +#include "curl_multi.h" +#include "curl_util.h" #include "s3fs_auth.h" +#include "autolock.h" +#include "s3fs_util.h" +#include "string_util.h" #include "addhead.h" -#include "fdcache.h" -#include "psemaphore.h" using namespace std; -static const std::string empty_payload_hash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; - //------------------------------------------------------------------- -// Utilities +// Symbols //------------------------------------------------------------------- -// [TODO] -// This function uses temporary file, but should not use it. -// For not using it, we implement function in each auth file(openssl, nss. gnutls). 
-// -static bool make_md5_from_binary(const char* pstr, size_t length, string& md5) -{ - if(!pstr || '\0' == pstr[0]){ - S3FS_PRN_ERR("Parameter is wrong."); - return false; - } - FILE* fp; - if(NULL == (fp = tmpfile())){ - S3FS_PRN_ERR("Could not make tmpfile."); - return false; - } - if(length != fwrite(pstr, sizeof(char), length, fp)){ - S3FS_PRN_ERR("Failed to write tmpfile."); - fclose(fp); - return false; - } - int fd; - if(0 != fflush(fp) || 0 != fseek(fp, 0L, SEEK_SET) || -1 == (fd = fileno(fp))){ - S3FS_PRN_ERR("Failed to make MD5."); - fclose(fp); - return false; - } - // base64 md5 - md5 = s3fs_get_content_md5(fd); - if(0 == md5.length()){ - S3FS_PRN_ERR("Failed to make MD5."); - fclose(fp); - return false; - } - fclose(fp); - return true; -} - -static string url_to_host(const std::string &url) -{ - S3FS_PRN_INFO3("url is %s", url.c_str()); - - static const string http = "http://"; - static const string https = "https://"; - std::string hostname; - - if (url.compare(0, http.size(), http) == 0) { - hostname = url.substr(http.size()); - } else if (url.compare(0, https.size(), https) == 0) { - hostname = url.substr(https.size()); - } else { - S3FS_PRN_EXIT("url does not begin with http:// or https://"); - abort(); - } - - size_t idx; - if ((idx = hostname.find('/')) != string::npos) { - return hostname.substr(0, idx); - } else { - return hostname; - } -} - -static string get_bucket_host() -{ - if(!pathrequeststyle){ - return bucket + "." 
+ url_to_host(host); - } - return url_to_host(host); -} - -// compare ETag ignoring quotes and case -static bool etag_equals(std::string s1, std::string s2) { - if(s1.length() > 1 && s1[0] == '\"' && s1[s1.length() - 1] == '\"'){ - s1 = s1.substr(1, s1.size() - 2); - } - if(s2.length() > 1 && s2[0] == '\"' && s2[s2.length() - 1] == '\"'){ - s2 = s2.substr(1, s2.size() - 2); - } - return 0 == strcasecmp(s1.c_str(), s2.c_str()); -} - -#if 0 // noused -static string tolower_header_name(const char* head) -{ - string::size_type pos; - string name = head; - string value(""); - if(string::npos != (pos = name.find(':'))){ - value= name.substr(pos); - name = name.substr(0, pos); - } - name = lower(name); - name += value; - return name; -} -#endif - -static const char* getCurlDebugHead(curl_infotype type) -{ - const char* unknown = ""; - const char* dataIn = "BODY <"; - const char* dataOut = "BODY >"; - const char* headIn = "<"; - const char* headOut = ">"; - - switch(type){ - case CURLINFO_DATA_IN: - return dataIn; - case CURLINFO_DATA_OUT: - return dataOut; - case CURLINFO_HEADER_IN: - return headIn; - case CURLINFO_HEADER_OUT: - return headOut; - default: - break; - } - return unknown; -} - -//------------------------------------------------------------------- -// Class BodyData -//------------------------------------------------------------------- -static const int BODYDATA_RESIZE_APPEND_MIN = 1024; -static const int BODYDATA_RESIZE_APPEND_MID = 1024 * 1024; -static const int BODYDATA_RESIZE_APPEND_MAX = 10 * 1024 * 1024; - -static size_t adjust_block(size_t bytes, size_t block) { return ((bytes / block) + ((bytes % block) ? 1 : 0)) * block; } - -bool BodyData::Resize(size_t addbytes) -{ - if(IsSafeSize(addbytes)){ - return true; - } - - // New size - size_t need_size = adjust_block((lastpos + addbytes + 1) - bufsize, sizeof(off_t)); - - if(BODYDATA_RESIZE_APPEND_MAX < bufsize){ - need_size = (BODYDATA_RESIZE_APPEND_MAX < need_size ? 
need_size : BODYDATA_RESIZE_APPEND_MAX); - }else if(BODYDATA_RESIZE_APPEND_MID < bufsize){ - need_size = (BODYDATA_RESIZE_APPEND_MID < need_size ? need_size : BODYDATA_RESIZE_APPEND_MID); - }else if(BODYDATA_RESIZE_APPEND_MIN < bufsize){ - need_size = ((bufsize * 2) < need_size ? need_size : (bufsize * 2)); - }else{ - need_size = (BODYDATA_RESIZE_APPEND_MIN < need_size ? need_size : BODYDATA_RESIZE_APPEND_MIN); - } - // realloc - char* newtext; - if(NULL == (newtext = (char*)realloc(text, (bufsize + need_size)))){ - S3FS_PRN_CRIT("not enough memory (realloc returned NULL)"); - free(text); - text = NULL; - return false; - } - text = newtext; - bufsize += need_size; - - return true; -} - -void BodyData::Clear() -{ - if(text){ - free(text); - text = NULL; - } - lastpos = 0; - bufsize = 0; -} - -bool BodyData::Append(void* ptr, size_t bytes) -{ - if(!ptr){ - return false; - } - if(0 == bytes){ - return true; - } - if(!Resize(bytes)){ - return false; - } - memcpy(&text[lastpos], ptr, bytes); - lastpos += bytes; - text[lastpos] = '\0'; - - return true; -} - -const char* BodyData::str() const -{ - if(!text){ - static const char* strnull = ""; - return strnull; - } - return text; -} - -//------------------------------------------------------------------- -// Class CurlHandlerPool -//------------------------------------------------------------------- -bool CurlHandlerPool::Init() -{ - pthread_mutexattr_t attr; - pthread_mutexattr_init(&attr); -#if S3FS_PTHREAD_ERRORCHECK - pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); -#endif - if (0 != pthread_mutex_init(&mLock, &attr)) { - S3FS_PRN_ERR("Init curl handlers lock failed"); - return false; - } - - for(int cnt = 0; cnt < mMaxHandlers; ++cnt){ - CURL* hCurl = curl_easy_init(); - if(!hCurl){ - S3FS_PRN_ERR("Init curl handlers pool failed"); - Destroy(); - return false; - } - mPool.push_back(hCurl); - } - - return true; -} - -bool CurlHandlerPool::Destroy() -{ - while(!mPool.empty()){ - CURL* hCurl = mPool.back(); - 
mPool.pop_back(); - if(hCurl){ - curl_easy_cleanup(hCurl); - } - } - if (0 != pthread_mutex_destroy(&mLock)) { - S3FS_PRN_ERR("Destroy curl handlers lock failed"); - return false; - } - - return true; -} - -CURL* CurlHandlerPool::GetHandler(bool only_pool) -{ - CURL* hCurl = NULL; - { - AutoLock lock(&mLock); - - if(!mPool.empty()){ - hCurl = mPool.back(); - mPool.pop_back(); - S3FS_PRN_DBG("Get handler from pool: rest = %d", static_cast(mPool.size())); - } - } - if(only_pool){ - return hCurl; - } - if(!hCurl){ - S3FS_PRN_INFO("Pool empty: force to create new handler"); - hCurl = curl_easy_init(); - } - return hCurl; -} - -void CurlHandlerPool::ReturnHandler(CURL* hCurl, bool restore_pool) -{ - if(!hCurl){ - return; - } - - if(restore_pool){ - AutoLock lock(&mLock); - - S3FS_PRN_DBG("Return handler to pool"); - mPool.push_back(hCurl); - - while(mMaxHandlers <= static_cast(mPool.size())){ - CURL* hOldCurl = mPool.front(); - mPool.pop_front(); - if(hOldCurl){ - S3FS_PRN_INFO("Pool full: destroy the oldest handler"); - curl_easy_cleanup(hOldCurl); - } - } - }else{ - S3FS_PRN_INFO("Pool full: destroy the handler"); - curl_easy_cleanup(hCurl); - } -} +static const std::string empty_payload_hash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; //------------------------------------------------------------------- // Class S3fsCurl //------------------------------------------------------------------- -static const int MULTIPART_SIZE = 10 * 1024 * 1024; +static const int MULTIPART_SIZE = 10 * 1024 * 1024; + // constant must be at least 512 MB to copy the maximum 5 TB object size // TODO: scale part size with object size -static const int MAX_MULTI_COPY_SOURCE_SIZE = 512 * 1024 * 1024; +static const int MAX_MULTI_COPY_SOURCE_SIZE = 512 * 1024 * 1024; -static const int IAM_EXPIRE_MERGIN = 20 * 60; // update timing -static const std::string ECS_IAM_ENV_VAR = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"; -static const std::string IAMCRED_ACCESSKEYID = 
"AccessKeyId"; -static const std::string IAMCRED_SECRETACCESSKEY = "SecretAccessKey"; -static const std::string IAMCRED_ROLEARN = "RoleArn"; - -static const long S3FSCURL_RESPONSECODE_NOTSET = -1; -static const long S3FSCURL_RESPONSECODE_FATAL_ERROR = -2; -static const int S3FSCURL_PERFORM_RESULT_NOTSET = 1; +static const int IAM_EXPIRE_MERGIN = 20 * 60; // update timing +static const std::string ECS_IAM_ENV_VAR = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"; +static const std::string IAMCRED_ACCESSKEYID = "AccessKeyId"; +static const std::string IAMCRED_SECRETACCESSKEY = "SecretAccessKey"; +static const std::string IAMCRED_ROLEARN = "RoleArn"; // [NOTE] about default mime.types file // If no mime.types file is specified in the mime option, s3fs @@ -377,15 +70,21 @@ static const int S3FSCURL_PERFORM_RESULT_NOTSET = 1; // If the mime.types file is not found, s3fs will exit with an // error. // -static const char* DEFAULT_MIME_FILE = "/etc/mime.types"; -static const char* SPECIAL_DARWIN_MIME_FILE = "/etc/apache2/mime.types"; +static const char* DEFAULT_MIME_FILE = "/etc/mime.types"; +static const char* SPECIAL_DARWIN_MIME_FILE = "/etc/apache2/mime.types"; // [NOTICE] // This symbol is for libcurl under 7.23.0 #ifndef CURLSHE_NOT_BUILT_IN -#define CURLSHE_NOT_BUILT_IN 5 +#define CURLSHE_NOT_BUILT_IN 5 #endif +//------------------------------------------------------------------- +// Class S3fsCurl +//------------------------------------------------------------------- +const long S3fsCurl::S3FSCURL_RESPONSECODE_NOTSET; +const long S3fsCurl::S3FSCURL_RESPONSECODE_FATAL_ERROR; +const int S3fsCurl::S3FSCURL_PERFORM_RESULT_NOTSET; pthread_mutex_t S3fsCurl::curl_handles_lock; S3fsCurl::callback_locks_t S3fsCurl::callback_locks; bool S3fsCurl::is_initglobal_done = false; @@ -432,7 +131,7 @@ int S3fsCurl::max_multireq = 20; // default off_t S3fsCurl::multipart_size = MULTIPART_SIZE; // default bool S3fsCurl::is_sigv4 = true; // default bool S3fsCurl::is_ua = true; // default -bool 
S3fsCurl::is_use_session_token = false; // default +bool S3fsCurl::is_use_session_token= false; // default bool S3fsCurl::requester_pays = false; // default //------------------------------------------------------------------- @@ -440,309 +139,308 @@ bool S3fsCurl::requester_pays = false; // default //------------------------------------------------------------------- bool S3fsCurl::InitS3fsCurl() { - pthread_mutexattr_t attr; - pthread_mutexattr_init(&attr); + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); #if S3FS_PTHREAD_ERRORCHECK - pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); #endif - if(0 != pthread_mutex_init(&S3fsCurl::curl_handles_lock, &attr)){ - return false; - } - if(0 != pthread_mutex_init(&S3fsCurl::callback_locks.dns, &attr)){ - return false; - } - if(0 != pthread_mutex_init(&S3fsCurl::callback_locks.ssl_session, &attr)){ - return false; - } - if(!S3fsCurl::InitGlobalCurl()){ - return false; - } - if(!S3fsCurl::InitShareCurl()){ - return false; - } - if(!S3fsCurl::InitCryptMutex()){ - return false; - } - // [NOTE] - // sCurlPoolSize must be over parallel(or multireq) count. - // - if(sCurlPoolSize < std::max(GetMaxParallelCount(), GetMaxMultiRequest())){ - sCurlPoolSize = std::max(GetMaxParallelCount(), GetMaxMultiRequest()); - } - sCurlPool = new CurlHandlerPool(sCurlPoolSize); - if (!sCurlPool->Init()) { - return false; - } - return true; + if(0 != pthread_mutex_init(&S3fsCurl::curl_handles_lock, &attr)){ + return false; + } + if(0 != pthread_mutex_init(&S3fsCurl::callback_locks.dns, &attr)){ + return false; + } + if(0 != pthread_mutex_init(&S3fsCurl::callback_locks.ssl_session, &attr)){ + return false; + } + if(!S3fsCurl::InitGlobalCurl()){ + return false; + } + if(!S3fsCurl::InitShareCurl()){ + return false; + } + if(!S3fsCurl::InitCryptMutex()){ + return false; + } + // [NOTE] + // sCurlPoolSize must be over parallel(or multireq) count. 
+ // + if(sCurlPoolSize < std::max(GetMaxParallelCount(), GetMaxMultiRequest())){ + sCurlPoolSize = std::max(GetMaxParallelCount(), GetMaxMultiRequest()); + } + sCurlPool = new CurlHandlerPool(sCurlPoolSize); + if (!sCurlPool->Init()) { + return false; + } + return true; } bool S3fsCurl::DestroyS3fsCurl() { - bool result = true; + bool result = true; - if(!S3fsCurl::DestroyCryptMutex()){ - result = false; - } - if(!sCurlPool->Destroy()){ - result = false; - } - delete sCurlPool; - sCurlPool = NULL; - if(!S3fsCurl::DestroyShareCurl()){ - result = false; - } - if(!S3fsCurl::DestroyGlobalCurl()){ - result = false; - } - if(0 != pthread_mutex_destroy(&S3fsCurl::callback_locks.dns)){ - result = false; - } - if(0 != pthread_mutex_destroy(&S3fsCurl::callback_locks.ssl_session)){ - result = false; - } - if(0 != pthread_mutex_destroy(&S3fsCurl::curl_handles_lock)){ - result = false; - } - return result; + if(!S3fsCurl::DestroyCryptMutex()){ + result = false; + } + if(!sCurlPool->Destroy()){ + result = false; + } + delete sCurlPool; + sCurlPool = NULL; + if(!S3fsCurl::DestroyShareCurl()){ + result = false; + } + if(!S3fsCurl::DestroyGlobalCurl()){ + result = false; + } + if(0 != pthread_mutex_destroy(&S3fsCurl::callback_locks.dns)){ + result = false; + } + if(0 != pthread_mutex_destroy(&S3fsCurl::callback_locks.ssl_session)){ + result = false; + } + if(0 != pthread_mutex_destroy(&S3fsCurl::curl_handles_lock)){ + result = false; + } + return result; } bool S3fsCurl::InitGlobalCurl() { - if(S3fsCurl::is_initglobal_done){ - return false; - } - if(CURLE_OK != curl_global_init(CURL_GLOBAL_ALL)){ - S3FS_PRN_ERR("init_curl_global_all returns error."); - return false; - } - S3fsCurl::is_initglobal_done = true; - return true; + if(S3fsCurl::is_initglobal_done){ + return false; + } + if(CURLE_OK != curl_global_init(CURL_GLOBAL_ALL)){ + S3FS_PRN_ERR("init_curl_global_all returns error."); + return false; + } + S3fsCurl::is_initglobal_done = true; + return true; } bool 
S3fsCurl::DestroyGlobalCurl() { - if(!S3fsCurl::is_initglobal_done){ - return false; - } - curl_global_cleanup(); - S3fsCurl::is_initglobal_done = false; - return true; + if(!S3fsCurl::is_initglobal_done){ + return false; + } + curl_global_cleanup(); + S3fsCurl::is_initglobal_done = false; + return true; } bool S3fsCurl::InitShareCurl() { - CURLSHcode nSHCode; + CURLSHcode nSHCode; - if(!S3fsCurl::is_dns_cache && !S3fsCurl::is_ssl_session_cache){ - S3FS_PRN_INFO("Curl does not share DNS data."); + if(!S3fsCurl::is_dns_cache && !S3fsCurl::is_ssl_session_cache){ + S3FS_PRN_INFO("Curl does not share DNS data."); + return true; + } + if(S3fsCurl::hCurlShare){ + S3FS_PRN_WARN("already initiated."); + return false; + } + if(NULL == (S3fsCurl::hCurlShare = curl_share_init())){ + S3FS_PRN_ERR("curl_share_init failed"); + return false; + } + if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_LOCKFUNC, S3fsCurl::LockCurlShare))){ + S3FS_PRN_ERR("curl_share_setopt(LOCKFUNC) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); + return false; + } + if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_UNLOCKFUNC, S3fsCurl::UnlockCurlShare))){ + S3FS_PRN_ERR("curl_share_setopt(UNLOCKFUNC) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); + return false; + } + if(S3fsCurl::is_dns_cache){ + nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS); + if(CURLSHE_OK != nSHCode && CURLSHE_BAD_OPTION != nSHCode && CURLSHE_NOT_BUILT_IN != nSHCode){ + S3FS_PRN_ERR("curl_share_setopt(DNS) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); + return false; + }else if(CURLSHE_BAD_OPTION == nSHCode || CURLSHE_NOT_BUILT_IN == nSHCode){ + S3FS_PRN_WARN("curl_share_setopt(DNS) returns %d(%s), but continue without shared dns data.", nSHCode, curl_share_strerror(nSHCode)); + } + } + if(S3fsCurl::is_ssl_session_cache){ + nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_SHARE, 
CURL_LOCK_DATA_SSL_SESSION); + if(CURLSHE_OK != nSHCode && CURLSHE_BAD_OPTION != nSHCode && CURLSHE_NOT_BUILT_IN != nSHCode){ + S3FS_PRN_ERR("curl_share_setopt(SSL SESSION) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); + return false; + }else if(CURLSHE_BAD_OPTION == nSHCode || CURLSHE_NOT_BUILT_IN == nSHCode){ + S3FS_PRN_WARN("curl_share_setopt(SSL SESSION) returns %d(%s), but continue without shared ssl session data.", nSHCode, curl_share_strerror(nSHCode)); + } + } + if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_USERDATA, &S3fsCurl::callback_locks))){ + S3FS_PRN_ERR("curl_share_setopt(USERDATA) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); + return false; + } return true; - } - if(S3fsCurl::hCurlShare){ - S3FS_PRN_WARN("already initiated."); - return false; - } - if(NULL == (S3fsCurl::hCurlShare = curl_share_init())){ - S3FS_PRN_ERR("curl_share_init failed"); - return false; - } - if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_LOCKFUNC, S3fsCurl::LockCurlShare))){ - S3FS_PRN_ERR("curl_share_setopt(LOCKFUNC) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); - return false; - } - if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_UNLOCKFUNC, S3fsCurl::UnlockCurlShare))){ - S3FS_PRN_ERR("curl_share_setopt(UNLOCKFUNC) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); - return false; - } - if(S3fsCurl::is_dns_cache){ - nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS); - if(CURLSHE_OK != nSHCode && CURLSHE_BAD_OPTION != nSHCode && CURLSHE_NOT_BUILT_IN != nSHCode){ - S3FS_PRN_ERR("curl_share_setopt(DNS) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); - return false; - }else if(CURLSHE_BAD_OPTION == nSHCode || CURLSHE_NOT_BUILT_IN == nSHCode){ - S3FS_PRN_WARN("curl_share_setopt(DNS) returns %d(%s), but continue without shared dns data.", nSHCode, curl_share_strerror(nSHCode)); - } - } - 
if(S3fsCurl::is_ssl_session_cache){ - nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_SHARE, CURL_LOCK_DATA_SSL_SESSION); - if(CURLSHE_OK != nSHCode && CURLSHE_BAD_OPTION != nSHCode && CURLSHE_NOT_BUILT_IN != nSHCode){ - S3FS_PRN_ERR("curl_share_setopt(SSL SESSION) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); - return false; - }else if(CURLSHE_BAD_OPTION == nSHCode || CURLSHE_NOT_BUILT_IN == nSHCode){ - S3FS_PRN_WARN("curl_share_setopt(SSL SESSION) returns %d(%s), but continue without shared ssl session data.", nSHCode, curl_share_strerror(nSHCode)); - } - } - if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_USERDATA, &S3fsCurl::callback_locks))){ - S3FS_PRN_ERR("curl_share_setopt(USERDATA) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); - return false; - } - return true; } bool S3fsCurl::DestroyShareCurl() { - if(!S3fsCurl::hCurlShare){ - if(!S3fsCurl::is_dns_cache && !S3fsCurl::is_ssl_session_cache){ - return true; + if(!S3fsCurl::hCurlShare){ + if(!S3fsCurl::is_dns_cache && !S3fsCurl::is_ssl_session_cache){ + return true; + } + S3FS_PRN_WARN("already destroy share curl."); + return false; } - S3FS_PRN_WARN("already destroy share curl."); - return false; - } - if(CURLSHE_OK != curl_share_cleanup(S3fsCurl::hCurlShare)){ - return false; - } - S3fsCurl::hCurlShare = NULL; - return true; + if(CURLSHE_OK != curl_share_cleanup(S3fsCurl::hCurlShare)){ + return false; + } + S3fsCurl::hCurlShare = NULL; + return true; } void S3fsCurl::LockCurlShare(CURL* handle, curl_lock_data nLockData, curl_lock_access laccess, void* useptr) { - if(!hCurlShare){ - return; - } - S3fsCurl::callback_locks_t* locks = static_cast(useptr); - int res; - if(CURL_LOCK_DATA_DNS == nLockData){ - if(0 != (res = pthread_mutex_lock(&locks->dns))){ - S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res); - abort(); + if(!hCurlShare){ + return; } - }else if(CURL_LOCK_DATA_SSL_SESSION == nLockData){ - if(0 != (res = 
pthread_mutex_lock(&locks->ssl_session))){ - S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res); - abort(); + S3fsCurl::callback_locks_t* locks = static_cast(useptr); + int res; + if(CURL_LOCK_DATA_DNS == nLockData){ + if(0 != (res = pthread_mutex_lock(&locks->dns))){ + S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res); + abort(); + } + }else if(CURL_LOCK_DATA_SSL_SESSION == nLockData){ + if(0 != (res = pthread_mutex_lock(&locks->ssl_session))){ + S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res); + abort(); + } } - } } void S3fsCurl::UnlockCurlShare(CURL* handle, curl_lock_data nLockData, void* useptr) { - if(!hCurlShare){ - return; - } - S3fsCurl::callback_locks_t* locks = static_cast(useptr); - int res; - if(CURL_LOCK_DATA_DNS == nLockData){ - if(0 != (res = pthread_mutex_unlock(&locks->dns))){ - S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", res); - abort(); + if(!hCurlShare){ + return; } - }else if(CURL_LOCK_DATA_SSL_SESSION == nLockData){ - if(0 != (res = pthread_mutex_unlock(&locks->ssl_session))){ - S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", res); - abort(); + S3fsCurl::callback_locks_t* locks = static_cast(useptr); + int res; + if(CURL_LOCK_DATA_DNS == nLockData){ + if(0 != (res = pthread_mutex_unlock(&locks->dns))){ + S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", res); + abort(); + } + }else if(CURL_LOCK_DATA_SSL_SESSION == nLockData){ + if(0 != (res = pthread_mutex_unlock(&locks->ssl_session))){ + S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", res); + abort(); + } } - } } bool S3fsCurl::InitCryptMutex() { - return s3fs_init_crypt_mutex(); + return s3fs_init_crypt_mutex(); } bool S3fsCurl::DestroyCryptMutex() { - return s3fs_destroy_crypt_mutex(); + return s3fs_destroy_crypt_mutex(); } // homegrown timeout mechanism int S3fsCurl::CurlProgress(void *clientp, double dltotal, double dlnow, double ultotal, double ulnow) { - CURL* curl = static_cast(clientp); - time_t now = time(0); - progress_t p(dlnow, ulnow); + CURL* curl = 
static_cast(clientp); + time_t now = time(0); + progress_t p(dlnow, ulnow); - AutoLock lock(&S3fsCurl::curl_handles_lock); + AutoLock lock(&S3fsCurl::curl_handles_lock); - // any progress? - if(p != S3fsCurl::curl_progress[curl]){ - // yes! - S3fsCurl::curl_times[curl] = now; - S3fsCurl::curl_progress[curl] = p; - }else{ - // timeout? - if(now - S3fsCurl::curl_times[curl] > readwrite_timeout){ - S3FS_PRN_ERR("timeout now: %lld, curl_times[curl]: %lld, readwrite_timeout: %lld", - static_cast(now), static_cast((S3fsCurl::curl_times[curl])), static_cast(readwrite_timeout)); - return CURLE_ABORTED_BY_CALLBACK; + // any progress? + if(p != S3fsCurl::curl_progress[curl]){ + // yes! + S3fsCurl::curl_times[curl] = now; + S3fsCurl::curl_progress[curl] = p; + }else{ + // timeout? + if(now - S3fsCurl::curl_times[curl] > readwrite_timeout){ + S3FS_PRN_ERR("timeout now: %lld, curl_times[curl]: %lld, readwrite_timeout: %lld", + static_cast(now), static_cast((S3fsCurl::curl_times[curl])), static_cast(readwrite_timeout)); + return CURLE_ABORTED_BY_CALLBACK; + } } - } - - return 0; + return 0; } bool S3fsCurl::InitMimeType(const std::string& strFile) { - string MimeFile; - if(!strFile.empty()){ - MimeFile = strFile; - }else{ - // search default mime.types - string errPaths = DEFAULT_MIME_FILE; - struct stat st; - if(0 == stat(DEFAULT_MIME_FILE, &st)){ - MimeFile = DEFAULT_MIME_FILE; - }else if(compare_sysname("Darwin")){ - // for macos, search another default file. 
- if(0 == stat(SPECIAL_DARWIN_MIME_FILE, &st)){ - MimeFile = SPECIAL_DARWIN_MIME_FILE; - }else{ - errPaths += " and "; - errPaths += SPECIAL_DARWIN_MIME_FILE; - } - } - if(MimeFile.empty()){ - S3FS_PRN_WARN("Could not find mime.types files, you have to create file(%s) or specify mime option for existing mime.types file.", errPaths.c_str()); - return false; - } - } - S3FS_PRN_DBG("Try to load mime types from %s file.", MimeFile.c_str()); - - string line; - ifstream MT(MimeFile.c_str()); - if(MT.good()){ - S3FS_PRN_DBG("The old mime types are cleared to load new mime types."); - S3fsCurl::mimeTypes.clear(); - - while(getline(MT, line)){ - if(line[0]=='#'){ - continue; - } - if(line.empty()){ - continue; - } - - istringstream tmp(line); - string mimeType; - tmp >> mimeType; - while(tmp){ - string ext; - tmp >> ext; - if(ext.empty()){ - continue; + string MimeFile; + if(!strFile.empty()){ + MimeFile = strFile; + }else{ + // search default mime.types + string errPaths = DEFAULT_MIME_FILE; + struct stat st; + if(0 == stat(DEFAULT_MIME_FILE, &st)){ + MimeFile = DEFAULT_MIME_FILE; + }else if(compare_sysname("Darwin")){ + // for macos, search another default file. 
+ if(0 == stat(SPECIAL_DARWIN_MIME_FILE, &st)){ + MimeFile = SPECIAL_DARWIN_MIME_FILE; + }else{ + errPaths += " and "; + errPaths += SPECIAL_DARWIN_MIME_FILE; + } + } + if(MimeFile.empty()){ + S3FS_PRN_WARN("Could not find mime.types files, you have to create file(%s) or specify mime option for existing mime.types file.", errPaths.c_str()); + return false; } - S3fsCurl::mimeTypes[ext] = mimeType; - } } - S3FS_PRN_INIT_INFO("Loaded mime information from %s", MimeFile.c_str()); - }else{ - S3FS_PRN_WARN("Could not load mime types from %s, please check the existence and permissions of this file.", MimeFile.c_str()); - return false; - } - return true; + S3FS_PRN_DBG("Try to load mime types from %s file.", MimeFile.c_str()); + + string line; + ifstream MT(MimeFile.c_str()); + if(MT.good()){ + S3FS_PRN_DBG("The old mime types are cleared to load new mime types."); + S3fsCurl::mimeTypes.clear(); + + while(getline(MT, line)){ + if(line[0]=='#'){ + continue; + } + if(line.empty()){ + continue; + } + + istringstream tmp(line); + string mimeType; + tmp >> mimeType; + while(tmp){ + string ext; + tmp >> ext; + if(ext.empty()){ + continue; + } + S3fsCurl::mimeTypes[ext] = mimeType; + } + } + S3FS_PRN_INIT_INFO("Loaded mime information from %s", MimeFile.c_str()); + }else{ + S3FS_PRN_WARN("Could not load mime types from %s, please check the existence and permissions of this file.", MimeFile.c_str()); + return false; + } + return true; } void S3fsCurl::InitUserAgent() { - if(S3fsCurl::userAgent.empty()){ - S3fsCurl::userAgent = "s3fs/"; - S3fsCurl::userAgent += VERSION; - S3fsCurl::userAgent += " (commit hash "; - S3fsCurl::userAgent += COMMIT_HASH_VAL; - S3fsCurl::userAgent += "; "; - S3fsCurl::userAgent += s3fs_crypt_lib_name(); - S3fsCurl::userAgent += ")"; - S3fsCurl::userAgent += instance_name; - } + if(S3fsCurl::userAgent.empty()){ + S3fsCurl::userAgent = "s3fs/"; + S3fsCurl::userAgent += VERSION; + S3fsCurl::userAgent += " (commit hash "; + S3fsCurl::userAgent += 
COMMIT_HASH_VAL; + S3fsCurl::userAgent += "; "; + S3fsCurl::userAgent += s3fs_crypt_lib_name(); + S3fsCurl::userAgent += ")"; + S3fsCurl::userAgent += instance_name; + } } // @@ -751,420 +449,422 @@ void S3fsCurl::InitUserAgent() // string S3fsCurl::LookupMimeType(const string& name) { - if(!name.empty() && name[name.size() - 1] == '/'){ - return "application/x-directory"; - } + if(!name.empty() && name[name.size() - 1] == '/'){ + return "application/x-directory"; + } - string result("application/octet-stream"); - string::size_type last_pos = name.find_last_of('.'); - string::size_type first_pos = name.find_first_of('.'); - string prefix, ext, ext2; + string result("application/octet-stream"); + string::size_type last_pos = name.find_last_of('.'); + string::size_type first_pos = name.find_first_of('.'); + string prefix, ext, ext2; - // No dots in name, just return - if(last_pos == string::npos){ - return result; - } - // extract the last extension - ext = name.substr(1+last_pos, string::npos); + // No dots in name, just return + if(last_pos == string::npos){ + return result; + } + // extract the last extension + ext = name.substr(1+last_pos, string::npos); - if (last_pos != string::npos) { - // one dot was found, now look for another - if (first_pos != string::npos && first_pos < last_pos) { - prefix = name.substr(0, last_pos); - // Now get the second to last file extension - string::size_type next_pos = prefix.find_last_of('.'); - if (next_pos != string::npos) { - ext2 = prefix.substr(1+next_pos, string::npos); + if (last_pos != string::npos) { + // one dot was found, now look for another + if (first_pos != string::npos && first_pos < last_pos) { + prefix = name.substr(0, last_pos); + // Now get the second to last file extension + string::size_type next_pos = prefix.find_last_of('.'); + if (next_pos != string::npos) { + ext2 = prefix.substr(1+next_pos, string::npos); + } } - } - } + } - // if we get here, then we have an extension (ext) - mimes_t::const_iterator 
iter = S3fsCurl::mimeTypes.find(ext); - // if the last extension matches a mimeType, then return - // that mime type - if (iter != S3fsCurl::mimeTypes.end()) { - result = (*iter).second; + // if we get here, then we have an extension (ext) + mimes_t::const_iterator iter = S3fsCurl::mimeTypes.find(ext); + // if the last extension matches a mimeType, then return + // that mime type + if (iter != S3fsCurl::mimeTypes.end()) { + result = (*iter).second; + return result; + } + + // return with the default result if there isn't a second extension + if(first_pos == last_pos){ + return result; + } + + // Didn't find a mime-type for the first extension + // Look for second extension in mimeTypes, return if found + iter = S3fsCurl::mimeTypes.find(ext2); + if (iter != S3fsCurl::mimeTypes.end()) { + result = (*iter).second; + return result; + } + + // neither the last extension nor the second-to-last extension + // matched a mimeType, return the default mime type return result; - } - - // return with the default result if there isn't a second extension - if(first_pos == last_pos){ - return result; - } - - // Didn't find a mime-type for the first extension - // Look for second extension in mimeTypes, return if found - iter = S3fsCurl::mimeTypes.find(ext2); - if (iter != S3fsCurl::mimeTypes.end()) { - result = (*iter).second; - return result; - } - - // neither the last extension nor the second-to-last extension - // matched a mimeType, return the default mime type - return result; } bool S3fsCurl::LocateBundle() { - // See if environment variable CURL_CA_BUNDLE is set - // if so, check it, if it is a good path, then set the - // curl_ca_bundle variable to it - if(S3fsCurl::curl_ca_bundle.empty()){ - char* CURL_CA_BUNDLE = getenv("CURL_CA_BUNDLE"); - if(CURL_CA_BUNDLE != NULL) { - // check for existence and readability of the file - ifstream BF(CURL_CA_BUNDLE); - if(!BF.good()){ - S3FS_PRN_ERR("%s: file specified by CURL_CA_BUNDLE environment variable is not readable", 
program_name.c_str()); - return false; - } - BF.close(); - S3fsCurl::curl_ca_bundle.assign(CURL_CA_BUNDLE); - return true; - } - }else{ - // Already set ca bundle variable - return true; - } - - // not set via environment variable, look in likely locations - - /////////////////////////////////////////// - // following comment from curl's (7.21.2) acinclude.m4 file - /////////////////////////////////////////// - // dnl CURL_CHECK_CA_BUNDLE - // dnl ------------------------------------------------- - // dnl Check if a default ca-bundle should be used - // dnl - // dnl regarding the paths this will scan: - // dnl /etc/ssl/certs/ca-certificates.crt Debian systems - // dnl /etc/pki/tls/certs/ca-bundle.crt Redhat and Mandriva - // dnl /usr/share/ssl/certs/ca-bundle.crt old(er) Redhat - // dnl /usr/local/share/certs/ca-root.crt FreeBSD - // dnl /etc/ssl/cert.pem OpenBSD - // dnl /etc/ssl/certs/ (ca path) SUSE - /////////////////////////////////////////// - // Within CURL the above path should have been checked - // according to the OS. Thus, although we do not need - // to check files here, we will only examine some files. 
- // - ifstream BF("/etc/pki/tls/certs/ca-bundle.crt"); - if(BF.good()){ - BF.close(); - S3fsCurl::curl_ca_bundle.assign("/etc/pki/tls/certs/ca-bundle.crt"); - }else{ - BF.open("/etc/ssl/certs/ca-certificates.crt"); - if(BF.good()){ - BF.close(); - S3fsCurl::curl_ca_bundle.assign("/etc/ssl/certs/ca-certificates.crt"); - }else{ - BF.open("/usr/share/ssl/certs/ca-bundle.crt"); - if(BF.good()){ - BF.close(); - S3fsCurl::curl_ca_bundle.assign("/usr/share/ssl/certs/ca-bundle.crt"); - }else{ - BF.open("/usr/local/share/certs/ca-root.crt"); - if(BF.good()){ - BF.close(); - S3fsCurl::curl_ca_bundle.assign("/usr/share/ssl/certs/ca-bundle.crt"); - }else{ - S3FS_PRN_ERR("%s: /.../ca-bundle.crt is not readable", program_name.c_str()); - return false; + // See if environment variable CURL_CA_BUNDLE is set + // if so, check it, if it is a good path, then set the + // curl_ca_bundle variable to it + if(S3fsCurl::curl_ca_bundle.empty()){ + char* CURL_CA_BUNDLE = getenv("CURL_CA_BUNDLE"); + if(CURL_CA_BUNDLE != NULL) { + // check for existence and readability of the file + ifstream BF(CURL_CA_BUNDLE); + if(!BF.good()){ + S3FS_PRN_ERR("%s: file specified by CURL_CA_BUNDLE environment variable is not readable", program_name.c_str()); + return false; + } + BF.close(); + S3fsCurl::curl_ca_bundle.assign(CURL_CA_BUNDLE); + return true; } - } + }else{ + // Already set ca bundle variable + return true; } - } - return true; + + // not set via environment variable, look in likely locations + + /////////////////////////////////////////// + // following comment from curl's (7.21.2) acinclude.m4 file + /////////////////////////////////////////// + // dnl CURL_CHECK_CA_BUNDLE + // dnl ------------------------------------------------- + // dnl Check if a default ca-bundle should be used + // dnl + // dnl regarding the paths this will scan: + // dnl /etc/ssl/certs/ca-certificates.crt Debian systems + // dnl /etc/pki/tls/certs/ca-bundle.crt Redhat and Mandriva + // dnl 
/usr/share/ssl/certs/ca-bundle.crt old(er) Redhat + // dnl /usr/local/share/certs/ca-root.crt FreeBSD + // dnl /etc/ssl/cert.pem OpenBSD + // dnl /etc/ssl/certs/ (ca path) SUSE + /////////////////////////////////////////// + // Within CURL the above path should have been checked + // according to the OS. Thus, although we do not need + // to check files here, we will only examine some files. + // + ifstream BF("/etc/pki/tls/certs/ca-bundle.crt"); + if(BF.good()){ + BF.close(); + S3fsCurl::curl_ca_bundle.assign("/etc/pki/tls/certs/ca-bundle.crt"); + }else{ + BF.open("/etc/ssl/certs/ca-certificates.crt"); + if(BF.good()){ + BF.close(); + S3fsCurl::curl_ca_bundle.assign("/etc/ssl/certs/ca-certificates.crt"); + }else{ + BF.open("/usr/share/ssl/certs/ca-bundle.crt"); + if(BF.good()){ + BF.close(); + S3fsCurl::curl_ca_bundle.assign("/usr/share/ssl/certs/ca-bundle.crt"); + }else{ + BF.open("/usr/local/share/certs/ca-root.crt"); + if(BF.good()){ + BF.close(); + S3fsCurl::curl_ca_bundle.assign("/usr/share/ssl/certs/ca-bundle.crt"); + }else{ + S3FS_PRN_ERR("%s: /.../ca-bundle.crt is not readable", program_name.c_str()); + return false; + } + } + } + } + return true; } size_t S3fsCurl::WriteMemoryCallback(void* ptr, size_t blockSize, size_t numBlocks, void* data) { - BodyData* body = static_cast(data); + BodyData* body = static_cast(data); - if(!body->Append(ptr, blockSize, numBlocks)){ - S3FS_PRN_CRIT("BodyData.Append() returned false."); - S3FS_FUSE_EXIT(); - return -1; - } - return (blockSize * numBlocks); + if(!body->Append(ptr, blockSize, numBlocks)){ + S3FS_PRN_CRIT("BodyData.Append() returned false."); + S3FS_FUSE_EXIT(); + return -1; + } + return (blockSize * numBlocks); } size_t S3fsCurl::ReadCallback(void* ptr, size_t size, size_t nmemb, void* userp) { - S3fsCurl* pCurl = reinterpret_cast(userp); + S3fsCurl* pCurl = reinterpret_cast(userp); - if(1 > (size * nmemb)){ - return 0; - } - if(0 >= pCurl->postdata_remaining){ - return 0; - } - int copysize = 
std::min((int)(size * nmemb), pCurl->postdata_remaining); - memcpy(ptr, pCurl->postdata, copysize); + if(1 > (size * nmemb)){ + return 0; + } + if(0 >= pCurl->postdata_remaining){ + return 0; + } + int copysize = std::min((int)(size * nmemb), pCurl->postdata_remaining); + memcpy(ptr, pCurl->postdata, copysize); - pCurl->postdata_remaining = (pCurl->postdata_remaining > copysize ? (pCurl->postdata_remaining - copysize) : 0); - pCurl->postdata += static_cast(copysize); + pCurl->postdata_remaining = (pCurl->postdata_remaining > copysize ? (pCurl->postdata_remaining - copysize) : 0); + pCurl->postdata += static_cast(copysize); - return copysize; + return copysize; } size_t S3fsCurl::HeaderCallback(void* data, size_t blockSize, size_t numBlocks, void* userPtr) { - headers_t* headers = reinterpret_cast(userPtr); - string header(reinterpret_cast(data), blockSize * numBlocks); - string key; - istringstream ss(header); + headers_t* headers = reinterpret_cast(userPtr); + string header(reinterpret_cast(data), blockSize * numBlocks); + string key; + istringstream ss(header); - if(getline(ss, key, ':')){ - // Force to lower, only "x-amz" - string lkey = key; - transform(lkey.begin(), lkey.end(), lkey.begin(), static_cast(std::tolower)); - if(lkey.compare(0, 5, "x-amz") == 0){ - key = lkey; + if(getline(ss, key, ':')){ + // Force to lower, only "x-amz" + string lkey = key; + transform(lkey.begin(), lkey.end(), lkey.begin(), static_cast(std::tolower)); + if(lkey.compare(0, 5, "x-amz") == 0){ + key = lkey; + } + string value; + getline(ss, value); + (*headers)[key] = trim(value); } - string value; - getline(ss, value); - (*headers)[key] = trim(value); - } - return blockSize * numBlocks; + return blockSize * numBlocks; } size_t S3fsCurl::UploadReadCallback(void* ptr, size_t size, size_t nmemb, void* userp) { - S3fsCurl* pCurl = reinterpret_cast(userp); + S3fsCurl* pCurl = reinterpret_cast(userp); - if(1 > (size * nmemb)){ - return 0; - } - if(-1 == pCurl->partdata.fd || 0 >= 
pCurl->partdata.size){ - return 0; - } - // read size - ssize_t copysize = (size * nmemb) < (size_t)pCurl->partdata.size ? (size * nmemb) : (size_t)pCurl->partdata.size; - ssize_t readbytes; - ssize_t totalread; - // read and set - for(totalread = 0, readbytes = 0; totalread < copysize; totalread += readbytes){ - readbytes = pread(pCurl->partdata.fd, &((char*)ptr)[totalread], (copysize - totalread), pCurl->partdata.startpos + totalread); - if(0 == readbytes){ - // eof - break; - }else if(-1 == readbytes){ - // error - S3FS_PRN_ERR("read file error(%d).", errno); - return 0; + if(1 > (size * nmemb)){ + return 0; } - } - pCurl->partdata.startpos += totalread; - pCurl->partdata.size -= totalread; + if(-1 == pCurl->partdata.fd || 0 >= pCurl->partdata.size){ + return 0; + } + // read size + ssize_t copysize = (size * nmemb) < (size_t)pCurl->partdata.size ? (size * nmemb) : (size_t)pCurl->partdata.size; + ssize_t readbytes; + ssize_t totalread; + // read and set + for(totalread = 0, readbytes = 0; totalread < copysize; totalread += readbytes){ + readbytes = pread(pCurl->partdata.fd, &((char*)ptr)[totalread], (copysize - totalread), pCurl->partdata.startpos + totalread); + if(0 == readbytes){ + // eof + break; + }else if(-1 == readbytes){ + // error + S3FS_PRN_ERR("read file error(%d).", errno); + return 0; + } + } + pCurl->partdata.startpos += totalread; + pCurl->partdata.size -= totalread; - return totalread; + return totalread; } size_t S3fsCurl::DownloadWriteCallback(void* ptr, size_t size, size_t nmemb, void* userp) { - S3fsCurl* pCurl = reinterpret_cast(userp); + S3fsCurl* pCurl = reinterpret_cast(userp); - if(1 > (size * nmemb)){ - return 0; - } - if(-1 == pCurl->partdata.fd || 0 >= pCurl->partdata.size){ - return 0; - } - - // write size - ssize_t copysize = (size * nmemb) < (size_t)pCurl->partdata.size ? 
(size * nmemb) : (size_t)pCurl->partdata.size; - ssize_t writebytes; - ssize_t totalwrite; - - // write - for(totalwrite = 0, writebytes = 0; totalwrite < copysize; totalwrite += writebytes){ - writebytes = pwrite(pCurl->partdata.fd, &((char*)ptr)[totalwrite], (copysize - totalwrite), pCurl->partdata.startpos + totalwrite); - if(0 == writebytes){ - // eof? - break; - }else if(-1 == writebytes){ - // error - S3FS_PRN_ERR("write file error(%d).", errno); - return 0; + if(1 > (size * nmemb)){ + return 0; + } + if(-1 == pCurl->partdata.fd || 0 >= pCurl->partdata.size){ + return 0; } - } - pCurl->partdata.startpos += totalwrite; - pCurl->partdata.size -= totalwrite; - return totalwrite; + // write size + ssize_t copysize = (size * nmemb) < (size_t)pCurl->partdata.size ? (size * nmemb) : (size_t)pCurl->partdata.size; + ssize_t writebytes; + ssize_t totalwrite; + + // write + for(totalwrite = 0, writebytes = 0; totalwrite < copysize; totalwrite += writebytes){ + writebytes = pwrite(pCurl->partdata.fd, &((char*)ptr)[totalwrite], (copysize - totalwrite), pCurl->partdata.startpos + totalwrite); + if(0 == writebytes){ + // eof? 
+ break; + }else if(-1 == writebytes){ + // error + S3FS_PRN_ERR("write file error(%d).", errno); + return 0; + } + } + pCurl->partdata.startpos += totalwrite; + pCurl->partdata.size -= totalwrite; + + return totalwrite; } -bool S3fsCurl::SetCheckCertificate(bool isCertCheck) { - bool old = S3fsCurl::is_cert_check; - S3fsCurl::is_cert_check = isCertCheck; - return old; +bool S3fsCurl::SetCheckCertificate(bool isCertCheck) +{ + bool old = S3fsCurl::is_cert_check; + S3fsCurl::is_cert_check = isCertCheck; + return old; } bool S3fsCurl::SetDnsCache(bool isCache) { - bool old = S3fsCurl::is_dns_cache; - S3fsCurl::is_dns_cache = isCache; - return old; + bool old = S3fsCurl::is_dns_cache; + S3fsCurl::is_dns_cache = isCache; + return old; } bool S3fsCurl::SetSslSessionCache(bool isCache) { - bool old = S3fsCurl::is_ssl_session_cache; - S3fsCurl::is_ssl_session_cache = isCache; - return old; + bool old = S3fsCurl::is_ssl_session_cache; + S3fsCurl::is_ssl_session_cache = isCache; + return old; } long S3fsCurl::SetConnectTimeout(long timeout) { - long old = S3fsCurl::connect_timeout; - S3fsCurl::connect_timeout = timeout; - return old; + long old = S3fsCurl::connect_timeout; + S3fsCurl::connect_timeout = timeout; + return old; } time_t S3fsCurl::SetReadwriteTimeout(time_t timeout) { - time_t old = S3fsCurl::readwrite_timeout; - S3fsCurl::readwrite_timeout = timeout; - return old; + time_t old = S3fsCurl::readwrite_timeout; + S3fsCurl::readwrite_timeout = timeout; + return old; } int S3fsCurl::SetRetries(int count) { - int old = S3fsCurl::retries; - S3fsCurl::retries = count; - return old; + int old = S3fsCurl::retries; + S3fsCurl::retries = count; + return old; } bool S3fsCurl::SetPublicBucket(bool flag) { - bool old = S3fsCurl::is_public_bucket; - S3fsCurl::is_public_bucket = flag; - return old; + bool old = S3fsCurl::is_public_bucket; + S3fsCurl::is_public_bucket = flag; + return old; } acl_t S3fsCurl::SetDefaultAcl(acl_t acl) { - acl_t old = S3fsCurl::default_acl; - 
S3fsCurl::default_acl = acl; - return old; + acl_t old = S3fsCurl::default_acl; + S3fsCurl::default_acl = acl; + return old; } acl_t S3fsCurl::GetDefaultAcl() { - return S3fsCurl::default_acl; + return S3fsCurl::default_acl; } storage_class_t S3fsCurl::SetStorageClass(storage_class_t storage_class) { - storage_class_t old = S3fsCurl::storage_class; - S3fsCurl::storage_class = storage_class; - return old; + storage_class_t old = S3fsCurl::storage_class; + S3fsCurl::storage_class = storage_class; + return old; } bool S3fsCurl::PushbackSseKeys(string& onekey) { - onekey = trim(onekey); - if(onekey.empty()){ - return false; - } - if('#' == onekey[0]){ - return false; - } - // make base64 if the key is short enough, otherwise assume it is already so - string base64_key; - string raw_key; - if(onekey.length() > 256 / 8){ - char* p_key; - size_t keylength; - - if(NULL != (p_key = (char *)s3fs_decode64(onekey.c_str(), &keylength))) { - raw_key = string(p_key, keylength); - base64_key = onekey; - delete[] p_key; - } else { - S3FS_PRN_ERR("Failed to convert base64 to SSE-C key %s", onekey.c_str()); - return false; + onekey = trim(onekey); + if(onekey.empty()){ + return false; } - } else { - char* pbase64_key; - - if(NULL != (pbase64_key = s3fs_base64((unsigned char*)onekey.c_str(), onekey.length()))) { - raw_key = onekey; - base64_key = pbase64_key; - delete[] pbase64_key; - } else { - S3FS_PRN_ERR("Failed to convert base64 from SSE-C key %s", onekey.c_str()); - return false; + if('#' == onekey[0]){ + return false; } - } + // make base64 if the key is short enough, otherwise assume it is already so + string base64_key; + string raw_key; + if(onekey.length() > 256 / 8){ + char* p_key; + size_t keylength; - // make MD5 - string strMd5; - if(!make_md5_from_binary(raw_key.c_str(), raw_key.length(), strMd5)){ - S3FS_PRN_ERR("Could not make MD5 from SSE-C keys(%s).", raw_key.c_str()); - return false; - } - // mapped MD5 = SSE Key - sseckeymap_t md5map; - md5map.clear(); - 
md5map[strMd5] = base64_key; - S3fsCurl::sseckeys.push_back(md5map); - return true; + if(NULL != (p_key = (char *)s3fs_decode64(onekey.c_str(), &keylength))) { + raw_key = string(p_key, keylength); + base64_key = onekey; + delete[] p_key; + } else { + S3FS_PRN_ERR("Failed to convert base64 to SSE-C key %s", onekey.c_str()); + return false; + } + } else { + char* pbase64_key; + + if(NULL != (pbase64_key = s3fs_base64((unsigned char*)onekey.c_str(), onekey.length()))) { + raw_key = onekey; + base64_key = pbase64_key; + delete[] pbase64_key; + } else { + S3FS_PRN_ERR("Failed to convert base64 from SSE-C key %s", onekey.c_str()); + return false; + } + } + + // make MD5 + string strMd5; + if(!make_md5_from_binary(raw_key.c_str(), raw_key.length(), strMd5)){ + S3FS_PRN_ERR("Could not make MD5 from SSE-C keys(%s).", raw_key.c_str()); + return false; + } + // mapped MD5 = SSE Key + sseckeymap_t md5map; + md5map.clear(); + md5map[strMd5] = base64_key; + S3fsCurl::sseckeys.push_back(md5map); + + return true; } sse_type_t S3fsCurl::SetSseType(sse_type_t type) { - sse_type_t old = S3fsCurl::ssetype; - S3fsCurl::ssetype = type; - return old; + sse_type_t old = S3fsCurl::ssetype; + S3fsCurl::ssetype = type; + return old; } bool S3fsCurl::SetSseCKeys(const char* filepath) { - if(!filepath){ - S3FS_PRN_ERR("SSE-C keys filepath is empty."); - return false; - } - struct stat st; - if(0 != stat(filepath, &st)){ - S3FS_PRN_ERR("could not open use_sse keys file(%s).", filepath); - return false; - } - if(st.st_mode & (S_IXUSR | S_IRWXG | S_IRWXO)){ - S3FS_PRN_ERR("use_sse keys file %s should be 0600 permissions.", filepath); - return false; - } + if(!filepath){ + S3FS_PRN_ERR("SSE-C keys filepath is empty."); + return false; + } + struct stat st; + if(0 != stat(filepath, &st)){ + S3FS_PRN_ERR("could not open use_sse keys file(%s).", filepath); + return false; + } + if(st.st_mode & (S_IXUSR | S_IRWXG | S_IRWXO)){ + S3FS_PRN_ERR("use_sse keys file %s should be 0600 permissions.", 
filepath); + return false; + } - S3fsCurl::sseckeys.clear(); + S3fsCurl::sseckeys.clear(); - ifstream ssefs(filepath); - if(!ssefs.good()){ - S3FS_PRN_ERR("Could not open SSE-C keys file(%s).", filepath); - return false; - } + ifstream ssefs(filepath); + if(!ssefs.good()){ + S3FS_PRN_ERR("Could not open SSE-C keys file(%s).", filepath); + return false; + } - string line; - while(getline(ssefs, line)){ - S3fsCurl::PushbackSseKeys(line); - } - if(S3fsCurl::sseckeys.empty()){ - S3FS_PRN_ERR("There is no SSE Key in file(%s).", filepath); - return false; - } - return true; + string line; + while(getline(ssefs, line)){ + S3fsCurl::PushbackSseKeys(line); + } + if(S3fsCurl::sseckeys.empty()){ + S3FS_PRN_ERR("There is no SSE Key in file(%s).", filepath); + return false; + } + return true; } bool S3fsCurl::SetSseKmsid(const char* kmsid) { - if(!kmsid || '\0' == kmsid[0]){ - S3FS_PRN_ERR("SSE-KMS kms id is empty."); - return false; - } - S3fsCurl::ssekmsid = kmsid; - return true; + if(!kmsid || '\0' == kmsid[0]){ + S3FS_PRN_ERR("SSE-KMS kms id is empty."); + return false; + } + S3fsCurl::ssekmsid = kmsid; + return true; } // [NOTE] @@ -1172,64 +872,65 @@ bool S3fsCurl::SetSseKmsid(const char* kmsid) // this function check the integrity of the SSE data finally. 
bool S3fsCurl::FinalCheckSse() { - switch(S3fsCurl::ssetype){ - case sse_type_t::SSE_DISABLE: - S3fsCurl::ssekmsid.erase(); - return true; - case sse_type_t::SSE_S3: - S3fsCurl::ssekmsid.erase(); - return true; - case sse_type_t::SSE_C: - if(S3fsCurl::sseckeys.empty()){ - S3FS_PRN_ERR("sse type is SSE-C, but there is no custom key."); - return false; + switch(S3fsCurl::ssetype){ + case sse_type_t::SSE_DISABLE: + S3fsCurl::ssekmsid.erase(); + return true; + case sse_type_t::SSE_S3: + S3fsCurl::ssekmsid.erase(); + return true; + case sse_type_t::SSE_C: + if(S3fsCurl::sseckeys.empty()){ + S3FS_PRN_ERR("sse type is SSE-C, but there is no custom key."); + return false; + } + S3fsCurl::ssekmsid.erase(); + return true; + case sse_type_t::SSE_KMS: + if(S3fsCurl::ssekmsid.empty()){ + S3FS_PRN_ERR("sse type is SSE-KMS, but there is no specified kms id."); + return false; + } + if(!S3fsCurl::IsSignatureV4()){ + S3FS_PRN_ERR("sse type is SSE-KMS, but signature type is not v4. SSE-KMS require signature v4."); + return false; + } + return true; } - S3fsCurl::ssekmsid.erase(); - return true; - case sse_type_t::SSE_KMS: - if(S3fsCurl::ssekmsid.empty()){ - S3FS_PRN_ERR("sse type is SSE-KMS, but there is no specified kms id."); - return false; - } - if(!S3fsCurl::IsSignatureV4()){ - S3FS_PRN_ERR("sse type is SSE-KMS, but signature type is not v4. 
SSE-KMS require signature v4."); - return false; - } - return true; - } - S3FS_PRN_ERR("sse type is unknown(%d).", static_cast(S3fsCurl::ssetype)); - return false; + S3FS_PRN_ERR("sse type is unknown(%d).", static_cast(S3fsCurl::ssetype)); + + return false; } bool S3fsCurl::LoadEnvSseCKeys() { - char* envkeys = getenv("AWSSSECKEYS"); - if(NULL == envkeys){ - // nothing to do - return true; - } - S3fsCurl::sseckeys.clear(); + char* envkeys = getenv("AWSSSECKEYS"); + if(NULL == envkeys){ + // nothing to do + return true; + } + S3fsCurl::sseckeys.clear(); - istringstream fullkeys(envkeys); - string onekey; - while(getline(fullkeys, onekey, ':')){ - S3fsCurl::PushbackSseKeys(onekey); - } - if(S3fsCurl::sseckeys.empty()){ - S3FS_PRN_ERR("There is no SSE Key in environment(AWSSSECKEYS=%s).", envkeys); - return false; - } - return true; + istringstream fullkeys(envkeys); + string onekey; + while(getline(fullkeys, onekey, ':')){ + S3fsCurl::PushbackSseKeys(onekey); + } + if(S3fsCurl::sseckeys.empty()){ + S3FS_PRN_ERR("There is no SSE Key in environment(AWSSSECKEYS=%s).", envkeys); + return false; + } + return true; } bool S3fsCurl::LoadEnvSseKmsid() { - char* envkmsid = getenv("AWSSSEKMSID"); - if(NULL == envkmsid){ - // nothing to do - return true; - } - return S3fsCurl::SetSseKmsid(envkmsid); + char* envkmsid = getenv("AWSSSEKMSID"); + if(NULL == envkmsid){ + // nothing to do + return true; + } + return S3fsCurl::SetSseKmsid(envkmsid); } // @@ -1237,888 +938,888 @@ bool S3fsCurl::LoadEnvSseKmsid() // bool S3fsCurl::GetSseKey(string& md5, string& ssekey) { - for(sseckeylist_t::const_iterator iter = S3fsCurl::sseckeys.begin(); iter != S3fsCurl::sseckeys.end(); ++iter){ - if(0 == md5.length() || md5 == (*iter).begin()->first){ - md5 = iter->begin()->first; - ssekey = iter->begin()->second; - return true; + for(sseckeylist_t::const_iterator iter = S3fsCurl::sseckeys.begin(); iter != S3fsCurl::sseckeys.end(); ++iter){ + if(0 == md5.length() || md5 == (*iter).begin()->first){ 
+ md5 = iter->begin()->first; + ssekey = iter->begin()->second; + return true; + } } - } - return false; + return false; } bool S3fsCurl::GetSseKeyMd5(int pos, string& md5) { - if(pos < 0){ - return false; - } - if(S3fsCurl::sseckeys.size() <= static_cast(pos)){ - return false; - } - int cnt = 0; - for(sseckeylist_t::const_iterator iter = S3fsCurl::sseckeys.begin(); iter != S3fsCurl::sseckeys.end(); ++iter, ++cnt){ - if(pos == cnt){ - md5 = iter->begin()->first; - return true; + if(pos < 0){ + return false; } - } - return false; + if(S3fsCurl::sseckeys.size() <= static_cast(pos)){ + return false; + } + int cnt = 0; + for(sseckeylist_t::const_iterator iter = S3fsCurl::sseckeys.begin(); iter != S3fsCurl::sseckeys.end(); ++iter, ++cnt){ + if(pos == cnt){ + md5 = iter->begin()->first; + return true; + } + } + return false; } int S3fsCurl::GetSseKeyCount() { - return S3fsCurl::sseckeys.size(); + return S3fsCurl::sseckeys.size(); } bool S3fsCurl::SetContentMd5(bool flag) { - bool old = S3fsCurl::is_content_md5; - S3fsCurl::is_content_md5 = flag; - return old; + bool old = S3fsCurl::is_content_md5; + S3fsCurl::is_content_md5 = flag; + return old; } bool S3fsCurl::SetVerbose(bool flag) { - bool old = S3fsCurl::is_verbose; - S3fsCurl::is_verbose = flag; - return old; + bool old = S3fsCurl::is_verbose; + S3fsCurl::is_verbose = flag; + return old; } bool S3fsCurl::SetDumpBody(bool flag) { - bool old = S3fsCurl::is_dump_body; - S3fsCurl::is_dump_body = flag; - return old; + bool old = S3fsCurl::is_dump_body; + S3fsCurl::is_dump_body = flag; + return old; } bool S3fsCurl::SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey) { - if((!S3fsCurl::is_ibm_iam_auth && (!AccessKeyId || '\0' == AccessKeyId[0])) || !SecretAccessKey || '\0' == SecretAccessKey[0]){ - return false; - } - AWSAccessKeyId = AccessKeyId; - AWSSecretAccessKey = SecretAccessKey; - return true; + if((!S3fsCurl::is_ibm_iam_auth && (!AccessKeyId || '\0' == AccessKeyId[0])) || !SecretAccessKey || '\0' 
== SecretAccessKey[0]){ + return false; + } + AWSAccessKeyId = AccessKeyId; + AWSSecretAccessKey = SecretAccessKey; + return true; } bool S3fsCurl::SetAccessKeyWithSessionToken(const char* AccessKeyId, const char* SecretAccessKey, const char* SessionToken) { - bool access_key_is_empty = !AccessKeyId || '\0' == AccessKeyId[0]; - bool secret_access_key_is_empty = !SecretAccessKey || '\0' == SecretAccessKey[0]; - bool session_token_is_empty = !SessionToken || '\0' == SessionToken[0]; - if((!S3fsCurl::is_ibm_iam_auth && access_key_is_empty) || secret_access_key_is_empty || session_token_is_empty){ - return false; - } - AWSAccessKeyId = AccessKeyId; - AWSSecretAccessKey = SecretAccessKey; - AWSAccessToken = SessionToken; - S3fsCurl::is_use_session_token = true; - return true; + bool access_key_is_empty = !AccessKeyId || '\0' == AccessKeyId[0]; + bool secret_access_key_is_empty = !SecretAccessKey || '\0' == SecretAccessKey[0]; + bool session_token_is_empty = !SessionToken || '\0' == SessionToken[0]; + + if((!S3fsCurl::is_ibm_iam_auth && access_key_is_empty) || secret_access_key_is_empty || session_token_is_empty){ + return false; + } + AWSAccessKeyId = AccessKeyId; + AWSSecretAccessKey = SecretAccessKey; + AWSAccessToken = SessionToken; + S3fsCurl::is_use_session_token = true; + return true; } long S3fsCurl::SetSslVerifyHostname(long value) { - if(0 != value && 1 != value){ - return -1; - } - long old = S3fsCurl::ssl_verify_hostname; - S3fsCurl::ssl_verify_hostname = value; - return old; + if(0 != value && 1 != value){ + return -1; + } + long old = S3fsCurl::ssl_verify_hostname; + S3fsCurl::ssl_verify_hostname = value; + return old; } bool S3fsCurl::SetIsIBMIAMAuth(bool flag) { - bool old = S3fsCurl::is_ibm_iam_auth; - S3fsCurl::is_ibm_iam_auth = flag; - return old; + bool old = S3fsCurl::is_ibm_iam_auth; + S3fsCurl::is_ibm_iam_auth = flag; + return old; } bool S3fsCurl::SetIsECS(bool flag) { - bool old = S3fsCurl::is_ecs; - S3fsCurl::is_ecs = flag; - return old; + bool 
old = S3fsCurl::is_ecs; + S3fsCurl::is_ecs = flag; + return old; } string S3fsCurl::SetIAMRole(const char* role) { - string old = S3fsCurl::IAM_role; - S3fsCurl::IAM_role = role ? role : ""; - return old; + string old = S3fsCurl::IAM_role; + S3fsCurl::IAM_role = role ? role : ""; + return old; } size_t S3fsCurl::SetIAMFieldCount(size_t field_count) { - size_t old = S3fsCurl::IAM_field_count; - S3fsCurl::IAM_field_count = field_count; - return old; + size_t old = S3fsCurl::IAM_field_count; + S3fsCurl::IAM_field_count = field_count; + return old; } string S3fsCurl::SetIAMCredentialsURL(const char* url) { - string old = S3fsCurl::IAM_cred_url; - S3fsCurl::IAM_cred_url = url ? url : ""; - return old; + string old = S3fsCurl::IAM_cred_url; + S3fsCurl::IAM_cred_url = url ? url : ""; + return old; } string S3fsCurl::SetIAMTokenField(const char* token_field) { - string old = S3fsCurl::IAM_token_field; - S3fsCurl::IAM_token_field = token_field ? token_field : ""; - return old; + string old = S3fsCurl::IAM_token_field; + S3fsCurl::IAM_token_field = token_field ? token_field : ""; + return old; } string S3fsCurl::SetIAMExpiryField(const char* expiry_field) { - string old = S3fsCurl::IAM_expiry_field; - S3fsCurl::IAM_expiry_field = expiry_field ? expiry_field : ""; - return old; + string old = S3fsCurl::IAM_expiry_field; + S3fsCurl::IAM_expiry_field = expiry_field ? 
expiry_field : ""; + return old; } bool S3fsCurl::SetMultipartSize(off_t size) { - size = size * 1024 * 1024; - if(size < MIN_MULTIPART_SIZE){ - return false; - } - S3fsCurl::multipart_size = size; - return true; + size = size * 1024 * 1024; + if(size < MIN_MULTIPART_SIZE){ + return false; + } + S3fsCurl::multipart_size = size; + return true; } int S3fsCurl::SetMaxParallelCount(int value) { - int old = S3fsCurl::max_parallel_cnt; - S3fsCurl::max_parallel_cnt = value; - return old; + int old = S3fsCurl::max_parallel_cnt; + S3fsCurl::max_parallel_cnt = value; + return old; } int S3fsCurl::SetMaxMultiRequest(int max) { - int old = S3fsCurl::max_multireq; - S3fsCurl::max_multireq = max; - return old; + int old = S3fsCurl::max_multireq; + S3fsCurl::max_multireq = max; + return old; } bool S3fsCurl::UploadMultipartPostCallback(S3fsCurl* s3fscurl) { - if(!s3fscurl){ - return false; - } + if(!s3fscurl){ + return false; + } - return s3fscurl->UploadMultipartPostComplete(); + return s3fscurl->UploadMultipartPostComplete(); } bool S3fsCurl::MixMultipartPostCallback(S3fsCurl* s3fscurl) { - if(!s3fscurl){ - return false; - } + if(!s3fscurl){ + return false; + } - return s3fscurl->MixMultipartPostComplete(); + return s3fscurl->MixMultipartPostComplete(); } S3fsCurl* S3fsCurl::UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl) { - if(!s3fscurl){ - return NULL; - } - // parse and get part_num, upload_id. - string upload_id; - string part_num_str; - int part_num; - off_t tmp_part_num = 0; - if(!get_keyword_value(s3fscurl->url, "uploadId", upload_id)){ - return NULL; - } - if(!get_keyword_value(s3fscurl->url, "partNumber", part_num_str)){ - return NULL; - } - if(!try_strtoofft(part_num_str.c_str(), tmp_part_num, /*base=*/ 10)){ - return NULL; - } - part_num = static_cast(tmp_part_num); + if(!s3fscurl){ + return NULL; + } + // parse and get part_num, upload_id. 
+ string upload_id; + string part_num_str; + int part_num; + off_t tmp_part_num = 0; + if(!get_keyword_value(s3fscurl->url, "uploadId", upload_id)){ + return NULL; + } + if(!get_keyword_value(s3fscurl->url, "partNumber", part_num_str)){ + return NULL; + } + if(!try_strtoofft(part_num_str.c_str(), tmp_part_num, /*base=*/ 10)){ + return NULL; + } + part_num = static_cast(tmp_part_num); - if(s3fscurl->retry_count >= S3fsCurl::retries){ - S3FS_PRN_ERR("Over retry count(%d) limit(%s:%d).", s3fscurl->retry_count, s3fscurl->path.c_str(), part_num); - return NULL; - } + if(s3fscurl->retry_count >= S3fsCurl::retries){ + S3FS_PRN_ERR("Over retry count(%d) limit(%s:%d).", s3fscurl->retry_count, s3fscurl->path.c_str(), part_num); + return NULL; + } - // duplicate request - S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe()); - newcurl->partdata.etaglist = s3fscurl->partdata.etaglist; - newcurl->partdata.etagpos = s3fscurl->partdata.etagpos; - newcurl->partdata.fd = s3fscurl->partdata.fd; - newcurl->partdata.startpos = s3fscurl->b_partdata_startpos; - newcurl->partdata.size = s3fscurl->b_partdata_size; - newcurl->b_partdata_startpos = s3fscurl->b_partdata_startpos; - newcurl->b_partdata_size = s3fscurl->b_partdata_size; - newcurl->retry_count = s3fscurl->retry_count + 1; - newcurl->op = s3fscurl->op; - newcurl->type = s3fscurl->type; + // duplicate request + S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe()); + newcurl->partdata.etaglist = s3fscurl->partdata.etaglist; + newcurl->partdata.etagpos = s3fscurl->partdata.etagpos; + newcurl->partdata.fd = s3fscurl->partdata.fd; + newcurl->partdata.startpos = s3fscurl->b_partdata_startpos; + newcurl->partdata.size = s3fscurl->b_partdata_size; + newcurl->b_partdata_startpos = s3fscurl->b_partdata_startpos; + newcurl->b_partdata_size = s3fscurl->b_partdata_size; + newcurl->retry_count = s3fscurl->retry_count + 1; + newcurl->op = s3fscurl->op; + newcurl->type = s3fscurl->type; - // setup new curl object - if(0 != 
newcurl->UploadMultipartPostSetup(s3fscurl->path.c_str(), part_num, upload_id)){ - S3FS_PRN_ERR("Could not duplicate curl object(%s:%d).", s3fscurl->path.c_str(), part_num); - delete newcurl; - return NULL; - } - return newcurl; + // setup new curl object + if(0 != newcurl->UploadMultipartPostSetup(s3fscurl->path.c_str(), part_num, upload_id)){ + S3FS_PRN_ERR("Could not duplicate curl object(%s:%d).", s3fscurl->path.c_str(), part_num); + delete newcurl; + return NULL; + } + return newcurl; } S3fsCurl* S3fsCurl::CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl) { - if(!s3fscurl){ - return NULL; - } - // parse and get part_num, upload_id. - string upload_id; - string part_num_str; - int part_num; - off_t tmp_part_num = 0; - if(!get_keyword_value(s3fscurl->url, "uploadId", upload_id)){ - return NULL; - } - if(!get_keyword_value(s3fscurl->url, "partNumber", part_num_str)){ - return NULL; - } - if(!try_strtoofft(part_num_str.c_str(), tmp_part_num, /*base=*/ 10)){ - return NULL; - } - part_num = static_cast(tmp_part_num); + if(!s3fscurl){ + return NULL; + } + // parse and get part_num, upload_id. 
+ string upload_id; + string part_num_str; + int part_num; + off_t tmp_part_num = 0; + if(!get_keyword_value(s3fscurl->url, "uploadId", upload_id)){ + return NULL; + } + if(!get_keyword_value(s3fscurl->url, "partNumber", part_num_str)){ + return NULL; + } + if(!try_strtoofft(part_num_str.c_str(), tmp_part_num, /*base=*/ 10)){ + return NULL; + } + part_num = static_cast(tmp_part_num); - if(s3fscurl->retry_count >= S3fsCurl::retries){ - S3FS_PRN_ERR("Over retry count(%d) limit(%s:%d).", s3fscurl->retry_count, s3fscurl->path.c_str(), part_num); - return NULL; - } + if(s3fscurl->retry_count >= S3fsCurl::retries){ + S3FS_PRN_ERR("Over retry count(%d) limit(%s:%d).", s3fscurl->retry_count, s3fscurl->path.c_str(), part_num); + return NULL; + } - // duplicate request - S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe()); - newcurl->partdata.etaglist = s3fscurl->partdata.etaglist; - newcurl->partdata.etagpos = s3fscurl->partdata.etagpos; - newcurl->b_from = s3fscurl->b_from; - newcurl->b_meta = s3fscurl->b_meta; - newcurl->retry_count = s3fscurl->retry_count + 1; - newcurl->op = s3fscurl->op; - newcurl->type = s3fscurl->type; + // duplicate request + S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe()); + newcurl->partdata.etaglist = s3fscurl->partdata.etaglist; + newcurl->partdata.etagpos = s3fscurl->partdata.etagpos; + newcurl->b_from = s3fscurl->b_from; + newcurl->b_meta = s3fscurl->b_meta; + newcurl->retry_count = s3fscurl->retry_count + 1; + newcurl->op = s3fscurl->op; + newcurl->type = s3fscurl->type; - // setup new curl object - if(0 != newcurl->CopyMultipartPostSetup(s3fscurl->b_from.c_str(), s3fscurl->path.c_str(), part_num, upload_id, s3fscurl->b_meta)){ - S3FS_PRN_ERR("Could not duplicate curl object(%s:%d).", s3fscurl->path.c_str(), part_num); - delete newcurl; - return NULL; - } - return newcurl; + // setup new curl object + if(0 != newcurl->CopyMultipartPostSetup(s3fscurl->b_from.c_str(), s3fscurl->path.c_str(), part_num, upload_id, s3fscurl->b_meta)){ 
+ S3FS_PRN_ERR("Could not duplicate curl object(%s:%d).", s3fscurl->path.c_str(), part_num); + delete newcurl; + return NULL; + } + return newcurl; } S3fsCurl* S3fsCurl::MixMultipartPostRetryCallback(S3fsCurl* s3fscurl) { - if(!s3fscurl){ - return NULL; - } + if(!s3fscurl){ + return NULL; + } - S3fsCurl* pcurl; - if(-1 == s3fscurl->partdata.fd){ - pcurl = S3fsCurl::CopyMultipartPostRetryCallback(s3fscurl); - }else{ - pcurl = S3fsCurl::UploadMultipartPostRetryCallback(s3fscurl); - } - return pcurl; + S3fsCurl* pcurl; + if(-1 == s3fscurl->partdata.fd){ + pcurl = S3fsCurl::CopyMultipartPostRetryCallback(s3fscurl); + }else{ + pcurl = S3fsCurl::UploadMultipartPostRetryCallback(s3fscurl); + } + return pcurl; } int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, int fd) { - int result; - string upload_id; - struct stat st; - int fd2; - etaglist_t list; - off_t remaining_bytes; - S3fsCurl s3fscurl(true); + int result; + string upload_id; + struct stat st; + int fd2; + etaglist_t list; + off_t remaining_bytes; + S3fsCurl s3fscurl(true); - S3FS_PRN_INFO3("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd); + S3FS_PRN_INFO3("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd); - // duplicate fd - if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){ - S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno); - if(-1 != fd2){ - close(fd2); + // duplicate fd + if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){ + S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno); + if(-1 != fd2){ + close(fd2); + } + return -errno; } - return -errno; - } - if(-1 == fstat(fd2, &st)){ - S3FS_PRN_ERR("Invalid file descriptor(errno=%d)", errno); - close(fd2); - return -errno; - } - - if(0 != (result = s3fscurl.PreMultipartPostRequest(tpath, meta, upload_id, false))){ - close(fd2); - return result; - } - s3fscurl.DestroyCurlHandle(); - - // Initialize S3fsMultiCurl - S3fsMultiCurl curlmulti(GetMaxParallelCount()); - 
curlmulti.SetSuccessCallback(S3fsCurl::UploadMultipartPostCallback); - curlmulti.SetRetryCallback(S3fsCurl::UploadMultipartPostRetryCallback); - - // cycle through open fd, pulling off 10MB chunks at a time - for(remaining_bytes = st.st_size; 0 < remaining_bytes; ){ - off_t chunk = remaining_bytes > S3fsCurl::multipart_size ? S3fsCurl::multipart_size : remaining_bytes; - - // s3fscurl sub object - S3fsCurl* s3fscurl_para = new S3fsCurl(true); - s3fscurl_para->partdata.fd = fd2; - s3fscurl_para->partdata.startpos = st.st_size - remaining_bytes; - s3fscurl_para->partdata.size = chunk; - s3fscurl_para->b_partdata_startpos = s3fscurl_para->partdata.startpos; - s3fscurl_para->b_partdata_size = s3fscurl_para->partdata.size; - s3fscurl_para->partdata.add_etag_list(&list); - - // initiate upload part for parallel - if(0 != (result = s3fscurl_para->UploadMultipartPostSetup(tpath, list.size(), upload_id))){ - S3FS_PRN_ERR("failed uploading part setup(%d)", result); - close(fd2); - delete s3fscurl_para; - return result; - } - - // set into parallel object - if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){ - S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath); - close(fd2); - delete s3fscurl_para; - return -1; - } - - remaining_bytes -= chunk; - } - - // Multi request - if(0 != (result = curlmulti.Request())){ - S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result); - - S3fsCurl s3fscurl_abort(true); - int result2 = s3fscurl_abort.AbortMultipartUpload(tpath, upload_id); - s3fscurl_abort.DestroyCurlHandle(); - if(result2 != 0){ - S3FS_PRN_ERR("error aborting multipart upload(errno=%d).", result2); - } - - return result; - } - - close(fd2); - - if(0 != (result = s3fscurl.CompleteMultipartPostRequest(tpath, upload_id, list))){ - return result; - } - return 0; -} - -int S3fsCurl::ParallelMixMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, const fdpage_list_t& mixuppages) -{ - int result; - string upload_id; - struct stat st; - int 
fd2; - etaglist_t list; - S3fsCurl s3fscurl(true); - - S3FS_PRN_INFO3("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd); - - // duplicate fd - if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){ - S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno); - if(-1 != fd2){ - close(fd2); - } - return -errno; - } - if(-1 == fstat(fd2, &st)){ - S3FS_PRN_ERR("Invalid file descriptor(errno=%d)", errno); - close(fd2); - return -errno; - } - - if(0 != (result = s3fscurl.PreMultipartPostRequest(tpath, meta, upload_id, true))){ - close(fd2); - return result; - } - s3fscurl.DestroyCurlHandle(); - - // for copy multipart - string srcresource; - string srcurl; - MakeUrlResource(get_realpath(tpath).c_str(), srcresource, srcurl); - meta["Content-Type"] = S3fsCurl::LookupMimeType(string(tpath)); - meta["x-amz-copy-source"] = srcresource; - - // Initialize S3fsMultiCurl - S3fsMultiCurl curlmulti(GetMaxParallelCount()); - curlmulti.SetSuccessCallback(S3fsCurl::MixMultipartPostCallback); - curlmulti.SetRetryCallback(S3fsCurl::MixMultipartPostRetryCallback); - - for(fdpage_list_t::const_iterator iter = mixuppages.begin(); iter != mixuppages.end(); ++iter){ - // s3fscurl sub object - S3fsCurl* s3fscurl_para = new S3fsCurl(true); - - if(iter->modified){ - // Multipart upload - s3fscurl_para->partdata.fd = fd2; - s3fscurl_para->partdata.startpos = iter->offset; - s3fscurl_para->partdata.size = iter->bytes; - s3fscurl_para->b_partdata_startpos = s3fscurl_para->partdata.startpos; - s3fscurl_para->b_partdata_size = s3fscurl_para->partdata.size; - s3fscurl_para->partdata.add_etag_list(&list); - - S3FS_PRN_INFO3("Upload Part [tpath=%s][start=%zd][size=%zd][part=%zu]", SAFESTRPTR(tpath), iter->offset, iter->bytes, list.size()); - - // initiate upload part for parallel - if(0 != (result = s3fscurl_para->UploadMultipartPostSetup(tpath, list.size(), upload_id))){ - S3FS_PRN_ERR("failed uploading part setup(%d)", result); + if(-1 == fstat(fd2, &st)){ + S3FS_PRN_ERR("Invalid file 
descriptor(errno=%d)", errno); close(fd2); - delete s3fscurl_para; - return result; - } - }else{ - // Multipart copy - ostringstream strrange; - strrange << "bytes=" << iter->offset << "-" << (iter->offset + iter->bytes - 1); - meta["x-amz-copy-source-range"] = strrange.str(); - strrange.str(""); - strrange.clear(stringstream::goodbit); + return -errno; + } - s3fscurl_para->b_from = SAFESTRPTR(tpath); - s3fscurl_para->b_meta = meta; - s3fscurl_para->partdata.add_etag_list(&list); - - S3FS_PRN_INFO3("Copy Part [tpath=%s][start=%zd][size=%zd][part=%zu]", SAFESTRPTR(tpath), iter->offset, iter->bytes, list.size()); - - // initiate upload part for parallel - if(0 != (result = s3fscurl_para->CopyMultipartPostSetup(tpath, tpath, list.size(), upload_id, meta))){ - S3FS_PRN_ERR("failed uploading part setup(%d)", result); + if(0 != (result = s3fscurl.PreMultipartPostRequest(tpath, meta, upload_id, false))){ close(fd2); - delete s3fscurl_para; return result; - } } - - // set into parallel object - if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){ - S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath); - close(fd2); - delete s3fscurl_para; - return -1; - } - } - - // Multi request - if(0 != (result = curlmulti.Request())){ - S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result); - - S3fsCurl s3fscurl_abort(true); - int result2 = s3fscurl_abort.AbortMultipartUpload(tpath, upload_id); - s3fscurl_abort.DestroyCurlHandle(); - if(result2 != 0){ - S3FS_PRN_ERR("error aborting multipart upload(errno=%d).", result2); - } - close(fd2); - return result; - } - close(fd2); - - if(0 != (result = s3fscurl.CompleteMultipartPostRequest(tpath, upload_id, list))){ - return result; - } - return 0; -} - -S3fsCurl* S3fsCurl::ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl) -{ - int result; - - if(!s3fscurl){ - return NULL; - } - if(s3fscurl->retry_count >= S3fsCurl::retries){ - S3FS_PRN_ERR("Over retry count(%d) limit(%s).", s3fscurl->retry_count, 
s3fscurl->path.c_str()); - return NULL; - } - - // duplicate request(setup new curl object) - S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe()); - if(0 != (result = newcurl->PreGetObjectRequest(s3fscurl->path.c_str(), s3fscurl->partdata.fd, - s3fscurl->partdata.startpos, s3fscurl->partdata.size, s3fscurl->b_ssetype, s3fscurl->b_ssevalue))) - { - S3FS_PRN_ERR("failed downloading part setup(%d)", result); - delete newcurl; - return NULL;; - } - newcurl->retry_count = s3fscurl->retry_count + 1; - - return newcurl; -} - -int S3fsCurl::ParallelGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size) -{ - S3FS_PRN_INFO3("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd); - - sse_type_t ssetype = sse_type_t::SSE_DISABLE; - string ssevalue; - if(!get_object_sse_type(tpath, ssetype, ssevalue)){ - S3FS_PRN_WARN("Failed to get SSE type for file(%s).", SAFESTRPTR(tpath)); - } - int result = 0; - ssize_t remaining_bytes; - - // cycle through open fd, pulling off 10MB chunks at a time - for(remaining_bytes = size; 0 < remaining_bytes; ){ - S3fsMultiCurl curlmulti(GetMaxParallelCount()); - int para_cnt; - off_t chunk; + s3fscurl.DestroyCurlHandle(); // Initialize S3fsMultiCurl - //curlmulti.SetSuccessCallback(NULL); // not need to set success callback - curlmulti.SetRetryCallback(S3fsCurl::ParallelGetObjectRetryCallback); + S3fsMultiCurl curlmulti(GetMaxParallelCount()); + curlmulti.SetSuccessCallback(S3fsCurl::UploadMultipartPostCallback); + curlmulti.SetRetryCallback(S3fsCurl::UploadMultipartPostRetryCallback); - // Loop for setup parallel upload(multipart) request. - for(para_cnt = 0; para_cnt < S3fsCurl::max_parallel_cnt && 0 < remaining_bytes; para_cnt++, remaining_bytes -= chunk){ - // chunk size - chunk = remaining_bytes > S3fsCurl::multipart_size ? 
S3fsCurl::multipart_size : remaining_bytes; + // cycle through open fd, pulling off 10MB chunks at a time + for(remaining_bytes = st.st_size; 0 < remaining_bytes; ){ + off_t chunk = remaining_bytes > S3fsCurl::multipart_size ? S3fsCurl::multipart_size : remaining_bytes; - // s3fscurl sub object - S3fsCurl* s3fscurl_para = new S3fsCurl(); - if(0 != (result = s3fscurl_para->PreGetObjectRequest(tpath, fd, (start + size - remaining_bytes), chunk, ssetype, ssevalue))){ - S3FS_PRN_ERR("failed downloading part setup(%d)", result); - delete s3fscurl_para; - return result; - } + // s3fscurl sub object + S3fsCurl* s3fscurl_para = new S3fsCurl(true); + s3fscurl_para->partdata.fd = fd2; + s3fscurl_para->partdata.startpos = st.st_size - remaining_bytes; + s3fscurl_para->partdata.size = chunk; + s3fscurl_para->b_partdata_startpos = s3fscurl_para->partdata.startpos; + s3fscurl_para->b_partdata_size = s3fscurl_para->partdata.size; + s3fscurl_para->partdata.add_etag_list(&list); - // set into parallel object - if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){ - S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath); - delete s3fscurl_para; - return -1; - } + // initiate upload part for parallel + if(0 != (result = s3fscurl_para->UploadMultipartPostSetup(tpath, list.size(), upload_id))){ + S3FS_PRN_ERR("failed uploading part setup(%d)", result); + close(fd2); + delete s3fscurl_para; + return result; + } + + // set into parallel object + if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){ + S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath); + close(fd2); + delete s3fscurl_para; + return -1; + } + remaining_bytes -= chunk; } // Multi request if(0 != (result = curlmulti.Request())){ - S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result); - break; + S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result); + + S3fsCurl s3fscurl_abort(true); + int result2 = s3fscurl_abort.AbortMultipartUpload(tpath, upload_id); + 
s3fscurl_abort.DestroyCurlHandle(); + if(result2 != 0){ + S3FS_PRN_ERR("error aborting multipart upload(errno=%d).", result2); + } + + return result; } - // reinit for loop. - curlmulti.Clear(); - } - return result; + close(fd2); + + if(0 != (result = s3fscurl.CompleteMultipartPostRequest(tpath, upload_id, list))){ + return result; + } + return 0; +} + +int S3fsCurl::ParallelMixMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, const fdpage_list_t& mixuppages) +{ + int result; + string upload_id; + struct stat st; + int fd2; + etaglist_t list; + S3fsCurl s3fscurl(true); + + S3FS_PRN_INFO3("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd); + + // duplicate fd + if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){ + S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno); + if(-1 != fd2){ + close(fd2); + } + return -errno; + } + if(-1 == fstat(fd2, &st)){ + S3FS_PRN_ERR("Invalid file descriptor(errno=%d)", errno); + close(fd2); + return -errno; + } + + if(0 != (result = s3fscurl.PreMultipartPostRequest(tpath, meta, upload_id, true))){ + close(fd2); + return result; + } + s3fscurl.DestroyCurlHandle(); + + // for copy multipart + string srcresource; + string srcurl; + MakeUrlResource(get_realpath(tpath).c_str(), srcresource, srcurl); + meta["Content-Type"] = S3fsCurl::LookupMimeType(string(tpath)); + meta["x-amz-copy-source"] = srcresource; + + // Initialize S3fsMultiCurl + S3fsMultiCurl curlmulti(GetMaxParallelCount()); + curlmulti.SetSuccessCallback(S3fsCurl::MixMultipartPostCallback); + curlmulti.SetRetryCallback(S3fsCurl::MixMultipartPostRetryCallback); + + for(fdpage_list_t::const_iterator iter = mixuppages.begin(); iter != mixuppages.end(); ++iter){ + // s3fscurl sub object + S3fsCurl* s3fscurl_para = new S3fsCurl(true); + + if(iter->modified){ + // Multipart upload + s3fscurl_para->partdata.fd = fd2; + s3fscurl_para->partdata.startpos = iter->offset; + s3fscurl_para->partdata.size = iter->bytes; + s3fscurl_para->b_partdata_startpos = 
s3fscurl_para->partdata.startpos; + s3fscurl_para->b_partdata_size = s3fscurl_para->partdata.size; + s3fscurl_para->partdata.add_etag_list(&list); + + S3FS_PRN_INFO3("Upload Part [tpath=%s][start=%zd][size=%zd][part=%zu]", SAFESTRPTR(tpath), iter->offset, iter->bytes, list.size()); + + // initiate upload part for parallel + if(0 != (result = s3fscurl_para->UploadMultipartPostSetup(tpath, list.size(), upload_id))){ + S3FS_PRN_ERR("failed uploading part setup(%d)", result); + close(fd2); + delete s3fscurl_para; + return result; + } + }else{ + // Multipart copy + ostringstream strrange; + strrange << "bytes=" << iter->offset << "-" << (iter->offset + iter->bytes - 1); + meta["x-amz-copy-source-range"] = strrange.str(); + strrange.str(""); + strrange.clear(stringstream::goodbit); + + s3fscurl_para->b_from = SAFESTRPTR(tpath); + s3fscurl_para->b_meta = meta; + s3fscurl_para->partdata.add_etag_list(&list); + + S3FS_PRN_INFO3("Copy Part [tpath=%s][start=%zd][size=%zd][part=%zu]", SAFESTRPTR(tpath), iter->offset, iter->bytes, list.size()); + + // initiate upload part for parallel + if(0 != (result = s3fscurl_para->CopyMultipartPostSetup(tpath, tpath, list.size(), upload_id, meta))){ + S3FS_PRN_ERR("failed uploading part setup(%d)", result); + close(fd2); + delete s3fscurl_para; + return result; + } + } + + // set into parallel object + if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){ + S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath); + close(fd2); + delete s3fscurl_para; + return -1; + } + } + + // Multi request + if(0 != (result = curlmulti.Request())){ + S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result); + + S3fsCurl s3fscurl_abort(true); + int result2 = s3fscurl_abort.AbortMultipartUpload(tpath, upload_id); + s3fscurl_abort.DestroyCurlHandle(); + if(result2 != 0){ + S3FS_PRN_ERR("error aborting multipart upload(errno=%d).", result2); + } + close(fd2); + return result; + } + close(fd2); + + if(0 != (result = 
s3fscurl.CompleteMultipartPostRequest(tpath, upload_id, list))){ + return result; + } + return 0; +} + +S3fsCurl* S3fsCurl::ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl) +{ + int result; + + if(!s3fscurl){ + return NULL; + } + if(s3fscurl->retry_count >= S3fsCurl::retries){ + S3FS_PRN_ERR("Over retry count(%d) limit(%s).", s3fscurl->retry_count, s3fscurl->path.c_str()); + return NULL; + } + + // duplicate request(setup new curl object) + S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe()); + if(0 != (result = newcurl->PreGetObjectRequest(s3fscurl->path.c_str(), s3fscurl->partdata.fd, s3fscurl->partdata.startpos, s3fscurl->partdata.size, s3fscurl->b_ssetype, s3fscurl->b_ssevalue))){ + S3FS_PRN_ERR("failed downloading part setup(%d)", result); + delete newcurl; + return NULL;; + } + newcurl->retry_count = s3fscurl->retry_count + 1; + + return newcurl; +} + +int S3fsCurl::ParallelGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size) +{ + S3FS_PRN_INFO3("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd); + + sse_type_t ssetype = sse_type_t::SSE_DISABLE; + string ssevalue; + if(!get_object_sse_type(tpath, ssetype, ssevalue)){ + S3FS_PRN_WARN("Failed to get SSE type for file(%s).", SAFESTRPTR(tpath)); + } + int result = 0; + ssize_t remaining_bytes; + + // cycle through open fd, pulling off 10MB chunks at a time + for(remaining_bytes = size; 0 < remaining_bytes; ){ + S3fsMultiCurl curlmulti(GetMaxParallelCount()); + int para_cnt; + off_t chunk; + + // Initialize S3fsMultiCurl + //curlmulti.SetSuccessCallback(NULL); // not need to set success callback + curlmulti.SetRetryCallback(S3fsCurl::ParallelGetObjectRetryCallback); + + // Loop for setup parallel upload(multipart) request. + for(para_cnt = 0; para_cnt < S3fsCurl::max_parallel_cnt && 0 < remaining_bytes; para_cnt++, remaining_bytes -= chunk){ + // chunk size + chunk = remaining_bytes > S3fsCurl::multipart_size ? 
S3fsCurl::multipart_size : remaining_bytes; + + // s3fscurl sub object + S3fsCurl* s3fscurl_para = new S3fsCurl(); + if(0 != (result = s3fscurl_para->PreGetObjectRequest(tpath, fd, (start + size - remaining_bytes), chunk, ssetype, ssevalue))){ + S3FS_PRN_ERR("failed downloading part setup(%d)", result); + delete s3fscurl_para; + return result; + } + + // set into parallel object + if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){ + S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath); + delete s3fscurl_para; + return -1; + } + } + + // Multi request + if(0 != (result = curlmulti.Request())){ + S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result); + break; + } + + // reinit for loop. + curlmulti.Clear(); + } + return result; } bool S3fsCurl::UploadMultipartPostSetCurlOpts(S3fsCurl* s3fscurl) { - if(!s3fscurl){ - return false; - } - if(!s3fscurl->CreateCurlHandle()){ - return false; - } - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_URL, s3fscurl->url.c_str()); - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_UPLOAD, true); // HTTP PUT - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_WRITEDATA, (void*)(&s3fscurl->bodydata)); - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_HEADERDATA, (void*)&(s3fscurl->responseHeaders)); - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_HEADERFUNCTION, HeaderCallback); - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_INFILESIZE_LARGE, static_cast(s3fscurl->partdata.size)); // Content-Length - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_READFUNCTION, UploadReadCallback); - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_READDATA, (void*)s3fscurl); - S3fsCurl::AddUserAgent(s3fscurl->hCurl); // put User-Agent + if(!s3fscurl){ + return false; + } + if(!s3fscurl->CreateCurlHandle()){ + return false; + } + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_URL, s3fscurl->url.c_str()); + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_UPLOAD, true); // HTTP PUT + 
curl_easy_setopt(s3fscurl->hCurl, CURLOPT_WRITEDATA, (void*)(&s3fscurl->bodydata)); + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_HEADERDATA, (void*)&(s3fscurl->responseHeaders)); + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_HEADERFUNCTION, HeaderCallback); + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_INFILESIZE_LARGE, static_cast(s3fscurl->partdata.size)); // Content-Length + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_READFUNCTION, UploadReadCallback); + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_READDATA, (void*)s3fscurl); + S3fsCurl::AddUserAgent(s3fscurl->hCurl); // put User-Agent - return true; + return true; } bool S3fsCurl::CopyMultipartPostSetCurlOpts(S3fsCurl* s3fscurl) { - if(!s3fscurl){ - return false; - } - if(!s3fscurl->CreateCurlHandle()){ - return false; - } + if(!s3fscurl){ + return false; + } + if(!s3fscurl->CreateCurlHandle()){ + return false; + } - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_URL, s3fscurl->url.c_str()); - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_UPLOAD, true); // HTTP PUT - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_WRITEDATA, (void*)(&s3fscurl->bodydata)); - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_HEADERDATA, (void*)(&s3fscurl->headdata)); - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_HEADERFUNCTION, WriteMemoryCallback); - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_INFILESIZE, 0); // Content-Length - S3fsCurl::AddUserAgent(s3fscurl->hCurl); // put User-Agent + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_URL, s3fscurl->url.c_str()); + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_UPLOAD, true); // HTTP PUT + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_WRITEDATA, (void*)(&s3fscurl->bodydata)); + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_HEADERDATA, (void*)(&s3fscurl->headdata)); + 
curl_easy_setopt(s3fscurl->hCurl, CURLOPT_HEADERFUNCTION, WriteMemoryCallback); + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_INFILESIZE, 0); // Content-Length + S3fsCurl::AddUserAgent(s3fscurl->hCurl); // put User-Agent - return true; + return true; } bool S3fsCurl::PreGetObjectRequestSetCurlOpts(S3fsCurl* s3fscurl) { - if(!s3fscurl){ - return false; - } - if(!s3fscurl->CreateCurlHandle()){ - return false; - } + if(!s3fscurl){ + return false; + } + if(!s3fscurl->CreateCurlHandle()){ + return false; + } - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_URL, s3fscurl->url.c_str()); - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_WRITEFUNCTION, DownloadWriteCallback); - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_WRITEDATA, (void*)s3fscurl); - S3fsCurl::AddUserAgent(s3fscurl->hCurl); // put User-Agent + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_URL, s3fscurl->url.c_str()); + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_WRITEFUNCTION, DownloadWriteCallback); + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_WRITEDATA, (void*)s3fscurl); + S3fsCurl::AddUserAgent(s3fscurl->hCurl); // put User-Agent - return true; + return true; } bool S3fsCurl::PreHeadRequestSetCurlOpts(S3fsCurl* s3fscurl) { - if(!s3fscurl){ - return false; - } - if(!s3fscurl->CreateCurlHandle()){ - return false; - } + if(!s3fscurl){ + return false; + } + if(!s3fscurl->CreateCurlHandle()){ + return false; + } - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_URL, s3fscurl->url.c_str()); - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_NOBODY, true); // HEAD - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_FILETIME, true); // Last-Modified + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_URL, s3fscurl->url.c_str()); + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_NOBODY, true); // HEAD + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_FILETIME, true); // Last-Modified - // responseHeaders - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_HEADERDATA, (void*)&(s3fscurl->responseHeaders)); - curl_easy_setopt(s3fscurl->hCurl, CURLOPT_HEADERFUNCTION, HeaderCallback); - 
S3fsCurl::AddUserAgent(s3fscurl->hCurl); // put User-Agent + // responseHeaders + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_HEADERDATA, (void*)&(s3fscurl->responseHeaders)); + curl_easy_setopt(s3fscurl->hCurl, CURLOPT_HEADERFUNCTION, HeaderCallback); + S3fsCurl::AddUserAgent(s3fscurl->hCurl); // put User-Agent - return true; + return true; } bool S3fsCurl::ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval) { - if(!response){ - return false; - } - istringstream sscred(response); - string oneline; - keyval.clear(); - while(getline(sscred, oneline, ',')){ - string::size_type pos; - string key; - string val; - if(string::npos != (pos = oneline.find(IAMCRED_ACCESSKEYID))){ - key = IAMCRED_ACCESSKEYID; - }else if(string::npos != (pos = oneline.find(IAMCRED_SECRETACCESSKEY))){ - key = IAMCRED_SECRETACCESSKEY; - }else if(string::npos != (pos = oneline.find(S3fsCurl::IAM_token_field))){ - key = S3fsCurl::IAM_token_field; - }else if(string::npos != (pos = oneline.find(S3fsCurl::IAM_expiry_field))){ - key = S3fsCurl::IAM_expiry_field; - }else if(string::npos != (pos = oneline.find(IAMCRED_ROLEARN))){ - key = IAMCRED_ROLEARN; - }else{ - continue; - } - if(string::npos == (pos = oneline.find(':', pos + key.length()))){ - continue; + if(!response){ + return false; } + istringstream sscred(response); + string oneline; + keyval.clear(); + while(getline(sscred, oneline, ',')){ + string::size_type pos; + string key; + string val; + if(string::npos != (pos = oneline.find(IAMCRED_ACCESSKEYID))){ + key = IAMCRED_ACCESSKEYID; + }else if(string::npos != (pos = oneline.find(IAMCRED_SECRETACCESSKEY))){ + key = IAMCRED_SECRETACCESSKEY; + }else if(string::npos != (pos = oneline.find(S3fsCurl::IAM_token_field))){ + key = S3fsCurl::IAM_token_field; + }else if(string::npos != (pos = oneline.find(S3fsCurl::IAM_expiry_field))){ + key = S3fsCurl::IAM_expiry_field; + }else if(string::npos != (pos = oneline.find(IAMCRED_ROLEARN))){ + key = IAMCRED_ROLEARN; + }else{ + continue; + 
} + if(string::npos == (pos = oneline.find(':', pos + key.length()))){ + continue; + } - if(S3fsCurl::is_ibm_iam_auth && key == S3fsCurl::IAM_expiry_field){ - // parse integer value - if(string::npos == (pos = oneline.find_first_of("0123456789", pos))){ - continue; - } - oneline = oneline.substr(pos); - if(string::npos == (pos = oneline.find_last_of("0123456789"))){ - continue; - } - val = oneline.substr(0, pos+1); - }else{ - // parse string value (starts and ends with quotes) - if(string::npos == (pos = oneline.find('\"', pos))){ - continue; - } - oneline = oneline.substr(pos + sizeof(char)); - if(string::npos == (pos = oneline.find('\"'))){ - continue; - } - val = oneline.substr(0, pos); + if(S3fsCurl::is_ibm_iam_auth && key == S3fsCurl::IAM_expiry_field){ + // parse integer value + if(string::npos == (pos = oneline.find_first_of("0123456789", pos))){ + continue; + } + oneline = oneline.substr(pos); + if(string::npos == (pos = oneline.find_last_of("0123456789"))){ + continue; + } + val = oneline.substr(0, pos+1); + }else{ + // parse string value (starts and ends with quotes) + if(string::npos == (pos = oneline.find('\"', pos))){ + continue; + } + oneline = oneline.substr(pos + sizeof(char)); + if(string::npos == (pos = oneline.find('\"'))){ + continue; + } + val = oneline.substr(0, pos); + } + keyval[key] = val; } - keyval[key] = val; - } - return true; + return true; } bool S3fsCurl::SetIAMCredentials(const char* response) { - S3FS_PRN_INFO3("IAM credential response = \"%s\"", response); + S3FS_PRN_INFO3("IAM credential response = \"%s\"", response); - iamcredmap_t keyval; + iamcredmap_t keyval; - if(!ParseIAMCredentialResponse(response, keyval)){ - return false; - } - - if(S3fsCurl::IAM_field_count != keyval.size()){ - return false; - } - - S3fsCurl::AWSAccessToken = keyval[string(S3fsCurl::IAM_token_field)]; - - if(S3fsCurl::is_ibm_iam_auth){ - off_t tmp_expire = 0; - if(!try_strtoofft(keyval[string(S3fsCurl::IAM_expiry_field)].c_str(), tmp_expire, /*base=*/ 
10)){ - return false; + if(!ParseIAMCredentialResponse(response, keyval)){ + return false; } - S3fsCurl::AWSAccessTokenExpire = static_cast(tmp_expire); - }else{ - S3fsCurl::AWSAccessKeyId = keyval[string(IAMCRED_ACCESSKEYID)]; - S3fsCurl::AWSSecretAccessKey = keyval[string(IAMCRED_SECRETACCESSKEY)]; - S3fsCurl::AWSAccessTokenExpire = cvtIAMExpireStringToTime(keyval[S3fsCurl::IAM_expiry_field].c_str()); - } - return true; + if(S3fsCurl::IAM_field_count != keyval.size()){ + return false; + } + + S3fsCurl::AWSAccessToken = keyval[string(S3fsCurl::IAM_token_field)]; + + if(S3fsCurl::is_ibm_iam_auth){ + off_t tmp_expire = 0; + if(!try_strtoofft(keyval[string(S3fsCurl::IAM_expiry_field)].c_str(), tmp_expire, /*base=*/ 10)){ + return false; + } + S3fsCurl::AWSAccessTokenExpire = static_cast(tmp_expire); + }else{ + S3fsCurl::AWSAccessKeyId = keyval[string(IAMCRED_ACCESSKEYID)]; + S3fsCurl::AWSSecretAccessKey = keyval[string(IAMCRED_SECRETACCESSKEY)]; + S3fsCurl::AWSAccessTokenExpire = cvtIAMExpireStringToTime(keyval[S3fsCurl::IAM_expiry_field].c_str()); + } + return true; } bool S3fsCurl::CheckIAMCredentialUpdate() { - if(S3fsCurl::IAM_role.empty() && !S3fsCurl::is_ecs && !S3fsCurl::is_ibm_iam_auth){ + if(S3fsCurl::IAM_role.empty() && !S3fsCurl::is_ecs && !S3fsCurl::is_ibm_iam_auth){ + return true; + } + if(time(NULL) + IAM_EXPIRE_MERGIN <= S3fsCurl::AWSAccessTokenExpire){ + return true; + } + // update + S3fsCurl s3fscurl; + if(0 != s3fscurl.GetIAMCredentials()){ + return false; + } return true; - } - if(time(NULL) + IAM_EXPIRE_MERGIN <= S3fsCurl::AWSAccessTokenExpire){ - return true; - } - // update - S3fsCurl s3fscurl; - if(0 != s3fscurl.GetIAMCredentials()){ - return false; - } - return true; } bool S3fsCurl::ParseIAMRoleFromMetaDataResponse(const char* response, string& rolename) { - if(!response){ + if(!response){ + return false; + } + // [NOTE] + // expected following strings. 
+ // + // myrolename + // + istringstream ssrole(response); + string oneline; + if (getline(ssrole, oneline, '\n')){ + rolename = oneline; + return !rolename.empty(); + } return false; - } - // [NOTE] - // expected following strings. - // - // myrolename - // - istringstream ssrole(response); - string oneline; - if (getline(ssrole, oneline, '\n')){ - rolename = oneline; - return !rolename.empty(); - } - return false; } bool S3fsCurl::SetIAMRoleFromMetaData(const char* response) { - S3FS_PRN_INFO3("IAM role name response = \"%s\"", response); + S3FS_PRN_INFO3("IAM role name response = \"%s\"", response); - string rolename; + string rolename; - if(!S3fsCurl::ParseIAMRoleFromMetaDataResponse(response, rolename)){ - return false; - } + if(!S3fsCurl::ParseIAMRoleFromMetaDataResponse(response, rolename)){ + return false; + } - SetIAMRole(rolename.c_str()); - return true; + SetIAMRole(rolename.c_str()); + return true; } bool S3fsCurl::AddUserAgent(CURL* hCurl) { - if(!hCurl){ - return false; - } - if(S3fsCurl::IsUserAgentFlag()){ - curl_easy_setopt(hCurl, CURLOPT_USERAGENT, S3fsCurl::userAgent.c_str()); - } - return true; + if(!hCurl){ + return false; + } + if(S3fsCurl::IsUserAgentFlag()){ + curl_easy_setopt(hCurl, CURLOPT_USERAGENT, S3fsCurl::userAgent.c_str()); + } + return true; } int S3fsCurl::CurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr) { - return S3fsCurl::RawCurlDebugFunc(hcurl, type, data, size, userptr, CURLINFO_END); + return S3fsCurl::RawCurlDebugFunc(hcurl, type, data, size, userptr, CURLINFO_END); } int S3fsCurl::CurlDebugBodyInFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr) { - return S3fsCurl::RawCurlDebugFunc(hcurl, type, data, size, userptr, CURLINFO_DATA_IN); + return S3fsCurl::RawCurlDebugFunc(hcurl, type, data, size, userptr, CURLINFO_DATA_IN); } int S3fsCurl::CurlDebugBodyOutFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr) { - return 
S3fsCurl::RawCurlDebugFunc(hcurl, type, data, size, userptr, CURLINFO_DATA_OUT); + return S3fsCurl::RawCurlDebugFunc(hcurl, type, data, size, userptr, CURLINFO_DATA_OUT); } int S3fsCurl::RawCurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr, curl_infotype datatype) { - if(!hcurl){ - // something wrong... - return 0; - } - switch(type){ - case CURLINFO_TEXT: - // Swap tab indentation with spaces so it stays pretty in syslog - int indent; - indent = 0; - while (*data == '\t' && size > 0) { - indent += 4; - size--; - data++; - } - if(foreground && 0 < size && '\n' == data[size - 1]){ - size--; - } - S3FS_PRN_CURL("* %*s%.*s", indent, "", (int)size, data); - break; - case CURLINFO_DATA_IN: - case CURLINFO_DATA_OUT: - if(type != datatype || !S3fsCurl::is_dump_body){ - // not put - break; - } - case CURLINFO_HEADER_IN: - case CURLINFO_HEADER_OUT: - size_t remaining; - char* p; + if(!hcurl){ + // something wrong... + return 0; + } - // Print each line individually for tidy output - remaining = size; - p = data; - do { - char* eol = (char*)memchr(p, '\n', remaining); - int newline = 0; - if (eol == NULL) { - eol = (char*)memchr(p, '\r', remaining); - } else { - if (eol > p && *(eol - 1) == '\r') { - newline++; - } - newline++; - eol++; - } - size_t length = eol - p; - S3FS_PRN_CURL("%s %.*s", getCurlDebugHead(type), (int)length - newline, p); - remaining -= length; - p = eol; - } while (p != NULL && remaining > 0); - break; - case CURLINFO_SSL_DATA_IN: - case CURLINFO_SSL_DATA_OUT: - // not put - break; - default: - // why - break; - } - return 0; + switch(type){ + case CURLINFO_TEXT: + // Swap tab indentation with spaces so it stays pretty in syslog + int indent; + indent = 0; + while (*data == '\t' && size > 0) { + indent += 4; + size--; + data++; + } + if(foreground && 0 < size && '\n' == data[size - 1]){ + size--; + } + S3FS_PRN_CURL("* %*s%.*s", indent, "", (int)size, data); + break; + + case CURLINFO_DATA_IN: + case 
CURLINFO_DATA_OUT: + if(type != datatype || !S3fsCurl::is_dump_body){ + // not put + break; + } + case CURLINFO_HEADER_IN: + case CURLINFO_HEADER_OUT: + size_t remaining; + char* p; + + // Print each line individually for tidy output + remaining = size; + p = data; + do { + char* eol = (char*)memchr(p, '\n', remaining); + int newline = 0; + if (eol == NULL) { + eol = (char*)memchr(p, '\r', remaining); + } else { + if (eol > p && *(eol - 1) == '\r') { + newline++; + } + newline++; + eol++; + } + size_t length = eol - p; + S3FS_PRN_CURL("%s %.*s", getCurlDebugHead(type), (int)length - newline, p); + remaining -= length; + p = eol; + } while (p != NULL && remaining > 0); + break; + + case CURLINFO_SSL_DATA_IN: + case CURLINFO_SSL_DATA_OUT: + // not put + break; + default: + // why + break; + } + return 0; } //------------------------------------------------------------------- @@ -2135,181 +1836,180 @@ S3fsCurl::S3fsCurl(bool ahbe) : S3fsCurl::~S3fsCurl() { - DestroyCurlHandle(); + DestroyCurlHandle(); } bool S3fsCurl::ResetHandle(bool lock_already_held) { - static volatile bool run_once = false; // emit older curl warnings only once - curl_easy_reset(hCurl); - curl_easy_setopt(hCurl, CURLOPT_NOSIGNAL, 1); - curl_easy_setopt(hCurl, CURLOPT_FOLLOWLOCATION, true); - curl_easy_setopt(hCurl, CURLOPT_CONNECTTIMEOUT, S3fsCurl::connect_timeout); - curl_easy_setopt(hCurl, CURLOPT_NOPROGRESS, 0); - curl_easy_setopt(hCurl, CURLOPT_PROGRESSFUNCTION, S3fsCurl::CurlProgress); - curl_easy_setopt(hCurl, CURLOPT_PROGRESSDATA, hCurl); - // curl_easy_setopt(hCurl, CURLOPT_FORBID_REUSE, 1); - if(CURLE_OK != curl_easy_setopt(hCurl, S3FS_CURLOPT_TCP_KEEPALIVE, 1) && !run_once){ - S3FS_PRN_WARN("The CURLOPT_TCP_KEEPALIVE option could not be set. 
For maximize performance you need to enable this option and you should use libcurl 7.25.0 or later."); - } - if(CURLE_OK != curl_easy_setopt(hCurl, S3FS_CURLOPT_SSL_ENABLE_ALPN, 0) && !run_once){ - S3FS_PRN_WARN("The CURLOPT_SSL_ENABLE_ALPN option could not be unset. S3 server does not support ALPN, then this option should be disabled to maximize performance. you need to use libcurl 7.36.0 or later."); - } - if(CURLE_OK != curl_easy_setopt(hCurl, S3FS_CURLOPT_KEEP_SENDING_ON_ERROR, 1) && !run_once){ - S3FS_PRN_WARN("The S3FS_CURLOPT_KEEP_SENDING_ON_ERROR option could not be set. For maximize performance you need to enable this option and you should use libcurl 7.51.0 or later."); - } - run_once = true; - - if(type != REQTYPE_IAMCRED && type != REQTYPE_IAMROLE){ - // REQTYPE_IAMCRED and REQTYPE_IAMROLE are always HTTP - if(0 == S3fsCurl::ssl_verify_hostname){ - curl_easy_setopt(hCurl, CURLOPT_SSL_VERIFYHOST, 0); + static volatile bool run_once = false; // emit older curl warnings only once + curl_easy_reset(hCurl); + curl_easy_setopt(hCurl, CURLOPT_NOSIGNAL, 1); + curl_easy_setopt(hCurl, CURLOPT_FOLLOWLOCATION, true); + curl_easy_setopt(hCurl, CURLOPT_CONNECTTIMEOUT, S3fsCurl::connect_timeout); + curl_easy_setopt(hCurl, CURLOPT_NOPROGRESS, 0); + curl_easy_setopt(hCurl, CURLOPT_PROGRESSFUNCTION, S3fsCurl::CurlProgress); + curl_easy_setopt(hCurl, CURLOPT_PROGRESSDATA, hCurl); + // curl_easy_setopt(hCurl, CURLOPT_FORBID_REUSE, 1); + if(CURLE_OK != curl_easy_setopt(hCurl, S3FS_CURLOPT_TCP_KEEPALIVE, 1) && !run_once){ + S3FS_PRN_WARN("The CURLOPT_TCP_KEEPALIVE option could not be set. For maximize performance you need to enable this option and you should use libcurl 7.25.0 or later."); } - if(!S3fsCurl::curl_ca_bundle.empty()){ - curl_easy_setopt(hCurl, CURLOPT_CAINFO, S3fsCurl::curl_ca_bundle.c_str()); + if(CURLE_OK != curl_easy_setopt(hCurl, S3FS_CURLOPT_SSL_ENABLE_ALPN, 0) && !run_once){ + S3FS_PRN_WARN("The CURLOPT_SSL_ENABLE_ALPN option could not be unset. 
S3 server does not support ALPN, then this option should be disabled to maximize performance. you need to use libcurl 7.36.0 or later."); } - } - if((S3fsCurl::is_dns_cache || S3fsCurl::is_ssl_session_cache) && S3fsCurl::hCurlShare){ - curl_easy_setopt(hCurl, CURLOPT_SHARE, S3fsCurl::hCurlShare); - } - if(!S3fsCurl::is_cert_check) { - S3FS_PRN_DBG("'no_check_certificate' option in effect."); - S3FS_PRN_DBG("The server certificate won't be checked against the available certificate authorities."); - curl_easy_setopt(hCurl, CURLOPT_SSL_VERIFYPEER, false); - } - if(S3fsCurl::is_verbose){ - curl_easy_setopt(hCurl, CURLOPT_VERBOSE, true); - curl_easy_setopt(hCurl, CURLOPT_DEBUGFUNCTION, S3fsCurl::CurlDebugFunc); - } - if(!cipher_suites.empty()) { - curl_easy_setopt(hCurl, CURLOPT_SSL_CIPHER_LIST, cipher_suites.c_str()); - } + if(CURLE_OK != curl_easy_setopt(hCurl, S3FS_CURLOPT_KEEP_SENDING_ON_ERROR, 1) && !run_once){ + S3FS_PRN_WARN("The S3FS_CURLOPT_KEEP_SENDING_ON_ERROR option could not be set. For maximize performance you need to enable this option and you should use libcurl 7.51.0 or later."); + } + run_once = true; - AutoLock lock(&S3fsCurl::curl_handles_lock, lock_already_held ? 
AutoLock::ALREADY_LOCKED : AutoLock::NONE); - S3fsCurl::curl_times[hCurl] = time(0); - S3fsCurl::curl_progress[hCurl] = progress_t(-1, -1); + if(type != REQTYPE_IAMCRED && type != REQTYPE_IAMROLE){ + // REQTYPE_IAMCRED and REQTYPE_IAMROLE are always HTTP + if(0 == S3fsCurl::ssl_verify_hostname){ + curl_easy_setopt(hCurl, CURLOPT_SSL_VERIFYHOST, 0); + } + if(!S3fsCurl::curl_ca_bundle.empty()){ + curl_easy_setopt(hCurl, CURLOPT_CAINFO, S3fsCurl::curl_ca_bundle.c_str()); + } + } + if((S3fsCurl::is_dns_cache || S3fsCurl::is_ssl_session_cache) && S3fsCurl::hCurlShare){ + curl_easy_setopt(hCurl, CURLOPT_SHARE, S3fsCurl::hCurlShare); + } + if(!S3fsCurl::is_cert_check) { + S3FS_PRN_DBG("'no_check_certificate' option in effect."); + S3FS_PRN_DBG("The server certificate won't be checked against the available certificate authorities."); + curl_easy_setopt(hCurl, CURLOPT_SSL_VERIFYPEER, false); + } + if(S3fsCurl::is_verbose){ + curl_easy_setopt(hCurl, CURLOPT_VERBOSE, true); + curl_easy_setopt(hCurl, CURLOPT_DEBUGFUNCTION, S3fsCurl::CurlDebugFunc); + } + if(!cipher_suites.empty()) { + curl_easy_setopt(hCurl, CURLOPT_SSL_CIPHER_LIST, cipher_suites.c_str()); + } - return true; + AutoLock lock(&S3fsCurl::curl_handles_lock, lock_already_held ? 
AutoLock::ALREADY_LOCKED : AutoLock::NONE); + S3fsCurl::curl_times[hCurl] = time(0); + S3fsCurl::curl_progress[hCurl] = progress_t(-1, -1); + + return true; } bool S3fsCurl::CreateCurlHandle(bool only_pool, bool remake) { - AutoLock lock(&S3fsCurl::curl_handles_lock); + AutoLock lock(&S3fsCurl::curl_handles_lock); - if(hCurl && remake){ - if(!DestroyCurlHandle(false)){ - S3FS_PRN_ERR("could not destroy handle."); - return false; + if(hCurl && remake){ + if(!DestroyCurlHandle(false)){ + S3FS_PRN_ERR("could not destroy handle."); + return false; + } + S3FS_PRN_INFO3("already has handle, so destroyed it or restored it to pool."); } - S3FS_PRN_INFO3("already has handle, so destroyed it or restored it to pool."); - } - if(!hCurl){ - if(NULL == (hCurl = sCurlPool->GetHandler(only_pool))){ - if(!only_pool){ - S3FS_PRN_ERR("Failed to create handle."); - return false; - }else{ - // [NOTE] - // Further initialization processing is left to lazy processing to be executed later. - // (Currently we do not use only_pool=true, but this code is remained for the future) - return true; - } + if(!hCurl){ + if(NULL == (hCurl = sCurlPool->GetHandler(only_pool))){ + if(!only_pool){ + S3FS_PRN_ERR("Failed to create handle."); + return false; + }else{ + // [NOTE] + // Further initialization processing is left to lazy processing to be executed later. + // (Currently we do not use only_pool=true, but this code is remained for the future) + return true; + } + } } - } + ResetHandle(/*lock_already_held=*/ true); - ResetHandle(/*lock_already_held=*/ true); - - return true; + return true; } bool S3fsCurl::DestroyCurlHandle(bool restore_pool, bool clear_internal_data) { - // [NOTE] - // If type is REQTYPE_IAMCRED or REQTYPE_IAMROLE, do not clear type. - // Because that type only uses HTTP protocol, then the special - // logic in ResetHandle function. 
- // - if(type != REQTYPE_IAMCRED && type != REQTYPE_IAMROLE){ - type = REQTYPE_UNSET; - } + // [NOTE] + // If type is REQTYPE_IAMCRED or REQTYPE_IAMROLE, do not clear type. + // Because that type only uses HTTP protocol, then the special + // logic in ResetHandle function. + // + if(type != REQTYPE_IAMCRED && type != REQTYPE_IAMROLE){ + type = REQTYPE_UNSET; + } - if(clear_internal_data){ - ClearInternalData(); - } + if(clear_internal_data){ + ClearInternalData(); + } - if(hCurl){ - AutoLock lock(&S3fsCurl::curl_handles_lock); - - S3fsCurl::curl_times.erase(hCurl); - S3fsCurl::curl_progress.erase(hCurl); - sCurlPool->ReturnHandler(hCurl, restore_pool); - hCurl = NULL; - }else{ - return false; - } - return true; + if(hCurl){ + AutoLock lock(&S3fsCurl::curl_handles_lock); + + S3fsCurl::curl_times.erase(hCurl); + S3fsCurl::curl_progress.erase(hCurl); + sCurlPool->ReturnHandler(hCurl, restore_pool); + hCurl = NULL; + }else{ + return false; + } + return true; } bool S3fsCurl::ClearInternalData() { - // Always clear internal data - // - type = REQTYPE_UNSET; - path = ""; - base_path = ""; - saved_path = ""; - url = ""; - op = ""; - query_string= ""; - if(requestHeaders){ - curl_slist_free_all(requestHeaders); - requestHeaders = NULL; - } - responseHeaders.clear(); - bodydata.Clear(); - headdata.Clear(); - LastResponseCode = S3FSCURL_RESPONSECODE_NOTSET; - postdata = NULL; - postdata_remaining = 0; - retry_count = 0; - b_infile = NULL; - b_postdata = NULL; - b_postdata_remaining = 0; - b_partdata_startpos = 0; - b_partdata_size = 0; - partdata.clear(); + // Always clear internal data + // + type = REQTYPE_UNSET; + path = ""; + base_path = ""; + saved_path = ""; + url = ""; + op = ""; + query_string= ""; + if(requestHeaders){ + curl_slist_free_all(requestHeaders); + requestHeaders = NULL; + } + responseHeaders.clear(); + bodydata.Clear(); + headdata.Clear(); + LastResponseCode = S3FSCURL_RESPONSECODE_NOTSET; + postdata = NULL; + postdata_remaining = 0; + retry_count = 0; 
+ b_infile = NULL; + b_postdata = NULL; + b_postdata_remaining = 0; + b_partdata_startpos = 0; + b_partdata_size = 0; + partdata.clear(); - fpLazySetup = NULL; + fpLazySetup = NULL; - S3FS_MALLOCTRIM(0); + S3FS_MALLOCTRIM(0); - return true; + return true; } bool S3fsCurl::SetUseAhbe(bool ahbe) { - bool old = is_use_ahbe; - is_use_ahbe = ahbe; - return old; + bool old = is_use_ahbe; + is_use_ahbe = ahbe; + return old; } bool S3fsCurl::GetResponseCode(long& responseCode, bool from_curl_handle) { - responseCode = -1; + responseCode = -1; - if(!from_curl_handle){ - responseCode = LastResponseCode; - }else{ - if(!hCurl){ - return false; + if(!from_curl_handle){ + responseCode = LastResponseCode; + }else{ + if(!hCurl){ + return false; + } + if(CURLE_OK != curl_easy_getinfo(hCurl, CURLINFO_RESPONSE_CODE, &LastResponseCode)){ + return false; + } + responseCode = LastResponseCode; } - if(CURLE_OK != curl_easy_getinfo(hCurl, CURLINFO_RESPONSE_CODE, &LastResponseCode)){ - return false; - } - responseCode = LastResponseCode; - } - return true; + return true; } // @@ -2317,171 +2017,171 @@ bool S3fsCurl::GetResponseCode(long& responseCode, bool from_curl_handle) // bool S3fsCurl::RemakeHandle() { - S3FS_PRN_INFO3("Retry request. [type=%d][url=%s][path=%s]", type, url.c_str(), path.c_str()); + S3FS_PRN_INFO3("Retry request. 
[type=%d][url=%s][path=%s]", type, url.c_str(), path.c_str()); - if(REQTYPE_UNSET == type){ - return false; - } - - // rewind file - struct stat st; - if(b_infile){ - rewind(b_infile); - if(-1 == fstat(fileno(b_infile), &st)){ - S3FS_PRN_WARN("Could not get file stat(fd=%d)", fileno(b_infile)); - return false; + if(REQTYPE_UNSET == type){ + return false; } - } - // reinitialize internal data - responseHeaders.clear(); - bodydata.Clear(); - headdata.Clear(); - LastResponseCode = S3FSCURL_RESPONSECODE_NOTSET; + // rewind file + struct stat st; + if(b_infile){ + rewind(b_infile); + if(-1 == fstat(fileno(b_infile), &st)){ + S3FS_PRN_WARN("Could not get file stat(fd=%d)", fileno(b_infile)); + return false; + } + } - // count up(only use for multipart) - retry_count++; + // reinitialize internal data + responseHeaders.clear(); + bodydata.Clear(); + headdata.Clear(); + LastResponseCode = S3FSCURL_RESPONSECODE_NOTSET; - // set from backup - postdata = b_postdata; - postdata_remaining = b_postdata_remaining; - partdata.startpos = b_partdata_startpos; - partdata.size = b_partdata_size; + // count up(only use for multipart) + retry_count++; - // reset handle - ResetHandle(); + // set from backup + postdata = b_postdata; + postdata_remaining = b_postdata_remaining; + partdata.startpos = b_partdata_startpos; + partdata.size = b_partdata_size; - // set options - switch(type){ - case REQTYPE_DELETE: - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE"); - break; + // reset handle + ResetHandle(); - case REQTYPE_HEAD: - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_NOBODY, true); - curl_easy_setopt(hCurl, CURLOPT_FILETIME, true); - // responseHeaders - curl_easy_setopt(hCurl, CURLOPT_HEADERDATA, (void*)&responseHeaders); - curl_easy_setopt(hCurl, CURLOPT_HEADERFUNCTION, HeaderCallback); - break; + // set options + switch(type){ + case REQTYPE_DELETE: + curl_easy_setopt(hCurl, 
CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE"); + break; - case REQTYPE_PUTHEAD: - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true); - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0); - break; + case REQTYPE_HEAD: + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_NOBODY, true); + curl_easy_setopt(hCurl, CURLOPT_FILETIME, true); + // responseHeaders + curl_easy_setopt(hCurl, CURLOPT_HEADERDATA, (void*)&responseHeaders); + curl_easy_setopt(hCurl, CURLOPT_HEADERFUNCTION, HeaderCallback); + break; - case REQTYPE_PUT: - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true); - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - if(b_infile){ - curl_easy_setopt(hCurl, CURLOPT_INFILESIZE_LARGE, static_cast(st.st_size)); - curl_easy_setopt(hCurl, CURLOPT_INFILE, b_infile); - }else{ - curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0); - } - break; + case REQTYPE_PUTHEAD: + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true); + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0); + break; - case REQTYPE_GET: - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, S3fsCurl::DownloadWriteCallback); - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)this); - break; + case REQTYPE_PUT: + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true); + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, 
CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + if(b_infile){ + curl_easy_setopt(hCurl, CURLOPT_INFILESIZE_LARGE, static_cast(st.st_size)); + curl_easy_setopt(hCurl, CURLOPT_INFILE, b_infile); + }else{ + curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0); + } + break; - case REQTYPE_CHKBUCKET: - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - break; + case REQTYPE_GET: + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, S3fsCurl::DownloadWriteCallback); + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)this); + break; - case REQTYPE_LISTBUCKET: - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - break; + case REQTYPE_CHKBUCKET: + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + break; - case REQTYPE_PREMULTIPOST: - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_POST, true); - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, 0); - break; + case REQTYPE_LISTBUCKET: + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + break; - case REQTYPE_COMPLETEMULTIPOST: - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_POST, true); - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - 
curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, static_cast(postdata_remaining)); - curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this); - curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback); - break; + case REQTYPE_PREMULTIPOST: + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_POST, true); + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, 0); + break; - case REQTYPE_UPLOADMULTIPOST: - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true); - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - curl_easy_setopt(hCurl, CURLOPT_HEADERDATA, (void*)&responseHeaders); - curl_easy_setopt(hCurl, CURLOPT_HEADERFUNCTION, HeaderCallback); - curl_easy_setopt(hCurl, CURLOPT_INFILESIZE_LARGE, static_cast(partdata.size)); - curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::UploadReadCallback); - curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this); - break; + case REQTYPE_COMPLETEMULTIPOST: + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_POST, true); + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, static_cast(postdata_remaining)); + curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this); + curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback); + break; - case REQTYPE_COPYMULTIPOST: - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true); - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - curl_easy_setopt(hCurl, CURLOPT_HEADERDATA, 
(void*)&headdata); - curl_easy_setopt(hCurl, CURLOPT_HEADERFUNCTION, WriteMemoryCallback); - curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0); - break; + case REQTYPE_UPLOADMULTIPOST: + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true); + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + curl_easy_setopt(hCurl, CURLOPT_HEADERDATA, (void*)&responseHeaders); + curl_easy_setopt(hCurl, CURLOPT_HEADERFUNCTION, HeaderCallback); + curl_easy_setopt(hCurl, CURLOPT_INFILESIZE_LARGE, static_cast(partdata.size)); + curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::UploadReadCallback); + curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this); + break; - case REQTYPE_MULTILIST: - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - break; + case REQTYPE_COPYMULTIPOST: + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true); + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + curl_easy_setopt(hCurl, CURLOPT_HEADERDATA, (void*)&headdata); + curl_easy_setopt(hCurl, CURLOPT_HEADERFUNCTION, WriteMemoryCallback); + curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0); + break; - case REQTYPE_IAMCRED: - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - if(S3fsCurl::is_ibm_iam_auth){ - curl_easy_setopt(hCurl, CURLOPT_POST, true); - curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, static_cast(postdata_remaining)); - curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this); - curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback); - } - break; + case 
REQTYPE_MULTILIST: + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + break; - case REQTYPE_ABORTMULTIUPLOAD: - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE"); - break; + case REQTYPE_IAMCRED: + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + if(S3fsCurl::is_ibm_iam_auth){ + curl_easy_setopt(hCurl, CURLOPT_POST, true); + curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, static_cast(postdata_remaining)); + curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this); + curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback); + } + break; - case REQTYPE_IAMROLE: - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - break; + case REQTYPE_ABORTMULTIUPLOAD: + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE"); + break; - default: - S3FS_PRN_ERR("request type is unknown(%d)", type); - return false; - } - S3fsCurl::AddUserAgent(hCurl); // put User-Agent + case REQTYPE_IAMROLE: + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + break; - return true; + default: + S3FS_PRN_ERR("request type is unknown(%d)", type); + return false; + } + S3fsCurl::AddUserAgent(hCurl); // put User-Agent + + return true; } // @@ -2489,233 +2189,233 @@ bool S3fsCurl::RemakeHandle() // int S3fsCurl::RequestPerform(bool dontAddAuthHeaders /*=false*/) { - if(IS_S3FS_LOG_DBG()){ - char* ptr_url = NULL; - curl_easy_getinfo(hCurl, 
CURLINFO_EFFECTIVE_URL , &ptr_url); - S3FS_PRN_DBG("connecting to URL %s", SAFESTRPTR(ptr_url)); - } + if(IS_S3FS_LOG_DBG()){ + char* ptr_url = NULL; + curl_easy_getinfo(hCurl, CURLINFO_EFFECTIVE_URL , &ptr_url); + S3FS_PRN_DBG("connecting to URL %s", SAFESTRPTR(ptr_url)); + } - LastResponseCode = S3FSCURL_RESPONSECODE_NOTSET; - long responseCode; - int result = S3FSCURL_PERFORM_RESULT_NOTSET; + LastResponseCode = S3FSCURL_RESPONSECODE_NOTSET; + long responseCode; + int result = S3FSCURL_PERFORM_RESULT_NOTSET; - if(!dontAddAuthHeaders) { - insertAuthHeaders(); - } - - curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders); + if(!dontAddAuthHeaders) { + insertAuthHeaders(); + } - // 1 attempt + retries... - for(int retrycnt = 0; S3FSCURL_PERFORM_RESULT_NOTSET == result && retrycnt < S3fsCurl::retries; ++retrycnt){ - // Reset response code - responseCode = S3FSCURL_RESPONSECODE_NOTSET; + curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders); - // Requests - CURLcode curlCode = curl_easy_perform(hCurl); + // 1 attempt + retries... 
+ for(int retrycnt = 0; S3FSCURL_PERFORM_RESULT_NOTSET == result && retrycnt < S3fsCurl::retries; ++retrycnt){ + // Reset response code + responseCode = S3FSCURL_RESPONSECODE_NOTSET; - // Check result - switch(curlCode){ - case CURLE_OK: - // Need to look at the HTTP response code - if(0 != curl_easy_getinfo(hCurl, CURLINFO_RESPONSE_CODE, &responseCode)){ - S3FS_PRN_ERR("curl_easy_getinfo failed while trying to retrieve HTTP response code"); - responseCode = S3FSCURL_RESPONSECODE_FATAL_ERROR; - result = -EIO; - break; - } - if(responseCode >= 200 && responseCode < 300){ - S3FS_PRN_INFO3("HTTP response code %ld", responseCode); - result = 0; - break; - } + // Requests + CURLcode curlCode = curl_easy_perform(hCurl); - // Service response codes which are >= 300 && < 500 - switch(responseCode){ - case 301: - case 307: - S3FS_PRN_ERR("HTTP response code 301(Moved Permanently: also happens when bucket's region is incorrect), returning EIO. Body Text: %s", bodydata.str()); - S3FS_PRN_ERR("The options of url and endpoint may be useful for solving, please try to use both options."); - result = -EIO; - break; + // Check result + switch(curlCode){ + case CURLE_OK: + // Need to look at the HTTP response code + if(0 != curl_easy_getinfo(hCurl, CURLINFO_RESPONSE_CODE, &responseCode)){ + S3FS_PRN_ERR("curl_easy_getinfo failed while trying to retrieve HTTP response code"); + responseCode = S3FSCURL_RESPONSECODE_FATAL_ERROR; + result = -EIO; + break; + } + if(responseCode >= 200 && responseCode < 300){ + S3FS_PRN_INFO3("HTTP response code %ld", responseCode); + result = 0; + break; + } - case 400: - S3FS_PRN_ERR("HTTP response code %ld, returning EIO. Body Text: %s", responseCode, bodydata.str()); - result = -EIO; - break; + // Service response codes which are >= 300 && < 500 + switch(responseCode){ + case 301: + case 307: + S3FS_PRN_ERR("HTTP response code 301(Moved Permanently: also happens when bucket's region is incorrect), returning EIO. 
Body Text: %s", bodydata.str()); + S3FS_PRN_ERR("The options of url and endpoint may be useful for solving, please try to use both options."); + result = -EIO; + break; - case 403: - S3FS_PRN_ERR("HTTP response code %ld, returning EPERM. Body Text: %s", responseCode, bodydata.str()); - result = -EPERM; - break; + case 400: + S3FS_PRN_ERR("HTTP response code %ld, returning EIO. Body Text: %s", responseCode, bodydata.str()); + result = -EIO; + break; - case 404: - S3FS_PRN_INFO3("HTTP response code 404 was returned, returning ENOENT"); - S3FS_PRN_DBG("Body Text: %s", bodydata.str()); - result = -ENOENT; - break; + case 403: + S3FS_PRN_ERR("HTTP response code %ld, returning EPERM. Body Text: %s", responseCode, bodydata.str()); + result = -EPERM; + break; - case 416: - S3FS_PRN_INFO3("HTTP response code 416 was returned, returning EIO"); - result = -EIO; - break; + case 404: + S3FS_PRN_INFO3("HTTP response code 404 was returned, returning ENOENT"); + S3FS_PRN_DBG("Body Text: %s", bodydata.str()); + result = -ENOENT; + break; - case 501: - S3FS_PRN_INFO3("HTTP response code 501 was returned, returning ENOTSUP"); - S3FS_PRN_DBG("Body Text: %s", bodydata.str()); - result = -ENOTSUP; - break; + case 416: + S3FS_PRN_INFO3("HTTP response code 416 was returned, returning EIO"); + result = -EIO; + break; - case 500: - case 503: - S3FS_PRN_INFO3("HTTP response code %ld was returned, slowing down", responseCode); - S3FS_PRN_DBG("Body Text: %s", bodydata.str()); - sleep(4 << retry_count); - break; + case 501: + S3FS_PRN_INFO3("HTTP response code 501 was returned, returning ENOTSUP"); + S3FS_PRN_DBG("Body Text: %s", bodydata.str()); + result = -ENOTSUP; + break; - default: - S3FS_PRN_ERR("HTTP response code %ld, returning EIO. 
Body Text: %s", responseCode, bodydata.str()); - result = -EIO; - break; - } - break; + case 500: + case 503: + S3FS_PRN_INFO3("HTTP response code %ld was returned, slowing down", responseCode); + S3FS_PRN_DBG("Body Text: %s", bodydata.str()); + sleep(4 << retry_count); + break; - case CURLE_WRITE_ERROR: - S3FS_PRN_ERR("### CURLE_WRITE_ERROR"); - sleep(2); - break; + default: + S3FS_PRN_ERR("HTTP response code %ld, returning EIO. Body Text: %s", responseCode, bodydata.str()); + result = -EIO; + break; + } + break; - case CURLE_OPERATION_TIMEDOUT: - S3FS_PRN_ERR("### CURLE_OPERATION_TIMEDOUT"); - sleep(2); - break; + case CURLE_WRITE_ERROR: + S3FS_PRN_ERR("### CURLE_WRITE_ERROR"); + sleep(2); + break; - case CURLE_COULDNT_RESOLVE_HOST: - S3FS_PRN_ERR("### CURLE_COULDNT_RESOLVE_HOST"); - sleep(2); - break; + case CURLE_OPERATION_TIMEDOUT: + S3FS_PRN_ERR("### CURLE_OPERATION_TIMEDOUT"); + sleep(2); + break; - case CURLE_COULDNT_CONNECT: - S3FS_PRN_ERR("### CURLE_COULDNT_CONNECT"); - sleep(4); - break; + case CURLE_COULDNT_RESOLVE_HOST: + S3FS_PRN_ERR("### CURLE_COULDNT_RESOLVE_HOST"); + sleep(2); + break; - case CURLE_GOT_NOTHING: - S3FS_PRN_ERR("### CURLE_GOT_NOTHING"); - sleep(4); - break; + case CURLE_COULDNT_CONNECT: + S3FS_PRN_ERR("### CURLE_COULDNT_CONNECT"); + sleep(4); + break; - case CURLE_ABORTED_BY_CALLBACK: - S3FS_PRN_ERR("### CURLE_ABORTED_BY_CALLBACK"); - sleep(4); - { - AutoLock lock(&S3fsCurl::curl_handles_lock); - S3fsCurl::curl_times[hCurl] = time(0); - } - break; + case CURLE_GOT_NOTHING: + S3FS_PRN_ERR("### CURLE_GOT_NOTHING"); + sleep(4); + break; - case CURLE_PARTIAL_FILE: - S3FS_PRN_ERR("### CURLE_PARTIAL_FILE"); - sleep(4); - break; + case CURLE_ABORTED_BY_CALLBACK: + S3FS_PRN_ERR("### CURLE_ABORTED_BY_CALLBACK"); + sleep(4); + { + AutoLock lock(&S3fsCurl::curl_handles_lock); + S3fsCurl::curl_times[hCurl] = time(0); + } + break; - case CURLE_SEND_ERROR: - S3FS_PRN_ERR("### CURLE_SEND_ERROR"); - sleep(2); - break; + case CURLE_PARTIAL_FILE: + 
S3FS_PRN_ERR("### CURLE_PARTIAL_FILE"); + sleep(4); + break; - case CURLE_RECV_ERROR: - S3FS_PRN_ERR("### CURLE_RECV_ERROR"); - sleep(2); - break; + case CURLE_SEND_ERROR: + S3FS_PRN_ERR("### CURLE_SEND_ERROR"); + sleep(2); + break; - case CURLE_SSL_CONNECT_ERROR: - S3FS_PRN_ERR("### CURLE_SSL_CONNECT_ERROR"); - sleep(2); - break; + case CURLE_RECV_ERROR: + S3FS_PRN_ERR("### CURLE_RECV_ERROR"); + sleep(2); + break; - case CURLE_SSL_CACERT: - S3FS_PRN_ERR("### CURLE_SSL_CACERT"); + case CURLE_SSL_CONNECT_ERROR: + S3FS_PRN_ERR("### CURLE_SSL_CONNECT_ERROR"); + sleep(2); + break; - // try to locate cert, if successful, then set the - // option and continue - if(S3fsCurl::curl_ca_bundle.empty()){ - if(!S3fsCurl::LocateBundle()){ - S3FS_PRN_ERR("could not get CURL_CA_BUNDLE."); - result = -EIO; - } - // retry with CAINFO - }else{ - S3FS_PRN_ERR("curlCode: %d msg: %s", curlCode, curl_easy_strerror(curlCode)); - result = -EIO; - } - break; + case CURLE_SSL_CACERT: + S3FS_PRN_ERR("### CURLE_SSL_CACERT"); + + // try to locate cert, if successful, then set the + // option and continue + if(S3fsCurl::curl_ca_bundle.empty()){ + if(!S3fsCurl::LocateBundle()){ + S3FS_PRN_ERR("could not get CURL_CA_BUNDLE."); + result = -EIO; + } + // retry with CAINFO + }else{ + S3FS_PRN_ERR("curlCode: %d msg: %s", curlCode, curl_easy_strerror(curlCode)); + result = -EIO; + } + break; #ifdef CURLE_PEER_FAILED_VERIFICATION - case CURLE_PEER_FAILED_VERIFICATION: - S3FS_PRN_ERR("### CURLE_PEER_FAILED_VERIFICATION"); + case CURLE_PEER_FAILED_VERIFICATION: + S3FS_PRN_ERR("### CURLE_PEER_FAILED_VERIFICATION"); - first_pos = bucket.find_first_of("."); - if(first_pos != string::npos){ - S3FS_PRN_INFO("curl returned a CURL_PEER_FAILED_VERIFICATION error"); - S3FS_PRN_INFO("security issue found: buckets with periods in their name are incompatible with http"); - S3FS_PRN_INFO("This check can be over-ridden by using the -o ssl_verify_hostname=0"); - S3FS_PRN_INFO("The certificate will still be checked but 
the hostname will not be verified."); - S3FS_PRN_INFO("A more secure method would be to use a bucket name without periods."); - }else{ - S3FS_PRN_INFO("my_curl_easy_perform: curlCode: %d -- %s", curlCode, curl_easy_strerror(curlCode)); - } - result = -EIO; - break; + first_pos = bucket.find_first_of("."); + if(first_pos != string::npos){ + S3FS_PRN_INFO("curl returned a CURL_PEER_FAILED_VERIFICATION error"); + S3FS_PRN_INFO("security issue found: buckets with periods in their name are incompatible with http"); + S3FS_PRN_INFO("This check can be over-ridden by using the -o ssl_verify_hostname=0"); + S3FS_PRN_INFO("The certificate will still be checked but the hostname will not be verified."); + S3FS_PRN_INFO("A more secure method would be to use a bucket name without periods."); + }else{ + S3FS_PRN_INFO("my_curl_easy_perform: curlCode: %d -- %s", curlCode, curl_easy_strerror(curlCode)); + } + result = -EIO; + break; #endif - // This should be invalid since curl option HTTP FAILONERROR is now off - case CURLE_HTTP_RETURNED_ERROR: - S3FS_PRN_ERR("### CURLE_HTTP_RETURNED_ERROR"); + // This should be invalid since curl option HTTP FAILONERROR is now off + case CURLE_HTTP_RETURNED_ERROR: + S3FS_PRN_ERR("### CURLE_HTTP_RETURNED_ERROR"); - if(0 != curl_easy_getinfo(hCurl, CURLINFO_RESPONSE_CODE, &responseCode)){ - result = -EIO; - }else{ - S3FS_PRN_INFO3("HTTP response code =%ld", responseCode); + if(0 != curl_easy_getinfo(hCurl, CURLINFO_RESPONSE_CODE, &responseCode)){ + result = -EIO; + }else{ + S3FS_PRN_INFO3("HTTP response code =%ld", responseCode); - // Let's try to retrieve the - if(404 == responseCode){ - result = -ENOENT; - }else if(500 > responseCode){ - result = -EIO; - } + // Let's try to retrieve the + if(404 == responseCode){ + result = -ENOENT; + }else if(500 > responseCode){ + result = -EIO; + } + } + break; + + // Unknown CURL return code + default: + S3FS_PRN_ERR("###curlCode: %d msg: %s", curlCode, curl_easy_strerror(curlCode)); + result = -EIO; + break; 
+ } // switch + + if(S3FSCURL_PERFORM_RESULT_NOTSET == result){ + S3FS_PRN_INFO("### retrying..."); + + if(!RemakeHandle()){ + S3FS_PRN_INFO("Failed to reset handle and internal data for retrying."); + result = -EIO; + break; + } } - break; + } // for - // Unknown CURL return code - default: - S3FS_PRN_ERR("###curlCode: %d msg: %s", curlCode, curl_easy_strerror(curlCode)); - result = -EIO; - break; + // set last response code + if(S3FSCURL_RESPONSECODE_NOTSET == responseCode){ + LastResponseCode = S3FSCURL_RESPONSECODE_FATAL_ERROR; + }else{ + LastResponseCode = responseCode; } if(S3FSCURL_PERFORM_RESULT_NOTSET == result){ - S3FS_PRN_INFO("### retrying..."); - - if(!RemakeHandle()){ - S3FS_PRN_INFO("Failed to reset handle and internal data for retrying."); + S3FS_PRN_ERR("### giving up"); result = -EIO; - break; - } } - } - - // set last response code - if(S3FSCURL_RESPONSECODE_NOTSET == responseCode){ - LastResponseCode = S3FSCURL_RESPONSECODE_FATAL_ERROR; - }else{ - LastResponseCode = responseCode; - } - - if(S3FSCURL_PERFORM_RESULT_NOTSET == result){ - S3FS_PRN_ERR("### giving up"); - result = -EIO; - } - return result; + return result; } // @@ -2728,248 +2428,247 @@ int S3fsCurl::RequestPerform(bool dontAddAuthHeaders /*=false*/) // string S3fsCurl::CalcSignatureV2(const string& method, const string& strMD5, const string& content_type, const string& date, const string& resource) { - string Signature; - string StringToSign; + string Signature; + string StringToSign; - if(!S3fsCurl::IAM_role.empty() || S3fsCurl::is_ecs || S3fsCurl::is_use_session_token){ - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-security-token", S3fsCurl::AWSAccessToken.c_str()); - } + if(!S3fsCurl::IAM_role.empty() || S3fsCurl::is_ecs || S3fsCurl::is_use_session_token){ + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-security-token", S3fsCurl::AWSAccessToken.c_str()); + } - StringToSign += method + "\n"; - StringToSign += strMD5 + "\n"; // md5 - 
StringToSign += content_type + "\n"; - StringToSign += date + "\n"; - StringToSign += get_canonical_headers(requestHeaders, true); - StringToSign += resource; + StringToSign += method + "\n"; + StringToSign += strMD5 + "\n"; // md5 + StringToSign += content_type + "\n"; + StringToSign += date + "\n"; + StringToSign += get_canonical_headers(requestHeaders, true); + StringToSign += resource; - const void* key = S3fsCurl::AWSSecretAccessKey.data(); - int key_len = S3fsCurl::AWSSecretAccessKey.size(); - const unsigned char* sdata = reinterpret_cast(StringToSign.data()); - int sdata_len = StringToSign.size(); - unsigned char* md = NULL; - unsigned int md_len = 0;; + const void* key = S3fsCurl::AWSSecretAccessKey.data(); + int key_len = S3fsCurl::AWSSecretAccessKey.size(); + const unsigned char* sdata = reinterpret_cast(StringToSign.data()); + int sdata_len = StringToSign.size(); + unsigned char* md = NULL; + unsigned int md_len = 0;; - s3fs_HMAC(key, key_len, sdata, sdata_len, &md, &md_len); + s3fs_HMAC(key, key_len, sdata, sdata_len, &md, &md_len); - char* base64; - if(NULL == (base64 = s3fs_base64(md, md_len))){ + char* base64; + if(NULL == (base64 = s3fs_base64(md, md_len))){ + delete[] md; + return string(""); // ENOMEM + } delete[] md; - return string(""); // ENOMEM - } - delete[] md; - Signature = base64; - delete[] base64; + Signature = base64; + delete[] base64; - return Signature; + return Signature; } string S3fsCurl::CalcSignature(const string& method, const string& canonical_uri, const string& query_string, const string& strdate, const string& payload_hash, const string& date8601) { - string Signature, StringCQ, StringToSign; - string uriencode; + string Signature, StringCQ, StringToSign; + string uriencode; - if(!S3fsCurl::IAM_role.empty() || S3fsCurl::is_ecs || S3fsCurl::is_use_session_token){ - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-security-token", S3fsCurl::AWSAccessToken.c_str()); - } + if(!S3fsCurl::IAM_role.empty() || 
S3fsCurl::is_ecs || S3fsCurl::is_use_session_token){ + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-security-token", S3fsCurl::AWSAccessToken.c_str()); + } - uriencode = urlEncode(canonical_uri); - StringCQ = method + "\n"; - if(0 == strcmp(method.c_str(),"HEAD") || 0 == strcmp(method.c_str(),"PUT") || 0 == strcmp(method.c_str(),"DELETE")){ - StringCQ += uriencode + "\n"; - }else if (0 == strcmp(method.c_str(), "GET") && 0 == strcmp(uriencode.c_str(), "")) { - StringCQ +="/\n"; - }else if (0 == strcmp(method.c_str(), "GET") && 0 == strncmp(uriencode.c_str(), "/", 1)) { - StringCQ += uriencode +"\n"; - }else if (0 == strcmp(method.c_str(), "GET") && 0 != strncmp(uriencode.c_str(), "/", 1)) { - StringCQ += "/\n" + urlEncode2(canonical_uri) +"\n"; - }else if (0 == strcmp(method.c_str(), "POST")) { - StringCQ += uriencode + "\n"; - } - StringCQ += urlEncode2(query_string) + "\n"; - StringCQ += get_canonical_headers(requestHeaders) + "\n"; - StringCQ += get_sorted_header_keys(requestHeaders) + "\n"; - StringCQ += payload_hash; + uriencode = urlEncode(canonical_uri); + StringCQ = method + "\n"; + if(0 == strcmp(method.c_str(),"HEAD") || 0 == strcmp(method.c_str(),"PUT") || 0 == strcmp(method.c_str(),"DELETE")){ + StringCQ += uriencode + "\n"; + }else if (0 == strcmp(method.c_str(), "GET") && 0 == strcmp(uriencode.c_str(), "")) { + StringCQ +="/\n"; + }else if (0 == strcmp(method.c_str(), "GET") && 0 == strncmp(uriencode.c_str(), "/", 1)) { + StringCQ += uriencode +"\n"; + }else if (0 == strcmp(method.c_str(), "GET") && 0 != strncmp(uriencode.c_str(), "/", 1)) { + StringCQ += "/\n" + urlEncode2(canonical_uri) +"\n"; + }else if (0 == strcmp(method.c_str(), "POST")) { + StringCQ += uriencode + "\n"; + } + StringCQ += urlEncode2(query_string) + "\n"; + StringCQ += get_canonical_headers(requestHeaders) + "\n"; + StringCQ += get_sorted_header_keys(requestHeaders) + "\n"; + StringCQ += payload_hash; - char kSecret[128]; - unsigned char *kDate, *kRegion, 
*kService, *kSigning, *sRequest = NULL; - unsigned int kDate_len,kRegion_len, kService_len, kSigning_len, sRequest_len = 0; - char hexsRequest[64 + 1]; - int kSecret_len = snprintf(kSecret, sizeof(kSecret), "AWS4%s", S3fsCurl::AWSSecretAccessKey.c_str()); - unsigned int cnt; + char kSecret[128]; + unsigned char *kDate, *kRegion, *kService, *kSigning, *sRequest = NULL; + unsigned int kDate_len,kRegion_len, kService_len, kSigning_len, sRequest_len = 0; + char hexsRequest[64 + 1]; + int kSecret_len = snprintf(kSecret, sizeof(kSecret), "AWS4%s", S3fsCurl::AWSSecretAccessKey.c_str()); + unsigned int cnt; - s3fs_HMAC256(kSecret, kSecret_len, reinterpret_cast(strdate.data()), strdate.size(), &kDate, &kDate_len); - s3fs_HMAC256(kDate, kDate_len, reinterpret_cast(endpoint.c_str()), endpoint.size(), &kRegion, &kRegion_len); - s3fs_HMAC256(kRegion, kRegion_len, reinterpret_cast("s3"), sizeof("s3") - 1, &kService, &kService_len); - s3fs_HMAC256(kService, kService_len, reinterpret_cast("aws4_request"), sizeof("aws4_request") - 1, &kSigning, &kSigning_len); - delete[] kDate; - delete[] kRegion; - delete[] kService; + s3fs_HMAC256(kSecret, kSecret_len, reinterpret_cast(strdate.data()), strdate.size(), &kDate, &kDate_len); + s3fs_HMAC256(kDate, kDate_len, reinterpret_cast(endpoint.c_str()), endpoint.size(), &kRegion, &kRegion_len); + s3fs_HMAC256(kRegion, kRegion_len, reinterpret_cast("s3"), sizeof("s3") - 1, &kService, &kService_len); + s3fs_HMAC256(kService, kService_len, reinterpret_cast("aws4_request"), sizeof("aws4_request") - 1, &kSigning, &kSigning_len); + delete[] kDate; + delete[] kRegion; + delete[] kService; - const unsigned char* cRequest = reinterpret_cast(StringCQ.c_str()); - unsigned int cRequest_len = StringCQ.size(); - s3fs_sha256(cRequest, cRequest_len, &sRequest, &sRequest_len); - for(cnt = 0; cnt < sRequest_len; cnt++){ - sprintf(&hexsRequest[cnt * 2], "%02x", sRequest[cnt]); - } - delete[] sRequest; + const unsigned char* cRequest = 
reinterpret_cast(StringCQ.c_str()); + unsigned int cRequest_len = StringCQ.size(); + s3fs_sha256(cRequest, cRequest_len, &sRequest, &sRequest_len); + for(cnt = 0; cnt < sRequest_len; cnt++){ + sprintf(&hexsRequest[cnt * 2], "%02x", sRequest[cnt]); + } + delete[] sRequest; - StringToSign = "AWS4-HMAC-SHA256\n"; - StringToSign += date8601 + "\n"; - StringToSign += strdate + "/" + endpoint + "/s3/aws4_request\n"; - StringToSign += hexsRequest; + StringToSign = "AWS4-HMAC-SHA256\n"; + StringToSign += date8601 + "\n"; + StringToSign += strdate + "/" + endpoint + "/s3/aws4_request\n"; + StringToSign += hexsRequest; - const unsigned char* cscope = reinterpret_cast(StringToSign.c_str()); - unsigned int cscope_len = StringToSign.size(); - unsigned char* md = NULL; - unsigned int md_len = 0; + const unsigned char* cscope = reinterpret_cast(StringToSign.c_str()); + unsigned int cscope_len = StringToSign.size(); + unsigned char* md = NULL; + unsigned int md_len = 0; - s3fs_HMAC256(kSigning, kSigning_len, cscope, cscope_len, &md, &md_len); - char *hexSig = new char[2 * md_len + 1]; - for(cnt = 0; cnt < md_len; cnt++){ - sprintf(&hexSig[cnt * 2], "%02x", md[cnt]); - } - delete[] kSigning; - delete[] md; + s3fs_HMAC256(kSigning, kSigning_len, cscope, cscope_len, &md, &md_len); + char *hexSig = new char[2 * md_len + 1]; + for(cnt = 0; cnt < md_len; cnt++){ + sprintf(&hexSig[cnt * 2], "%02x", md[cnt]); + } + delete[] kSigning; + delete[] md; - Signature = hexSig; - delete[] hexSig; + Signature = hexSig; + delete[] hexSig; - return Signature; + return Signature; } void S3fsCurl::insertV4Headers() { - string server_path = type == REQTYPE_LISTBUCKET ? "/" : path; - string payload_hash; - switch (type) { - case REQTYPE_PUT: - payload_hash = s3fs_sha256sum(b_infile == NULL ? -1 : fileno(b_infile), 0, -1); - break; + string server_path = type == REQTYPE_LISTBUCKET ? "/" : path; + string payload_hash; + switch (type) { + case REQTYPE_PUT: + payload_hash = s3fs_sha256sum(b_infile == NULL ? 
-1 : fileno(b_infile), 0, -1); + break; - case REQTYPE_COMPLETEMULTIPOST: - { - unsigned int cRequest_len = strlen(reinterpret_cast(b_postdata)); - unsigned char* sRequest = NULL; - unsigned int sRequest_len = 0; - char hexsRequest[64 + 1]; - unsigned int cnt; - s3fs_sha256(b_postdata, cRequest_len, &sRequest, &sRequest_len); - for(cnt = 0; cnt < sRequest_len; cnt++){ - sprintf(&hexsRequest[cnt * 2], "%02x", sRequest[cnt]); - } - delete[] sRequest; - payload_hash.assign(hexsRequest, &hexsRequest[sRequest_len * 2]); - break; + case REQTYPE_COMPLETEMULTIPOST: + { + unsigned int cRequest_len = strlen(reinterpret_cast(b_postdata)); + unsigned char* sRequest = NULL; + unsigned int sRequest_len = 0; + char hexsRequest[64 + 1]; + unsigned int cnt; + s3fs_sha256(b_postdata, cRequest_len, &sRequest, &sRequest_len); + for(cnt = 0; cnt < sRequest_len; cnt++){ + sprintf(&hexsRequest[cnt * 2], "%02x", sRequest[cnt]); + } + delete[] sRequest; + payload_hash.assign(hexsRequest, &hexsRequest[sRequest_len * 2]); + break; + } + + case REQTYPE_UPLOADMULTIPOST: + payload_hash = s3fs_sha256sum(partdata.fd, partdata.startpos, partdata.size); + break; + default: + break; } - case REQTYPE_UPLOADMULTIPOST: - payload_hash = s3fs_sha256sum(partdata.fd, partdata.startpos, partdata.size); - break; - default: - break; - } + S3FS_PRN_INFO3("computing signature [%s] [%s] [%s] [%s]", op.c_str(), server_path.c_str(), query_string.c_str(), payload_hash.c_str()); + string strdate; + string date8601; + get_date_sigv3(strdate, date8601); - S3FS_PRN_INFO3("computing signature [%s] [%s] [%s] [%s]", op.c_str(), server_path.c_str(), query_string.c_str(), payload_hash.c_str()); - string strdate; - string date8601; - get_date_sigv3(strdate, date8601); + string contentSHA256 = payload_hash.empty() ? empty_payload_hash : payload_hash; + const std::string realpath = pathrequeststyle ? "/" + bucket + server_path : server_path; - string contentSHA256 = payload_hash.empty() ? 
empty_payload_hash : payload_hash; - const std::string realpath = pathrequeststyle ? "/" + bucket + server_path : server_path; + //string canonical_headers, signed_headers; + requestHeaders = curl_slist_sort_insert(requestHeaders, "host", get_bucket_host().c_str()); + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-content-sha256", contentSHA256.c_str()); + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-date", date8601.c_str()); - //string canonical_headers, signed_headers; - requestHeaders = curl_slist_sort_insert(requestHeaders, "host", get_bucket_host().c_str()); - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-content-sha256", contentSHA256.c_str()); - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-date", date8601.c_str()); - - if (S3fsCurl::IsRequesterPays()) { - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-request-payer", "requester"); - } + if (S3fsCurl::IsRequesterPays()) { + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-request-payer", "requester"); + } - if(!S3fsCurl::IsPublicBucket()){ - string Signature = CalcSignature(op, realpath, query_string + (type == REQTYPE_PREMULTIPOST || type == REQTYPE_MULTILIST ? "=" : ""), strdate, contentSHA256, date8601); - string auth = "AWS4-HMAC-SHA256 Credential=" + AWSAccessKeyId + "/" + strdate + "/" + endpoint + - "/s3/aws4_request, SignedHeaders=" + get_sorted_header_keys(requestHeaders) + ", Signature=" + Signature; - requestHeaders = curl_slist_sort_insert(requestHeaders, "Authorization", auth.c_str()); - } + if(!S3fsCurl::IsPublicBucket()){ + string Signature = CalcSignature(op, realpath, query_string + (type == REQTYPE_PREMULTIPOST || type == REQTYPE_MULTILIST ? 
"=" : ""), strdate, contentSHA256, date8601); + string auth = "AWS4-HMAC-SHA256 Credential=" + AWSAccessKeyId + "/" + strdate + "/" + endpoint + "/s3/aws4_request, SignedHeaders=" + get_sorted_header_keys(requestHeaders) + ", Signature=" + Signature; + requestHeaders = curl_slist_sort_insert(requestHeaders, "Authorization", auth.c_str()); + } } void S3fsCurl::insertV2Headers() { - string resource; - string turl; - string server_path = type == REQTYPE_LISTBUCKET ? "/" : path; - MakeUrlResource(server_path.c_str(), resource, turl); - if(!query_string.empty() && type != REQTYPE_LISTBUCKET){ - resource += "?" + query_string; - } + string resource; + string turl; + string server_path = type == REQTYPE_LISTBUCKET ? "/" : path; + MakeUrlResource(server_path.c_str(), resource, turl); + if(!query_string.empty() && type != REQTYPE_LISTBUCKET){ + resource += "?" + query_string; + } - string date = get_date_rfc850(); - requestHeaders = curl_slist_sort_insert(requestHeaders, "Date", date.c_str()); - if(op != "PUT" && op != "POST"){ - requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", NULL); - } + string date = get_date_rfc850(); + requestHeaders = curl_slist_sort_insert(requestHeaders, "Date", date.c_str()); + if(op != "PUT" && op != "POST"){ + requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", NULL); + } - if(!S3fsCurl::IsPublicBucket()){ - string Signature = CalcSignatureV2(op, get_header_value(requestHeaders, "Content-MD5"), get_header_value(requestHeaders, "Content-Type"), date, resource); - requestHeaders = curl_slist_sort_insert(requestHeaders, "Authorization", string("AWS " + AWSAccessKeyId + ":" + Signature).c_str()); - } + if(!S3fsCurl::IsPublicBucket()){ + string Signature = CalcSignatureV2(op, get_header_value(requestHeaders, "Content-MD5"), get_header_value(requestHeaders, "Content-Type"), date, resource); + requestHeaders = curl_slist_sort_insert(requestHeaders, "Authorization", string("AWS " + AWSAccessKeyId + ":" + 
Signature).c_str()); + } } void S3fsCurl::insertIBMIAMHeaders() { - requestHeaders = curl_slist_sort_insert(requestHeaders, "Authorization", ("Bearer " + S3fsCurl::AWSAccessToken).c_str()); + requestHeaders = curl_slist_sort_insert(requestHeaders, "Authorization", ("Bearer " + S3fsCurl::AWSAccessToken).c_str()); - if(op == "PUT" && path == mount_prefix + "/"){ - // ibm-service-instance-id header is required for bucket creation requests - requestHeaders = curl_slist_sort_insert(requestHeaders, "ibm-service-instance-id", S3fsCurl::AWSAccessKeyId.c_str()); - } + if(op == "PUT" && path == mount_prefix + "/"){ + // ibm-service-instance-id header is required for bucket creation requests + requestHeaders = curl_slist_sort_insert(requestHeaders, "ibm-service-instance-id", S3fsCurl::AWSAccessKeyId.c_str()); + } } void S3fsCurl::insertAuthHeaders() { - if(!S3fsCurl::CheckIAMCredentialUpdate()){ - S3FS_PRN_ERR("An error occurred in checking IAM credential."); - return; // do not insert auth headers on error - } + if(!S3fsCurl::CheckIAMCredentialUpdate()){ + S3FS_PRN_ERR("An error occurred in checking IAM credential."); + return; // do not insert auth headers on error + } - if(S3fsCurl::is_ibm_iam_auth){ - insertIBMIAMHeaders(); - }else if(!S3fsCurl::is_sigv4){ - insertV2Headers(); - }else{ - insertV4Headers(); - } + if(S3fsCurl::is_ibm_iam_auth){ + insertIBMIAMHeaders(); + }else if(!S3fsCurl::is_sigv4){ + insertV2Headers(); + }else{ + insertV4Headers(); + } } int S3fsCurl::DeleteRequest(const char* tpath) { - S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); + S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); - if(!tpath){ - return -1; - } - if(!CreateCurlHandle()){ - return -1; - } - string resource; - string turl; - MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); + if(!tpath){ + return -1; + } + if(!CreateCurlHandle()){ + return -1; + } + string resource; + string turl; + MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); - url = 
prepare_url(turl.c_str()); - path = get_realpath(tpath); - requestHeaders = NULL; - responseHeaders.clear(); + url = prepare_url(turl.c_str()); + path = get_realpath(tpath); + requestHeaders = NULL; + responseHeaders.clear(); - op = "DELETE"; - type = REQTYPE_DELETE; + op = "DELETE"; + type = REQTYPE_DELETE; - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE"); - S3fsCurl::AddUserAgent(hCurl); // put User-Agent + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE"); + S3fsCurl::AddUserAgent(hCurl); // put User-Agent - return RequestPerform(); + return RequestPerform(); } // @@ -2978,72 +2677,71 @@ int S3fsCurl::DeleteRequest(const char* tpath) // int S3fsCurl::GetIAMCredentials() { - if (!S3fsCurl::is_ecs && !S3fsCurl::is_ibm_iam_auth) { - S3FS_PRN_INFO3("[IAM role=%s]", S3fsCurl::IAM_role.c_str()); + if (!S3fsCurl::is_ecs && !S3fsCurl::is_ibm_iam_auth) { + S3FS_PRN_INFO3("[IAM role=%s]", S3fsCurl::IAM_role.c_str()); - if(S3fsCurl::IAM_role.empty()) { - S3FS_PRN_ERR("IAM role name is empty."); - return -EIO; + if(S3fsCurl::IAM_role.empty()) { + S3FS_PRN_ERR("IAM role name is empty."); + return -EIO; + } } - } - // at first set type for handle - type = REQTYPE_IAMCRED; + // at first set type for handle + type = REQTYPE_IAMCRED; - if(!CreateCurlHandle()){ - return -EIO; - } + if(!CreateCurlHandle()){ + return -EIO; + } - // url - if (is_ecs) { - url = string(S3fsCurl::IAM_cred_url) + std::getenv(ECS_IAM_ENV_VAR.c_str()); - } - else { - url = string(S3fsCurl::IAM_cred_url) + S3fsCurl::IAM_role; - } + // url + if(is_ecs){ + url = string(S3fsCurl::IAM_cred_url) + std::getenv(ECS_IAM_ENV_VAR.c_str()); + }else{ + url = string(S3fsCurl::IAM_cred_url) + S3fsCurl::IAM_role; + } - requestHeaders = NULL; - responseHeaders.clear(); - bodydata.Clear(); - string postContent; + requestHeaders = NULL; + responseHeaders.clear(); + bodydata.Clear(); + string 
postContent; - if(S3fsCurl::is_ibm_iam_auth){ - url = string(S3fsCurl::IAM_cred_url); + if(S3fsCurl::is_ibm_iam_auth){ + url = string(S3fsCurl::IAM_cred_url); - // make contents - postContent += "grant_type=urn:ibm:params:oauth:grant-type:apikey"; - postContent += "&response_type=cloud_iam"; - postContent += "&apikey=" + S3fsCurl::AWSSecretAccessKey; + // make contents + postContent += "grant_type=urn:ibm:params:oauth:grant-type:apikey"; + postContent += "&response_type=cloud_iam"; + postContent += "&apikey=" + S3fsCurl::AWSSecretAccessKey; - // set postdata - postdata = reinterpret_cast(postContent.c_str()); - b_postdata = postdata; - postdata_remaining = postContent.size(); // without null - b_postdata_remaining = postdata_remaining; + // set postdata + postdata = reinterpret_cast(postContent.c_str()); + b_postdata = postdata; + postdata_remaining = postContent.size(); // without null + b_postdata_remaining = postdata_remaining; - requestHeaders = curl_slist_sort_insert(requestHeaders, "Authorization", "Basic Yng6Yng="); + requestHeaders = curl_slist_sort_insert(requestHeaders, "Authorization", "Basic Yng6Yng="); - curl_easy_setopt(hCurl, CURLOPT_POST, true); // POST - curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, static_cast(postdata_remaining)); - curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this); - curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback); - } + curl_easy_setopt(hCurl, CURLOPT_POST, true); // POST + curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, static_cast(postdata_remaining)); + curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this); + curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback); + } - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - S3fsCurl::AddUserAgent(hCurl); // put User-Agent + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, 
CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + S3fsCurl::AddUserAgent(hCurl); // put User-Agent - int result = RequestPerform(true); + int result = RequestPerform(true); - // analyzing response - if(0 == result && !S3fsCurl::SetIAMCredentials(bodydata.str())){ - S3FS_PRN_ERR("Something error occurred, could not get IAM credential."); - result = -EIO; - } - bodydata.Clear(); + // analyzing response + if(0 == result && !S3fsCurl::SetIAMCredentials(bodydata.str())){ + S3FS_PRN_ERR("Something error occurred, could not get IAM credential."); + result = -EIO; + } + bodydata.Clear(); - return result; + return result; } // @@ -3051,77 +2749,79 @@ int S3fsCurl::GetIAMCredentials() // bool S3fsCurl::LoadIAMRoleFromMetaData() { - S3FS_PRN_INFO3("Get IAM Role name"); + S3FS_PRN_INFO3("Get IAM Role name"); - // at first set type for handle - type = REQTYPE_IAMROLE; + // at first set type for handle + type = REQTYPE_IAMROLE; - if(!CreateCurlHandle()){ - return false; - } + if(!CreateCurlHandle()){ + return false; + } - // url - url = string(S3fsCurl::IAM_cred_url); - requestHeaders = NULL; - responseHeaders.clear(); - bodydata.Clear(); + // url + url = string(S3fsCurl::IAM_cred_url); + requestHeaders = NULL; + responseHeaders.clear(); + bodydata.Clear(); - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - S3fsCurl::AddUserAgent(hCurl); // put User-Agent + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + S3fsCurl::AddUserAgent(hCurl); // put User-Agent - int result = RequestPerform(true); + int result = RequestPerform(true); - // analyzing response - if(0 == result && !S3fsCurl::SetIAMRoleFromMetaData(bodydata.str())){ - S3FS_PRN_ERR("Something 
error occurred, could not get IAM role name."); - result = -EIO; - } - bodydata.Clear(); + // analyzing response + if(0 == result && !S3fsCurl::SetIAMRoleFromMetaData(bodydata.str())){ + S3FS_PRN_ERR("Something error occurred, could not get IAM role name."); + result = -EIO; + } + bodydata.Clear(); - return (0 == result); + return (0 == result); } bool S3fsCurl::AddSseRequestHead(sse_type_t ssetype, string& ssevalue, bool is_only_c, bool is_copy) { - switch(ssetype){ - case sse_type_t::SSE_DISABLE: - return true; - case sse_type_t::SSE_S3: - if(!is_only_c){ - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption", "AES256"); + switch(ssetype){ + case sse_type_t::SSE_DISABLE: + return true; + case sse_type_t::SSE_S3: + if(!is_only_c){ + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption", "AES256"); + } + return true; + case sse_type_t::SSE_C: + { + string sseckey; + if(S3fsCurl::GetSseKey(ssevalue, sseckey)){ + if(is_copy){ + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-copy-source-server-side-encryption-customer-algorithm", "AES256"); + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-copy-source-server-side-encryption-customer-key", sseckey.c_str()); + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-copy-source-server-side-encryption-customer-key-md5", ssevalue.c_str()); + }else{ + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-customer-algorithm", "AES256"); + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-customer-key", sseckey.c_str()); + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-customer-key-md5", ssevalue.c_str()); + } + }else{ + S3FS_PRN_WARN("Failed to insert SSE-C header."); + } + return true; + } + case sse_type_t::SSE_KMS: + if(!is_only_c){ + if(ssevalue.empty()){ + ssevalue = S3fsCurl::GetSseKmsId(); + } + requestHeaders 
= curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption", "aws:kms"); + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-aws-kms-key-id", ssevalue.c_str()); + } + return true; } - return true; - case sse_type_t::SSE_C: { - string sseckey; - if(S3fsCurl::GetSseKey(ssevalue, sseckey)){ - if(is_copy){ - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-copy-source-server-side-encryption-customer-algorithm", "AES256"); - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-copy-source-server-side-encryption-customer-key", sseckey.c_str()); - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-copy-source-server-side-encryption-customer-key-md5", ssevalue.c_str()); - }else{ - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-customer-algorithm", "AES256"); - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-customer-key", sseckey.c_str()); - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-customer-key-md5", ssevalue.c_str()); - } - }else{ - S3FS_PRN_WARN("Failed to insert SSE-C header."); - } - return true; - } - case sse_type_t::SSE_KMS: - if(!is_only_c){ - if(ssevalue.empty()){ - ssevalue = S3fsCurl::GetSseKmsId(); - } - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption", "aws:kms"); - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-aws-kms-key-id", ssevalue.c_str()); - } - return true; - } - S3FS_PRN_ERR("sse type is unknown(%d).", static_cast(S3fsCurl::ssetype)); - return false; + S3FS_PRN_ERR("sse type is unknown(%d).", static_cast(S3fsCurl::ssetype)); + + return false; } // @@ -3133,467 +2833,466 @@ bool S3fsCurl::AddSseRequestHead(sse_type_t ssetype, string& ssevalue, bool is_o // bool S3fsCurl::PreHeadRequest(const char* tpath, const char* bpath, const char* savedpath, int ssekey_pos) { - 
S3FS_PRN_INFO3("[tpath=%s][bpath=%s][save=%s][sseckeypos=%d]", SAFESTRPTR(tpath), SAFESTRPTR(bpath), SAFESTRPTR(savedpath), ssekey_pos); + S3FS_PRN_INFO3("[tpath=%s][bpath=%s][save=%s][sseckeypos=%d]", SAFESTRPTR(tpath), SAFESTRPTR(bpath), SAFESTRPTR(savedpath), ssekey_pos); - if(!tpath){ - return false; - } - string resource; - string turl; - MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); - - // libcurl 7.17 does deep copy of url, deep copy "stable" url - url = prepare_url(turl.c_str()); - path = get_realpath(tpath); - base_path = SAFESTRPTR(bpath); - saved_path = SAFESTRPTR(savedpath); - requestHeaders = NULL; - responseHeaders.clear(); - - // requestHeaders - if(0 <= ssekey_pos){ - string md5; - if(!S3fsCurl::GetSseKeyMd5(ssekey_pos, md5) || !AddSseRequestHead(sse_type_t::SSE_C, md5, true, false)){ - S3FS_PRN_ERR("Failed to set SSE-C headers for sse-c key pos(%d)(=md5(%s)).", ssekey_pos, md5.c_str()); - return false; + if(!tpath){ + return false; } - } - b_ssekey_pos = ssekey_pos; + string resource; + string turl; + MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); - op = "HEAD"; - type = REQTYPE_HEAD; + // libcurl 7.17 does deep copy of url, deep copy "stable" url + url = prepare_url(turl.c_str()); + path = get_realpath(tpath); + base_path = SAFESTRPTR(bpath); + saved_path = SAFESTRPTR(savedpath); + requestHeaders = NULL; + responseHeaders.clear(); - // set lazy function - fpLazySetup = PreHeadRequestSetCurlOpts; + // requestHeaders + if(0 <= ssekey_pos){ + string md5; + if(!S3fsCurl::GetSseKeyMd5(ssekey_pos, md5) || !AddSseRequestHead(sse_type_t::SSE_C, md5, true, false)){ + S3FS_PRN_ERR("Failed to set SSE-C headers for sse-c key pos(%d)(=md5(%s)).", ssekey_pos, md5.c_str()); + return false; + } + } + b_ssekey_pos = ssekey_pos; - return true; + op = "HEAD"; + type = REQTYPE_HEAD; + + // set lazy function + fpLazySetup = PreHeadRequestSetCurlOpts; + + return true; } int S3fsCurl::HeadRequest(const char* tpath, headers_t& meta) { - int 
result = -1; + int result = -1; - S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); + S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); - // At first, try to get without SSE-C headers - if(!PreHeadRequest(tpath) || !fpLazySetup || !fpLazySetup(this) || 0 != (result = RequestPerform())){ - // If has SSE-C keys, try to get with all SSE-C keys. - for(int pos = 0; static_cast(pos) < S3fsCurl::sseckeys.size(); pos++){ - if(!DestroyCurlHandle()){ - break; - } - if(!PreHeadRequest(tpath, NULL, NULL, pos)){ - break; - } - if(!fpLazySetup || !fpLazySetup(this)){ - S3FS_PRN_ERR("Failed to lazy setup in single head request."); - break; - } - if(0 == (result = RequestPerform())){ - break; - } + // At first, try to get without SSE-C headers + if(!PreHeadRequest(tpath) || !fpLazySetup || !fpLazySetup(this) || 0 != (result = RequestPerform())){ + // If has SSE-C keys, try to get with all SSE-C keys. + for(int pos = 0; static_cast(pos) < S3fsCurl::sseckeys.size(); pos++){ + if(!DestroyCurlHandle()){ + break; + } + if(!PreHeadRequest(tpath, NULL, NULL, pos)){ + break; + } + if(!fpLazySetup || !fpLazySetup(this)){ + S3FS_PRN_ERR("Failed to lazy setup in single head request."); + break; + } + if(0 == (result = RequestPerform())){ + break; + } + } + if(0 != result){ + DestroyCurlHandle(); // not check result. + return result; + } } - if(0 != result){ - DestroyCurlHandle(); // not check result. - return result; - } - } - // file exists in s3 - // fixme: clean this up. 
- meta.clear(); - for(headers_t::iterator iter = responseHeaders.begin(); iter != responseHeaders.end(); ++iter){ - string key = lower(iter->first); - string value = iter->second; - if(key == "content-type"){ - meta[iter->first] = value; - }else if(key == "content-length"){ - meta[iter->first] = value; - }else if(key == "etag"){ - meta[iter->first] = value; - }else if(key == "last-modified"){ - meta[iter->first] = value; - }else if(key.substr(0, 5) == "x-amz"){ - meta[key] = value; // key is lower case for "x-amz" + // file exists in s3 + // fixme: clean this up. + meta.clear(); + for(headers_t::iterator iter = responseHeaders.begin(); iter != responseHeaders.end(); ++iter){ + string key = lower(iter->first); + string value = iter->second; + if(key == "content-type"){ + meta[iter->first] = value; + }else if(key == "content-length"){ + meta[iter->first] = value; + }else if(key == "etag"){ + meta[iter->first] = value; + }else if(key == "last-modified"){ + meta[iter->first] = value; + }else if(key.substr(0, 5) == "x-amz"){ + meta[key] = value; // key is lower case for "x-amz" + } } - } - return 0; + return 0; } int S3fsCurl::PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy) { - S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); + S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); - if(!tpath){ - return -1; - } - if(!CreateCurlHandle()){ - return -1; - } - string resource; - string turl; - MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); + if(!tpath){ + return -1; + } + if(!CreateCurlHandle()){ + return -1; + } + string resource; + string turl; + MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); - url = prepare_url(turl.c_str()); - path = get_realpath(tpath); - requestHeaders = NULL; - responseHeaders.clear(); - bodydata.Clear(); + url = prepare_url(turl.c_str()); + path = get_realpath(tpath); + requestHeaders = NULL; + responseHeaders.clear(); + bodydata.Clear(); - string contype = S3fsCurl::LookupMimeType(string(tpath)); - 
requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str()); + string contype = S3fsCurl::LookupMimeType(string(tpath)); + requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str()); - // Make request headers - for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ - string key = lower(iter->first); - string value = iter->second; - if(key.substr(0, 9) == "x-amz-acl"){ - // not set value, but after set it. - }else if(key.substr(0, 10) == "x-amz-meta"){ - requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); - }else if(key == "x-amz-copy-source"){ - requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); - }else if(key == "x-amz-server-side-encryption" && value != "aws:kms"){ - // Only copy mode. - if(is_copy && !AddSseRequestHead(sse_type_t::SSE_S3, value, false, true)){ - S3FS_PRN_WARN("Failed to insert SSE-S3 header."); - } - }else if(key == "x-amz-server-side-encryption-aws-kms-key-id"){ - // Only copy mode. - if(is_copy && !value.empty() && !AddSseRequestHead(sse_type_t::SSE_KMS, value, false, true)){ - S3FS_PRN_WARN("Failed to insert SSE-KMS header."); - } - }else if(key == "x-amz-server-side-encryption-customer-key-md5"){ - // Only copy mode. - if(is_copy){ - if(!AddSseRequestHead(sse_type_t::SSE_C, value, true, true) || !AddSseRequestHead(sse_type_t::SSE_C, value, true, false)){ - S3FS_PRN_WARN("Failed to insert SSE-C header."); + // Make request headers + for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ + string key = lower(iter->first); + string value = iter->second; + if(key.substr(0, 9) == "x-amz-acl"){ + // not set value, but after set it. 
+ }else if(key.substr(0, 10) == "x-amz-meta"){ + requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); + }else if(key == "x-amz-copy-source"){ + requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); + }else if(key == "x-amz-server-side-encryption" && value != "aws:kms"){ + // Only copy mode. + if(is_copy && !AddSseRequestHead(sse_type_t::SSE_S3, value, false, true)){ + S3FS_PRN_WARN("Failed to insert SSE-S3 header."); + } + }else if(key == "x-amz-server-side-encryption-aws-kms-key-id"){ + // Only copy mode. + if(is_copy && !value.empty() && !AddSseRequestHead(sse_type_t::SSE_KMS, value, false, true)){ + S3FS_PRN_WARN("Failed to insert SSE-KMS header."); + } + }else if(key == "x-amz-server-side-encryption-customer-key-md5"){ + // Only copy mode. + if(is_copy){ + if(!AddSseRequestHead(sse_type_t::SSE_C, value, true, true) || !AddSseRequestHead(sse_type_t::SSE_C, value, true, false)){ + S3FS_PRN_WARN("Failed to insert SSE-C header."); + } + } } - } } - } - // "x-amz-acl", storage class, sse - if(S3fsCurl::default_acl != acl_t::PRIVATE){ - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-acl", S3fsCurl::default_acl.str()); - } - if(GetStorageClass() != storage_class_t::STANDARD){ - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", GetStorageClass().str()); - } - // SSE - if(!is_copy){ - string ssevalue; - if(!AddSseRequestHead(S3fsCurl::GetSseType(), ssevalue, false, false)){ - S3FS_PRN_WARN("Failed to set SSE header, but continue..."); + // "x-amz-acl", storage class, sse + if(S3fsCurl::default_acl != acl_t::PRIVATE){ + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-acl", S3fsCurl::default_acl.str()); } - } - if(is_use_ahbe){ - // set additional header by ahbe conf - requestHeaders = AdditionalHeader::get()->AddHeader(requestHeaders, tpath); - } - - op = "PUT"; - type = REQTYPE_PUTHEAD; - - // setopt - curl_easy_setopt(hCurl, 
CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true); // HTTP PUT - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0); // Content-Length - S3fsCurl::AddUserAgent(hCurl); // put User-Agent - - S3FS_PRN_INFO3("copying... [path=%s]", tpath); - - int result = RequestPerform(); - if(0 == result){ - // PUT returns 200 status code with something error, thus - // we need to check body. - // - // example error body: - // - // - // AccessDenied - // Access Denied - // E4CA6F6767D6685C - // BHzLOATeDuvN8Es1wI8IcERq4kl4dc2A9tOB8Yqr39Ys6fl7N4EJ8sjGiVvu6wLP - // - // - const char* pstrbody = bodydata.str(); - if(!pstrbody || NULL != strcasestr(pstrbody, "")){ - S3FS_PRN_ERR("PutHeadRequest get 200 status response, but it included error body(or NULL). The request failed during copying the object in S3."); - S3FS_PRN_DBG("PutHeadRequest Response Body : %s", (pstrbody ? 
pstrbody : "(null)")); - result = -EIO; + if(GetStorageClass() != storage_class_t::STANDARD){ + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", GetStorageClass().str()); + } + // SSE + if(!is_copy){ + string ssevalue; + if(!AddSseRequestHead(S3fsCurl::GetSseType(), ssevalue, false, false)){ + S3FS_PRN_WARN("Failed to set SSE header, but continue..."); + } + } + if(is_use_ahbe){ + // set additional header by ahbe conf + requestHeaders = AdditionalHeader::get()->AddHeader(requestHeaders, tpath); } - } - bodydata.Clear(); - return result; + op = "PUT"; + type = REQTYPE_PUTHEAD; + + // setopt + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true); // HTTP PUT + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0); // Content-Length + S3fsCurl::AddUserAgent(hCurl); // put User-Agent + + S3FS_PRN_INFO3("copying... [path=%s]", tpath); + + int result = RequestPerform(); + if(0 == result){ + // PUT returns 200 status code with something error, thus + // we need to check body. + // + // example error body: + // + // + // AccessDenied + // Access Denied + // E4CA6F6767D6685C + // BHzLOATeDuvN8Es1wI8IcERq4kl4dc2A9tOB8Yqr39Ys6fl7N4EJ8sjGiVvu6wLP + // + // + const char* pstrbody = bodydata.str(); + if(!pstrbody || NULL != strcasestr(pstrbody, "")){ + S3FS_PRN_ERR("PutHeadRequest get 200 status response, but it included error body(or NULL). The request failed during copying the object in S3."); + S3FS_PRN_DBG("PutHeadRequest Response Body : %s", (pstrbody ? 
pstrbody : "(null)")); + result = -EIO; + } + } + bodydata.Clear(); + + return result; } int S3fsCurl::PutRequest(const char* tpath, headers_t& meta, int fd) { - struct stat st; - FILE* file = NULL; + struct stat st; + FILE* file = NULL; - S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); + S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); - if(!tpath){ - return -1; - } - if(-1 != fd){ - // duplicate fd - int fd2; - if(-1 == (fd2 = dup(fd)) || -1 == fstat(fd2, &st) || 0 != lseek(fd2, 0, SEEK_SET) || NULL == (file = fdopen(fd2, "rb"))){ - S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno); - if(-1 != fd2){ - close(fd2); - } - return -errno; + if(!tpath){ + return -1; + } + if(-1 != fd){ + // duplicate fd + int fd2; + if(-1 == (fd2 = dup(fd)) || -1 == fstat(fd2, &st) || 0 != lseek(fd2, 0, SEEK_SET) || NULL == (file = fdopen(fd2, "rb"))){ + S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno); + if(-1 != fd2){ + close(fd2); + } + return -errno; + } + b_infile = file; + }else{ + // This case is creating zero byte object.(calling by create_file_object()) + S3FS_PRN_INFO3("create zero byte file object."); } - b_infile = file; - }else{ - // This case is creating zero byte object.(calling by create_file_object()) - S3FS_PRN_INFO3("create zero byte file object."); - } - if(!CreateCurlHandle()){ + if(!CreateCurlHandle()){ + if(file){ + fclose(file); + } + return -1; + } + string resource; + string turl; + MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); + + url = prepare_url(turl.c_str()); + path = get_realpath(tpath); + requestHeaders = NULL; + responseHeaders.clear(); + bodydata.Clear(); + + // Make request headers + string strMD5; + if(-1 != fd && S3fsCurl::is_content_md5){ + strMD5 = s3fs_get_content_md5(fd); + requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-MD5", strMD5.c_str()); + } + + string contype = S3fsCurl::LookupMimeType(string(tpath)); + requestHeaders = curl_slist_sort_insert(requestHeaders, 
"Content-Type", contype.c_str()); + + for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ + string key = lower(iter->first); + string value = iter->second; + if(key.substr(0, 9) == "x-amz-acl"){ + // not set value, but after set it. + }else if(key.substr(0, 10) == "x-amz-meta"){ + requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); + }else if(key == "x-amz-server-side-encryption" && value != "aws:kms"){ + // skip this header, because this header is specified after logic. + }else if(key == "x-amz-server-side-encryption-aws-kms-key-id"){ + // skip this header, because this header is specified after logic. + }else if(key == "x-amz-server-side-encryption-customer-key-md5"){ + // skip this header, because this header is specified after logic. + } + } + // "x-amz-acl", storage class, sse + if(S3fsCurl::default_acl != acl_t::PRIVATE){ + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-acl", S3fsCurl::default_acl.str()); + } + if(GetStorageClass() != storage_class_t::STANDARD){ + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", GetStorageClass().str()); + } + // SSE + string ssevalue; + if(!AddSseRequestHead(S3fsCurl::GetSseType(), ssevalue, false, false)){ + S3FS_PRN_WARN("Failed to set SSE header, but continue..."); + } + if(is_use_ahbe){ + // set additional header by ahbe conf + requestHeaders = AdditionalHeader::get()->AddHeader(requestHeaders, tpath); + } + + op = "PUT"; + type = REQTYPE_PUT; + + // setopt + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true); // HTTP PUT + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); if(file){ - fclose(file); + curl_easy_setopt(hCurl, CURLOPT_INFILESIZE_LARGE, static_cast(st.st_size)); // Content-Length + curl_easy_setopt(hCurl, CURLOPT_INFILE, file); + }else{ + curl_easy_setopt(hCurl, 
CURLOPT_INFILESIZE, 0); // Content-Length: 0 } - return -1; - } - string resource; - string turl; - MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); + S3fsCurl::AddUserAgent(hCurl); // put User-Agent - url = prepare_url(turl.c_str()); - path = get_realpath(tpath); - requestHeaders = NULL; - responseHeaders.clear(); - bodydata.Clear(); + S3FS_PRN_INFO3("uploading... [path=%s][fd=%d][size=%lld]", tpath, fd, static_cast(-1 != fd ? st.st_size : 0)); - // Make request headers - string strMD5; - if(-1 != fd && S3fsCurl::is_content_md5){ - strMD5 = s3fs_get_content_md5(fd); - requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-MD5", strMD5.c_str()); - } - - string contype = S3fsCurl::LookupMimeType(string(tpath)); - requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str()); - - for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ - string key = lower(iter->first); - string value = iter->second; - if(key.substr(0, 9) == "x-amz-acl"){ - // not set value, but after set it. - }else if(key.substr(0, 10) == "x-amz-meta"){ - requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); - }else if(key == "x-amz-server-side-encryption" && value != "aws:kms"){ - // skip this header, because this header is specified after logic. - }else if(key == "x-amz-server-side-encryption-aws-kms-key-id"){ - // skip this header, because this header is specified after logic. - }else if(key == "x-amz-server-side-encryption-customer-key-md5"){ - // skip this header, because this header is specified after logic. 
+ int result = RequestPerform(); + bodydata.Clear(); + if(file){ + fclose(file); } - } - // "x-amz-acl", storage class, sse - if(S3fsCurl::default_acl != acl_t::PRIVATE){ - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-acl", S3fsCurl::default_acl.str()); - } - if(GetStorageClass() != storage_class_t::STANDARD){ - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", GetStorageClass().str()); - } - // SSE - string ssevalue; - if(!AddSseRequestHead(S3fsCurl::GetSseType(), ssevalue, false, false)){ - S3FS_PRN_WARN("Failed to set SSE header, but continue..."); - } - if(is_use_ahbe){ - // set additional header by ahbe conf - requestHeaders = AdditionalHeader::get()->AddHeader(requestHeaders, tpath); - } - - op = "PUT"; - type = REQTYPE_PUT; - - // setopt - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true); // HTTP PUT - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - if(file){ - curl_easy_setopt(hCurl, CURLOPT_INFILESIZE_LARGE, static_cast(st.st_size)); // Content-Length - curl_easy_setopt(hCurl, CURLOPT_INFILE, file); - }else{ - curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0); // Content-Length: 0 - } - S3fsCurl::AddUserAgent(hCurl); // put User-Agent - - S3FS_PRN_INFO3("uploading... [path=%s][fd=%d][size=%lld]", tpath, fd, static_cast(-1 != fd ? 
st.st_size : 0)); - - int result = RequestPerform(); - bodydata.Clear(); - if(file){ - fclose(file); - } - - return result; + return result; } int S3fsCurl::PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size, sse_type_t ssetype, string& ssevalue) { - S3FS_PRN_INFO3("[tpath=%s][start=%lld][size=%lld]", SAFESTRPTR(tpath), static_cast(start), static_cast(size)); + S3FS_PRN_INFO3("[tpath=%s][start=%lld][size=%lld]", SAFESTRPTR(tpath), static_cast(start), static_cast(size)); - if(!tpath || -1 == fd || 0 > start || 0 > size){ - return -1; - } + if(!tpath || -1 == fd || 0 > start || 0 > size){ + return -1; + } - string resource; - string turl; - MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); + string resource; + string turl; + MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); - url = prepare_url(turl.c_str()); - path = get_realpath(tpath); - requestHeaders = NULL; - responseHeaders.clear(); + url = prepare_url(turl.c_str()); + path = get_realpath(tpath); + requestHeaders = NULL; + responseHeaders.clear(); - if(-1 != start && 0 < size){ - string range = "bytes="; - range += str(start); - range += "-"; - range += str(start + size - 1); - requestHeaders = curl_slist_sort_insert(requestHeaders, "Range", range.c_str()); - } - // SSE - if(!AddSseRequestHead(ssetype, ssevalue, true, false)){ - S3FS_PRN_WARN("Failed to set SSE header, but continue..."); - } + if(-1 != start && 0 < size){ + string range = "bytes="; + range += str(start); + range += "-"; + range += str(start + size - 1); + requestHeaders = curl_slist_sort_insert(requestHeaders, "Range", range.c_str()); + } + // SSE + if(!AddSseRequestHead(ssetype, ssevalue, true, false)){ + S3FS_PRN_WARN("Failed to set SSE header, but continue..."); + } - op = "GET"; - type = REQTYPE_GET; + op = "GET"; + type = REQTYPE_GET; - // set lazy function - fpLazySetup = PreGetObjectRequestSetCurlOpts; + // set lazy function + fpLazySetup = PreGetObjectRequestSetCurlOpts; - // set info for 
callback func. - // (use only fd, startpos and size, other member is not used.) - partdata.clear(); - partdata.fd = fd; - partdata.startpos = start; - partdata.size = size; - b_partdata_startpos = start; - b_partdata_size = size; - b_ssetype = ssetype; - b_ssevalue = ssevalue; - b_ssekey_pos = -1; // not use this value for get object. + // set info for callback func. + // (use only fd, startpos and size, other member is not used.) + partdata.clear(); + partdata.fd = fd; + partdata.startpos = start; + partdata.size = size; + b_partdata_startpos = start; + b_partdata_size = size; + b_ssetype = ssetype; + b_ssevalue = ssevalue; + b_ssekey_pos = -1; // not use this value for get object. - return 0; + return 0; } int S3fsCurl::GetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size) { - int result; + int result; - S3FS_PRN_INFO3("[tpath=%s][start=%lld][size=%lld]", SAFESTRPTR(tpath), static_cast(start), static_cast(size)); + S3FS_PRN_INFO3("[tpath=%s][start=%lld][size=%lld]", SAFESTRPTR(tpath), static_cast(start), static_cast(size)); - if(!tpath){ - return -1; - } - sse_type_t ssetype = sse_type_t::SSE_DISABLE; - string ssevalue; - if(!get_object_sse_type(tpath, ssetype, ssevalue)){ - S3FS_PRN_WARN("Failed to get SSE type for file(%s).", SAFESTRPTR(tpath)); - } + if(!tpath){ + return -1; + } + sse_type_t ssetype = sse_type_t::SSE_DISABLE; + string ssevalue; + if(!get_object_sse_type(tpath, ssetype, ssevalue)){ + S3FS_PRN_WARN("Failed to get SSE type for file(%s).", SAFESTRPTR(tpath)); + } + + if(0 != (result = PreGetObjectRequest(tpath, fd, start, size, ssetype, ssevalue))){ + return result; + } + if(!fpLazySetup || !fpLazySetup(this)){ + S3FS_PRN_ERR("Failed to lazy setup in single get object request."); + return -1; + } + + S3FS_PRN_INFO3("downloading... 
[path=%s][fd=%d]", tpath, fd); + + result = RequestPerform(); + partdata.clear(); - if(0 != (result = PreGetObjectRequest(tpath, fd, start, size, ssetype, ssevalue))){ return result; - } - if(!fpLazySetup || !fpLazySetup(this)){ - S3FS_PRN_ERR("Failed to lazy setup in single get object request."); - return -1; - } - - S3FS_PRN_INFO3("downloading... [path=%s][fd=%d]", tpath, fd); - - result = RequestPerform(); - partdata.clear(); - - return result; } int S3fsCurl::CheckBucket() { - S3FS_PRN_INFO3("check a bucket."); + S3FS_PRN_INFO3("check a bucket."); - if(!CreateCurlHandle()){ - return -1; - } - string resource; - string turl; - MakeUrlResource(get_realpath("/").c_str(), resource, turl); + if(!CreateCurlHandle()){ + return -1; + } + string resource; + string turl; + MakeUrlResource(get_realpath("/").c_str(), resource, turl); - url = prepare_url(turl.c_str()); - path = get_realpath("/"); - requestHeaders = NULL; - responseHeaders.clear(); - bodydata.Clear(); + url = prepare_url(turl.c_str()); + path = get_realpath("/"); + requestHeaders = NULL; + responseHeaders.clear(); + bodydata.Clear(); - op = "GET"; - type = REQTYPE_CHKBUCKET; + op = "GET"; + type = REQTYPE_CHKBUCKET; - // setopt - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - S3fsCurl::AddUserAgent(hCurl); // put User-Agent + // setopt + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + S3fsCurl::AddUserAgent(hCurl); // put User-Agent - int result = RequestPerform(); - if (result != 0) { - S3FS_PRN_ERR("Check bucket failed, S3 response: %s", bodydata.str()); - } - return result; + int result = RequestPerform(); + if (result != 0) { + S3FS_PRN_ERR("Check bucket failed, S3 response: %s", bodydata.str()); + } + return result; } int 
S3fsCurl::ListBucketRequest(const char* tpath, const char* query) { - S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); + S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); - if(!tpath){ - return -1; - } - if(!CreateCurlHandle()){ - return -1; - } - string resource; - string turl; - MakeUrlResource("", resource, turl); // NOTICE: path is "". - if(query){ - turl += "?"; - turl += query; - query_string = query; - } + if(!tpath){ + return -1; + } + if(!CreateCurlHandle()){ + return -1; + } + string resource; + string turl; + MakeUrlResource("", resource, turl); // NOTICE: path is "". + if(query){ + turl += "?"; + turl += query; + query_string = query; + } - url = prepare_url(turl.c_str()); - path = get_realpath(tpath); - requestHeaders = NULL; - responseHeaders.clear(); - bodydata.Clear(); + url = prepare_url(turl.c_str()); + path = get_realpath(tpath); + requestHeaders = NULL; + responseHeaders.clear(); + bodydata.Clear(); - op = "GET"; - type = REQTYPE_LISTBUCKET; + op = "GET"; + type = REQTYPE_LISTBUCKET; - // setopt - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - if(S3fsCurl::is_verbose){ - curl_easy_setopt(hCurl, CURLOPT_DEBUGFUNCTION, S3fsCurl::CurlDebugBodyInFunc); // replace debug function - } - S3fsCurl::AddUserAgent(hCurl); // put User-Agent + // setopt + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + if(S3fsCurl::is_verbose){ + curl_easy_setopt(hCurl, CURLOPT_DEBUGFUNCTION, S3fsCurl::CurlDebugBodyInFunc); // replace debug function + } + S3fsCurl::AddUserAgent(hCurl); // put User-Agent - return RequestPerform(); + return RequestPerform(); } // @@ -3607,246 +3306,246 @@ int S3fsCurl::ListBucketRequest(const char* tpath, const char* query) // int S3fsCurl::PreMultipartPostRequest(const 
char* tpath, headers_t& meta, string& upload_id, bool is_copy) { - S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); + S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); - if(!tpath){ - return -1; - } - if(!CreateCurlHandle()){ - return -1; - } - string resource; - string turl; - MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); + if(!tpath){ + return -1; + } + if(!CreateCurlHandle()){ + return -1; + } + string resource; + string turl; + MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); - query_string = "uploads"; - turl += "?" + query_string; - url = prepare_url(turl.c_str()); - path = get_realpath(tpath); - requestHeaders = NULL; - bodydata.Clear(); - responseHeaders.clear(); + query_string = "uploads"; + turl += "?" + query_string; + url = prepare_url(turl.c_str()); + path = get_realpath(tpath); + requestHeaders = NULL; + bodydata.Clear(); + responseHeaders.clear(); - string contype = S3fsCurl::LookupMimeType(string(tpath)); + string contype = S3fsCurl::LookupMimeType(string(tpath)); - for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ - string key = lower(iter->first); - string value = iter->second; - if(key.substr(0, 9) == "x-amz-acl"){ - // not set value, but after set it. - }else if(key.substr(0, 10) == "x-amz-meta"){ - requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); - }else if(key == "x-amz-server-side-encryption" && value != "aws:kms"){ - // Only copy mode. - if(is_copy && !AddSseRequestHead(sse_type_t::SSE_S3, value, false, true)){ - S3FS_PRN_WARN("Failed to insert SSE-S3 header."); - } - }else if(key == "x-amz-server-side-encryption-aws-kms-key-id"){ - // Only copy mode. - if(is_copy && !value.empty() && !AddSseRequestHead(sse_type_t::SSE_KMS, value, false, true)){ - S3FS_PRN_WARN("Failed to insert SSE-KMS header."); - } - }else if(key == "x-amz-server-side-encryption-customer-key-md5"){ - // Only copy mode. 
- if(is_copy){ - if(!AddSseRequestHead(sse_type_t::SSE_C, value, true, true) || !AddSseRequestHead(sse_type_t::SSE_C, value, true, false)){ - S3FS_PRN_WARN("Failed to insert SSE-C header."); + for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ + string key = lower(iter->first); + string value = iter->second; + if(key.substr(0, 9) == "x-amz-acl"){ + // not set value, but after set it. + }else if(key.substr(0, 10) == "x-amz-meta"){ + requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); + }else if(key == "x-amz-server-side-encryption" && value != "aws:kms"){ + // Only copy mode. + if(is_copy && !AddSseRequestHead(sse_type_t::SSE_S3, value, false, true)){ + S3FS_PRN_WARN("Failed to insert SSE-S3 header."); + } + }else if(key == "x-amz-server-side-encryption-aws-kms-key-id"){ + // Only copy mode. + if(is_copy && !value.empty() && !AddSseRequestHead(sse_type_t::SSE_KMS, value, false, true)){ + S3FS_PRN_WARN("Failed to insert SSE-KMS header."); + } + }else if(key == "x-amz-server-side-encryption-customer-key-md5"){ + // Only copy mode. 
+ if(is_copy){ + if(!AddSseRequestHead(sse_type_t::SSE_C, value, true, true) || !AddSseRequestHead(sse_type_t::SSE_C, value, true, false)){ + S3FS_PRN_WARN("Failed to insert SSE-C header."); + } + } } - } } - } - // "x-amz-acl", storage class, sse - if(S3fsCurl::default_acl != acl_t::PRIVATE){ - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-acl", S3fsCurl::default_acl.str()); - } - if(GetStorageClass() != storage_class_t::STANDARD){ - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", GetStorageClass().str()); - } - // SSE - if(!is_copy){ - string ssevalue; - if(!AddSseRequestHead(S3fsCurl::GetSseType(), ssevalue, false, false)){ - S3FS_PRN_WARN("Failed to set SSE header, but continue..."); + // "x-amz-acl", storage class, sse + if(S3fsCurl::default_acl != acl_t::PRIVATE){ + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-acl", S3fsCurl::default_acl.str()); + } + if(GetStorageClass() != storage_class_t::STANDARD){ + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", GetStorageClass().str()); + } + // SSE + if(!is_copy){ + string ssevalue; + if(!AddSseRequestHead(S3fsCurl::GetSseType(), ssevalue, false, false)){ + S3FS_PRN_WARN("Failed to set SSE header, but continue..."); + } + } + if(is_use_ahbe){ + // set additional header by ahbe conf + requestHeaders = AdditionalHeader::get()->AddHeader(requestHeaders, tpath); } - } - if(is_use_ahbe){ - // set additional header by ahbe conf - requestHeaders = AdditionalHeader::get()->AddHeader(requestHeaders, tpath); - } - requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept", NULL); - requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Length", NULL); - requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str()); + requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept", NULL); + requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Length", NULL); + requestHeaders = 
curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str()); - op = "POST"; - type = REQTYPE_PREMULTIPOST; + op = "POST"; + type = REQTYPE_PREMULTIPOST; - // setopt - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_POST, true); // POST - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, 0); - S3fsCurl::AddUserAgent(hCurl); // put User-Agent + // setopt + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_POST, true); // POST + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, 0); + S3fsCurl::AddUserAgent(hCurl); // put User-Agent + + // request + int result; + if(0 != (result = RequestPerform())){ + bodydata.Clear(); + return result; + } + + if(!simple_parse_xml(bodydata.str(), bodydata.size(), "UploadId", upload_id)){ + bodydata.Clear(); + return -1; + } - // request - int result; - if(0 != (result = RequestPerform())){ bodydata.Clear(); - return result; - } - - if(!simple_parse_xml(bodydata.str(), bodydata.size(), "UploadId", upload_id)){ - bodydata.Clear(); - return -1; - } - - bodydata.Clear(); - return 0; + return 0; } int S3fsCurl::CompleteMultipartPostRequest(const char* tpath, const string& upload_id, etaglist_t& parts) { - S3FS_PRN_INFO3("[tpath=%s][parts=%zu]", SAFESTRPTR(tpath), parts.size()); + S3FS_PRN_INFO3("[tpath=%s][parts=%zu]", SAFESTRPTR(tpath), parts.size()); - if(!tpath){ - return -1; - } - - // make contents - string postContent; - postContent += "\n"; - for(int cnt = 0; cnt < (int)parts.size(); cnt++){ - if(0 == parts[cnt].length()){ - S3FS_PRN_ERR("%d file part is not finished uploading.", cnt + 1); - return -1; + if(!tpath){ + return -1; } - postContent += "\n"; - postContent += " " + str(cnt + 1) + 
"\n"; - postContent += " " + parts[cnt] + "\n"; - postContent += "\n"; - } - postContent += "\n"; - // set postdata - postdata = reinterpret_cast(postContent.c_str()); - b_postdata = postdata; - postdata_remaining = postContent.size(); // without null - b_postdata_remaining = postdata_remaining; + // make contents + string postContent; + postContent += "\n"; + for(int cnt = 0; cnt < (int)parts.size(); cnt++){ + if(0 == parts[cnt].length()){ + S3FS_PRN_ERR("%d file part is not finished uploading.", cnt + 1); + return -1; + } + postContent += "\n"; + postContent += " " + str(cnt + 1) + "\n"; + postContent += " " + parts[cnt] + "\n"; + postContent += "\n"; + } + postContent += "\n"; - if(!CreateCurlHandle()){ - return -1; - } - string resource; - string turl; - MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); + // set postdata + postdata = reinterpret_cast(postContent.c_str()); + b_postdata = postdata; + postdata_remaining = postContent.size(); // without null + b_postdata_remaining = postdata_remaining; - query_string = "uploadId=" + upload_id; - turl += "?" + query_string; - url = prepare_url(turl.c_str()); - path = get_realpath(tpath); - requestHeaders = NULL; - bodydata.Clear(); - responseHeaders.clear(); - string contype = "application/xml"; + if(!CreateCurlHandle()){ + return -1; + } + string resource; + string turl; + MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); - requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept", NULL); - requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str()); + query_string = "uploadId=" + upload_id; + turl += "?" 
+ query_string; + url = prepare_url(turl.c_str()); + path = get_realpath(tpath); + requestHeaders = NULL; + bodydata.Clear(); + responseHeaders.clear(); + string contype = "application/xml"; - op = "POST"; - type = REQTYPE_COMPLETEMULTIPOST; + requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept", NULL); + requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str()); - // setopt - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_POST, true); // POST - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, static_cast(postdata_remaining)); - curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this); - curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback); - if(S3fsCurl::is_verbose){ - curl_easy_setopt(hCurl, CURLOPT_DEBUGFUNCTION, S3fsCurl::CurlDebugBodyOutFunc); // replace debug function - } - S3fsCurl::AddUserAgent(hCurl); // put User-Agent + op = "POST"; + type = REQTYPE_COMPLETEMULTIPOST; - // request - int result = RequestPerform(); - bodydata.Clear(); - postdata = NULL; + // setopt + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_POST, true); // POST + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, static_cast(postdata_remaining)); + curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this); + curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback); + if(S3fsCurl::is_verbose){ + curl_easy_setopt(hCurl, CURLOPT_DEBUGFUNCTION, S3fsCurl::CurlDebugBodyOutFunc); // replace debug function + } + S3fsCurl::AddUserAgent(hCurl); // put User-Agent - return result; + // request + int result = RequestPerform(); + bodydata.Clear(); + postdata = NULL; + + return result; } int 
S3fsCurl::MultipartListRequest(string& body) { - S3FS_PRN_INFO3("list request(multipart)"); + S3FS_PRN_INFO3("list request(multipart)"); - if(!CreateCurlHandle()){ - return -1; - } - string resource; - string turl; - path = get_realpath("/"); - MakeUrlResource(path.c_str(), resource, turl); + if(!CreateCurlHandle()){ + return -1; + } + string resource; + string turl; + path = get_realpath("/"); + MakeUrlResource(path.c_str(), resource, turl); - query_string = "uploads"; - turl += "?" + query_string; - url = prepare_url(turl.c_str()); - requestHeaders = NULL; - responseHeaders.clear(); - bodydata.Clear(); + query_string = "uploads"; + turl += "?" + query_string; + url = prepare_url(turl.c_str()); + requestHeaders = NULL; + responseHeaders.clear(); + bodydata.Clear(); - requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept", NULL); + requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept", NULL); - op = "GET"; - type = REQTYPE_MULTILIST; + op = "GET"; + type = REQTYPE_MULTILIST; - // setopt - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); - curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); - S3fsCurl::AddUserAgent(hCurl); // put User-Agent + // setopt + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata); + curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + S3fsCurl::AddUserAgent(hCurl); // put User-Agent - int result; - if(0 == (result = RequestPerform()) && 0 < bodydata.size()){ - body = bodydata.str(); - }else{ - body = ""; - } - bodydata.Clear(); + int result; + if(0 == (result = RequestPerform()) && 0 < bodydata.size()){ + body = bodydata.str(); + }else{ + body = ""; + } + bodydata.Clear(); - return result; + return result; } int S3fsCurl::AbortMultipartUpload(const char* tpath, const string& upload_id) { - S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); + 
S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); - if(!tpath){ - return -1; - } - if(!CreateCurlHandle()){ - return -1; - } - string resource; - string turl; - MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); + if(!tpath){ + return -1; + } + if(!CreateCurlHandle()){ + return -1; + } + string resource; + string turl; + MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); - query_string = "uploadId=" + upload_id; - turl += "?" + query_string; - url = prepare_url(turl.c_str()); - path = get_realpath(tpath); - requestHeaders = NULL; - responseHeaders.clear(); + query_string = "uploadId=" + upload_id; + turl += "?" + query_string; + url = prepare_url(turl.c_str()); + path = get_realpath(tpath); + requestHeaders = NULL; + responseHeaders.clear(); - op = "DELETE"; - type = REQTYPE_ABORTMULTIUPLOAD; + op = "DELETE"; + type = REQTYPE_ABORTMULTIUPLOAD; - curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE"); - S3fsCurl::AddUserAgent(hCurl); // put User-Agent + curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE"); + S3fsCurl::AddUserAgent(hCurl); // put User-Agent - return RequestPerform(); + return RequestPerform(); } // @@ -3863,1007 +3562,446 @@ int S3fsCurl::AbortMultipartUpload(const char* tpath, const string& upload_id) // Content-MD5: pUNXr/BjKK5G2UKvaRRrOA== // Authorization: AWS VGhpcyBtZXNzYWdlIHNpZ25lZGGieSRlbHZpbmc= // - int S3fsCurl::UploadMultipartPostSetup(const char* tpath, int part_num, const string& upload_id) { - S3FS_PRN_INFO3("[tpath=%s][start=%lld][size=%lld][part=%d]", SAFESTRPTR(tpath), static_cast(partdata.startpos), static_cast(partdata.size), part_num); + S3FS_PRN_INFO3("[tpath=%s][start=%lld][size=%lld][part=%d]", SAFESTRPTR(tpath), static_cast(partdata.startpos), static_cast(partdata.size), part_num); - if(-1 == partdata.fd || -1 == partdata.startpos || -1 == partdata.size){ - return -1; - } - - requestHeaders = 
NULL; - - // make md5 and file pointer - if(S3fsCurl::is_content_md5){ - unsigned char *md5raw = s3fs_md5hexsum(partdata.fd, partdata.startpos, partdata.size); - if(md5raw == NULL){ - S3FS_PRN_ERR("Could not make md5 for file(part %d)", part_num); - return -1; + if(-1 == partdata.fd || -1 == partdata.startpos || -1 == partdata.size){ + return -1; } - partdata.etag = s3fs_hex(md5raw, get_md5_digest_length()); - char* md5base64p = s3fs_base64(md5raw, get_md5_digest_length()); - requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-MD5", md5base64p); - delete[] md5base64p; - delete[] md5raw; - } - // make request - query_string = "partNumber=" + str(part_num) + "&uploadId=" + upload_id; - string urlargs = "?" + query_string; - string resource; - string turl; - MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); + requestHeaders = NULL; - turl += urlargs; - url = prepare_url(turl.c_str()); - path = get_realpath(tpath); - bodydata.Clear(); - headdata.Clear(); - responseHeaders.clear(); - - // SSE - if(sse_type_t::SSE_C == S3fsCurl::GetSseType()){ - string ssevalue; - if(!AddSseRequestHead(S3fsCurl::GetSseType(), ssevalue, false, false)){ - S3FS_PRN_WARN("Failed to set SSE header, but continue..."); + // make md5 and file pointer + if(S3fsCurl::is_content_md5){ + unsigned char *md5raw = s3fs_md5hexsum(partdata.fd, partdata.startpos, partdata.size); + if(md5raw == NULL){ + S3FS_PRN_ERR("Could not make md5 for file(part %d)", part_num); + return -1; + } + partdata.etag = s3fs_hex(md5raw, get_md5_digest_length()); + char* md5base64p = s3fs_base64(md5raw, get_md5_digest_length()); + requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-MD5", md5base64p); + delete[] md5base64p; + delete[] md5raw; } - } - requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept", NULL); + // make request + query_string = "partNumber=" + str(part_num) + "&uploadId=" + upload_id; + string urlargs = "?" 
+ query_string; + string resource; + string turl; + MakeUrlResource(get_realpath(tpath).c_str(), resource, turl); - op = "PUT"; - type = REQTYPE_UPLOADMULTIPOST; + turl += urlargs; + url = prepare_url(turl.c_str()); + path = get_realpath(tpath); + bodydata.Clear(); + headdata.Clear(); + responseHeaders.clear(); - // set lazy function - fpLazySetup = UploadMultipartPostSetCurlOpts; + // SSE + if(sse_type_t::SSE_C == S3fsCurl::GetSseType()){ + string ssevalue; + if(!AddSseRequestHead(S3fsCurl::GetSseType(), ssevalue, false, false)){ + S3FS_PRN_WARN("Failed to set SSE header, but continue..."); + } + } - return 0; + requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept", NULL); + + op = "PUT"; + type = REQTYPE_UPLOADMULTIPOST; + + // set lazy function + fpLazySetup = UploadMultipartPostSetCurlOpts; + + return 0; } int S3fsCurl::UploadMultipartPostRequest(const char* tpath, int part_num, const string& upload_id) { - int result; + int result; - S3FS_PRN_INFO3("[tpath=%s][start=%lld][size=%lld][part=%d]", SAFESTRPTR(tpath), static_cast(partdata.startpos), static_cast(partdata.size), part_num); + S3FS_PRN_INFO3("[tpath=%s][start=%lld][size=%lld][part=%d]", SAFESTRPTR(tpath), static_cast(partdata.startpos), static_cast(partdata.size), part_num); + + // setup + if(0 != (result = S3fsCurl::UploadMultipartPostSetup(tpath, part_num, upload_id))){ + return result; + } + + if(!fpLazySetup || !fpLazySetup(this)){ + S3FS_PRN_ERR("Failed to lazy setup in multipart upload post request."); + return -1; + } + + // request + if(0 == (result = RequestPerform())){ + // UploadMultipartPostComplete returns true on success -> convert to 0 + result = !UploadMultipartPostComplete(); + } + + // closing + bodydata.Clear(); + headdata.Clear(); - // setup - if(0 != (result = S3fsCurl::UploadMultipartPostSetup(tpath, part_num, upload_id))){ return result; - } - - if(!fpLazySetup || !fpLazySetup(this)){ - S3FS_PRN_ERR("Failed to lazy setup in multipart upload post request."); - return -1; 
- } - - // request - if(0 == (result = RequestPerform())){ - // UploadMultipartPostComplete returns true on success -> convert to 0 - result = !UploadMultipartPostComplete(); - } - - // closing - bodydata.Clear(); - headdata.Clear(); - - return result; } int S3fsCurl::CopyMultipartPostSetup(const char* from, const char* to, int part_num, const string& upload_id, headers_t& meta) { - S3FS_PRN_INFO3("[from=%s][to=%s][part=%d]", SAFESTRPTR(from), SAFESTRPTR(to), part_num); + S3FS_PRN_INFO3("[from=%s][to=%s][part=%d]", SAFESTRPTR(from), SAFESTRPTR(to), part_num); - if(!from || !to){ - return -1; - } - query_string = "partNumber=" + str(part_num) + "&uploadId=" + upload_id; - string urlargs = "?" + query_string; - string resource; - string turl; - MakeUrlResource(get_realpath(to).c_str(), resource, turl); - - turl += urlargs; - url = prepare_url(turl.c_str()); - path = get_realpath(to); - requestHeaders = NULL; - responseHeaders.clear(); - bodydata.Clear(); - headdata.Clear(); - - string contype = S3fsCurl::LookupMimeType(string(to)); - requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str()); - - // Make request headers - for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ - string key = lower(iter->first); - string value = iter->second; - if(key == "x-amz-copy-source"){ - requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); - }else if(key == "x-amz-copy-source-range"){ - requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); + if(!from || !to){ + return -1; } - // NOTICE: x-amz-acl, x-amz-server-side-encryption is not set! - } + query_string = "partNumber=" + str(part_num) + "&uploadId=" + upload_id; + string urlargs = "?" 
+ query_string; + string resource; + string turl; + MakeUrlResource(get_realpath(to).c_str(), resource, turl); - op = "PUT"; - type = REQTYPE_COPYMULTIPOST; + turl += urlargs; + url = prepare_url(turl.c_str()); + path = get_realpath(to); + requestHeaders = NULL; + responseHeaders.clear(); + bodydata.Clear(); + headdata.Clear(); - // set lazy function - fpLazySetup = CopyMultipartPostSetCurlOpts; + string contype = S3fsCurl::LookupMimeType(string(to)); + requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str()); - // request - S3FS_PRN_INFO3("copying... [from=%s][to=%s][part=%d]", from, to, part_num); + // Make request headers + for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ + string key = lower(iter->first); + string value = iter->second; + if(key == "x-amz-copy-source"){ + requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); + }else if(key == "x-amz-copy-source-range"){ + requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); + } + // NOTICE: x-amz-acl, x-amz-server-side-encryption is not set! + } - return 0; + op = "PUT"; + type = REQTYPE_COPYMULTIPOST; + + // set lazy function + fpLazySetup = CopyMultipartPostSetCurlOpts; + + // request + S3FS_PRN_INFO3("copying... 
[from=%s][to=%s][part=%d]", from, to, part_num); + + return 0; } bool S3fsCurl::UploadMultipartPostComplete() { - headers_t::iterator it = responseHeaders.find("ETag"); - if (it == responseHeaders.end()) { - return false; - } - - // check etag(md5); - // - // The ETAG when using SSE_C and SSE_KMS does not reflect the MD5 we sent - // SSE_C: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html - // SSE_KMS is ignored in the above, but in the following it states the same in the highlights: - // https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html - // - if(S3fsCurl::is_content_md5 && sse_type_t::SSE_C != S3fsCurl::GetSseType() && sse_type_t::SSE_KMS != S3fsCurl::GetSseType()){ - if(!etag_equals(it->second, partdata.etag)){ - return false; + headers_t::iterator it = responseHeaders.find("ETag"); + if (it == responseHeaders.end()) { + return false; } - } - partdata.etaglist->at(partdata.etagpos).assign(it->second); - partdata.uploaded = true; - return true; + // check etag(md5); + // + // The ETAG when using SSE_C and SSE_KMS does not reflect the MD5 we sent + // SSE_C: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html + // SSE_KMS is ignored in the above, but in the following it states the same in the highlights: + // https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html + // + if(S3fsCurl::is_content_md5 && sse_type_t::SSE_C != S3fsCurl::GetSseType() && sse_type_t::SSE_KMS != S3fsCurl::GetSseType()){ + if(!etag_equals(it->second, partdata.etag)){ + return false; + } + } + partdata.etaglist->at(partdata.etagpos).assign(it->second); + partdata.uploaded = true; + + return true; } bool S3fsCurl::CopyMultipartPostCallback(S3fsCurl* s3fscurl) { - if(!s3fscurl){ - return false; - } + if(!s3fscurl){ + return false; + } - return s3fscurl->CopyMultipartPostComplete(); + return s3fscurl->CopyMultipartPostComplete(); } bool S3fsCurl::CopyMultipartPostComplete() { - std::string etag; - partdata.uploaded = 
simple_parse_xml(bodydata.str(), bodydata.size(), "ETag", etag); - if(etag.size() >= 2 && *etag.begin() == '"' && *etag.rbegin() == '"'){ - etag.assign(etag.substr(1, etag.size() - 2)); - } - partdata.etaglist->at(partdata.etagpos).assign(etag); + std::string etag; + partdata.uploaded = simple_parse_xml(bodydata.str(), bodydata.size(), "ETag", etag); + if(etag.size() >= 2 && *etag.begin() == '"' && *etag.rbegin() == '"'){ + etag.assign(etag.substr(1, etag.size() - 2)); + } + partdata.etaglist->at(partdata.etagpos).assign(etag); - bodydata.Clear(); - headdata.Clear(); + bodydata.Clear(); + headdata.Clear(); - return true; + return true; } bool S3fsCurl::MixMultipartPostComplete() { - bool result; - if(-1 == partdata.fd){ - result = CopyMultipartPostComplete(); - }else{ - result = UploadMultipartPostComplete(); - } - return result; + bool result; + if(-1 == partdata.fd){ + result = CopyMultipartPostComplete(); + }else{ + result = UploadMultipartPostComplete(); + } + return result; } int S3fsCurl::MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool is_copy) { - int result; - string upload_id; - off_t chunk; - off_t bytes_remaining; - etaglist_t list; - ostringstream strrange; + int result; + string upload_id; + off_t chunk; + off_t bytes_remaining; + etaglist_t list; + ostringstream strrange; - S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); + S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); - if(0 != (result = PreMultipartPostRequest(tpath, meta, upload_id, is_copy))){ - return result; - } - DestroyCurlHandle(); + if(0 != (result = PreMultipartPostRequest(tpath, meta, upload_id, is_copy))){ + return result; + } + DestroyCurlHandle(); - // Initialize S3fsMultiCurl - S3fsMultiCurl curlmulti(GetMaxParallelCount()); - curlmulti.SetSuccessCallback(S3fsCurl::CopyMultipartPostCallback); - curlmulti.SetRetryCallback(S3fsCurl::CopyMultipartPostRetryCallback); + // Initialize S3fsMultiCurl + S3fsMultiCurl curlmulti(GetMaxParallelCount()); + 
curlmulti.SetSuccessCallback(S3fsCurl::CopyMultipartPostCallback); + curlmulti.SetRetryCallback(S3fsCurl::CopyMultipartPostRetryCallback); - for(bytes_remaining = size, chunk = 0; 0 < bytes_remaining; bytes_remaining -= chunk){ - chunk = bytes_remaining > MAX_MULTI_COPY_SOURCE_SIZE ? MAX_MULTI_COPY_SOURCE_SIZE : bytes_remaining; + for(bytes_remaining = size, chunk = 0; 0 < bytes_remaining; bytes_remaining -= chunk){ + chunk = bytes_remaining > MAX_MULTI_COPY_SOURCE_SIZE ? MAX_MULTI_COPY_SOURCE_SIZE : bytes_remaining; - strrange << "bytes=" << (size - bytes_remaining) << "-" << (size - bytes_remaining + chunk - 1); - meta["x-amz-copy-source-range"] = strrange.str(); - strrange.str(""); - strrange.clear(stringstream::goodbit); + strrange << "bytes=" << (size - bytes_remaining) << "-" << (size - bytes_remaining + chunk - 1); + meta["x-amz-copy-source-range"] = strrange.str(); + strrange.str(""); + strrange.clear(stringstream::goodbit); - // s3fscurl sub object - S3fsCurl* s3fscurl_para = new S3fsCurl(true); - s3fscurl_para->b_from = SAFESTRPTR(tpath); - s3fscurl_para->b_meta = meta; - s3fscurl_para->partdata.add_etag_list(&list); + // s3fscurl sub object + S3fsCurl* s3fscurl_para = new S3fsCurl(true); + s3fscurl_para->b_from = SAFESTRPTR(tpath); + s3fscurl_para->b_meta = meta; + s3fscurl_para->partdata.add_etag_list(&list); - // initiate upload part for parallel - if(0 != (result = s3fscurl_para->CopyMultipartPostSetup(tpath, tpath, list.size(), upload_id, meta))){ - S3FS_PRN_ERR("failed uploading part setup(%d)", result); - delete s3fscurl_para; - return result; + // initiate upload part for parallel + if(0 != (result = s3fscurl_para->CopyMultipartPostSetup(tpath, tpath, list.size(), upload_id, meta))){ + S3FS_PRN_ERR("failed uploading part setup(%d)", result); + delete s3fscurl_para; + return result; + } + + // set into parallel object + if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){ + S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath); + 
delete s3fscurl_para; + return -1; + } } - // set into parallel object - if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){ - S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath); - delete s3fscurl_para; - return -1; - } - } + // Multi request + if(0 != (result = curlmulti.Request())){ + S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result); - // Multi request - if(0 != (result = curlmulti.Request())){ - S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result); - - S3fsCurl s3fscurl_abort(true); - int result2 = s3fscurl_abort.AbortMultipartUpload(tpath, upload_id); - s3fscurl_abort.DestroyCurlHandle(); - if(result2 != 0){ - S3FS_PRN_ERR("error aborting multipart upload(errno=%d).", result2); + S3fsCurl s3fscurl_abort(true); + int result2 = s3fscurl_abort.AbortMultipartUpload(tpath, upload_id); + s3fscurl_abort.DestroyCurlHandle(); + if(result2 != 0){ + S3FS_PRN_ERR("error aborting multipart upload(errno=%d).", result2); + } + return result; } - return result; - } - - if(0 != (result = CompleteMultipartPostRequest(tpath, upload_id, list))){ - return result; - } - return 0; + if(0 != (result = CompleteMultipartPostRequest(tpath, upload_id, list))){ + return result; + } + return 0; } int S3fsCurl::MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool is_copy) { - int result; - string upload_id; - struct stat st; - int fd2; - etaglist_t list; - off_t remaining_bytes; - off_t chunk; + int result; + string upload_id; + struct stat st; + int fd2; + etaglist_t list; + off_t remaining_bytes; + off_t chunk; - S3FS_PRN_INFO3("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd); + S3FS_PRN_INFO3("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd); - // duplicate fd - if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){ - S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno); - if(-1 != fd2){ - close(fd2); + // duplicate fd + if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){ + S3FS_PRN_ERR("Could not 
duplicate file descriptor(errno=%d)", errno); + if(-1 != fd2){ + close(fd2); + } + return -errno; + } + if(-1 == fstat(fd2, &st)){ + S3FS_PRN_ERR("Invalid file descriptor(errno=%d)", errno); + close(fd2); + return -errno; } - return -errno; - } - if(-1 == fstat(fd2, &st)){ - S3FS_PRN_ERR("Invalid file descriptor(errno=%d)", errno); - close(fd2); - return -errno; - } - if(0 != (result = PreMultipartPostRequest(tpath, meta, upload_id, is_copy))){ - close(fd2); - return result; - } - DestroyCurlHandle(); + if(0 != (result = PreMultipartPostRequest(tpath, meta, upload_id, is_copy))){ + close(fd2); + return result; + } + DestroyCurlHandle(); - // cycle through open fd, pulling off 10MB chunks at a time - for(remaining_bytes = st.st_size; 0 < remaining_bytes; remaining_bytes -= chunk){ - // chunk size - chunk = remaining_bytes > S3fsCurl::multipart_size ? S3fsCurl::multipart_size : remaining_bytes; + // cycle through open fd, pulling off 10MB chunks at a time + for(remaining_bytes = st.st_size; 0 < remaining_bytes; remaining_bytes -= chunk){ + // chunk size + chunk = remaining_bytes > S3fsCurl::multipart_size ? 
S3fsCurl::multipart_size : remaining_bytes; + + // set + partdata.fd = fd2; + partdata.startpos = st.st_size - remaining_bytes; + partdata.size = chunk; + b_partdata_startpos = partdata.startpos; + b_partdata_size = partdata.size; + partdata.add_etag_list(&list); + + // upload part + if(0 != (result = UploadMultipartPostRequest(tpath, list.size(), upload_id))){ + S3FS_PRN_ERR("failed uploading part(%d)", result); + close(fd2); + return result; + } + DestroyCurlHandle(); + } + close(fd2); + + if(0 != (result = CompleteMultipartPostRequest(tpath, upload_id, list))){ + return result; + } + return 0; +} + +int S3fsCurl::MultipartUploadRequest(const string& upload_id, const char* tpath, int fd, off_t offset, off_t size, etaglist_t& list) +{ + S3FS_PRN_INFO3("[upload_id=%s][tpath=%s][fd=%d][offset=%lld][size=%lld]", upload_id.c_str(), SAFESTRPTR(tpath), fd, static_cast(offset), static_cast(size)); + + // duplicate fd + int fd2; + if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){ + S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno); + if(-1 != fd2){ + close(fd2); + } + return -errno; + } // set partdata.fd = fd2; - partdata.startpos = st.st_size - remaining_bytes; - partdata.size = chunk; + partdata.startpos = offset; + partdata.size = size; b_partdata_startpos = partdata.startpos; b_partdata_size = partdata.size; partdata.add_etag_list(&list); // upload part + int result; if(0 != (result = UploadMultipartPostRequest(tpath, list.size(), upload_id))){ - S3FS_PRN_ERR("failed uploading part(%d)", result); - close(fd2); - return result; + S3FS_PRN_ERR("failed uploading part(%d)", result); + close(fd2); + return result; } DestroyCurlHandle(); - } - close(fd2); - - if(0 != (result = CompleteMultipartPostRequest(tpath, upload_id, list))){ - return result; - } - return 0; -} - -int S3fsCurl::MultipartUploadRequest(const string& upload_id, const char* tpath, int fd, off_t offset, off_t size, etaglist_t& list) -{ - 
S3FS_PRN_INFO3("[upload_id=%s][tpath=%s][fd=%d][offset=%lld][size=%lld]", upload_id.c_str(), SAFESTRPTR(tpath), fd, static_cast(offset), static_cast(size)); - - // duplicate fd - int fd2; - if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){ - S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno); - if(-1 != fd2){ - close(fd2); - } - return -errno; - } - - // set - partdata.fd = fd2; - partdata.startpos = offset; - partdata.size = size; - b_partdata_startpos = partdata.startpos; - b_partdata_size = partdata.size; - partdata.add_etag_list(&list); - - // upload part - int result; - if(0 != (result = UploadMultipartPostRequest(tpath, list.size(), upload_id))){ - S3FS_PRN_ERR("failed uploading part(%d)", result); close(fd2); - return result; - } - DestroyCurlHandle(); - close(fd2); - return 0; + return 0; } int S3fsCurl::MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size) { - int result; - string upload_id; - off_t chunk; - off_t bytes_remaining; - etaglist_t list; - ostringstream strrange; + int result; + string upload_id; + off_t chunk; + off_t bytes_remaining; + etaglist_t list; + ostringstream strrange; - S3FS_PRN_INFO3("[from=%s][to=%s]", SAFESTRPTR(from), SAFESTRPTR(to)); + S3FS_PRN_INFO3("[from=%s][to=%s]", SAFESTRPTR(from), SAFESTRPTR(to)); - string srcresource; - string srcurl; - MakeUrlResource(get_realpath(from).c_str(), srcresource, srcurl); + string srcresource; + string srcurl; + MakeUrlResource(get_realpath(from).c_str(), srcresource, srcurl); - meta["Content-Type"] = S3fsCurl::LookupMimeType(string(to)); - meta["x-amz-copy-source"] = srcresource; + meta["Content-Type"] = S3fsCurl::LookupMimeType(string(to)); + meta["x-amz-copy-source"] = srcresource; - if(0 != (result = PreMultipartPostRequest(to, meta, upload_id, true))){ - return result; - } - DestroyCurlHandle(); - - // Initialize S3fsMultiCurl - S3fsMultiCurl curlmulti(GetMaxParallelCount()); - 
curlmulti.SetSuccessCallback(S3fsCurl::CopyMultipartPostCallback); - curlmulti.SetRetryCallback(S3fsCurl::CopyMultipartPostRetryCallback); - - for(bytes_remaining = size, chunk = 0; 0 < bytes_remaining; bytes_remaining -= chunk){ - chunk = bytes_remaining > MAX_MULTI_COPY_SOURCE_SIZE ? MAX_MULTI_COPY_SOURCE_SIZE : bytes_remaining; - - strrange << "bytes=" << (size - bytes_remaining) << "-" << (size - bytes_remaining + chunk - 1); - meta["x-amz-copy-source-range"] = strrange.str(); - strrange.str(""); - strrange.clear(stringstream::goodbit); - - // s3fscurl sub object - S3fsCurl* s3fscurl_para = new S3fsCurl(true); - s3fscurl_para->b_from = SAFESTRPTR(from); - s3fscurl_para->b_meta = meta; - s3fscurl_para->partdata.add_etag_list(&list); - - // initiate upload part for parallel - if(0 != (result = s3fscurl_para->CopyMultipartPostSetup(from, to, list.size(), upload_id, meta))){ - S3FS_PRN_ERR("failed uploading part setup(%d)", result); - delete s3fscurl_para; - return result; + if(0 != (result = PreMultipartPostRequest(to, meta, upload_id, true))){ + return result; } + DestroyCurlHandle(); - // set into parallel object - if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){ - S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", to); - delete s3fscurl_para; - return -1; - } - } + // Initialize S3fsMultiCurl + S3fsMultiCurl curlmulti(GetMaxParallelCount()); + curlmulti.SetSuccessCallback(S3fsCurl::CopyMultipartPostCallback); + curlmulti.SetRetryCallback(S3fsCurl::CopyMultipartPostRetryCallback); - // Multi request - if(0 != (result = curlmulti.Request())){ - S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result); + for(bytes_remaining = size, chunk = 0; 0 < bytes_remaining; bytes_remaining -= chunk){ + chunk = bytes_remaining > MAX_MULTI_COPY_SOURCE_SIZE ? 
MAX_MULTI_COPY_SOURCE_SIZE : bytes_remaining; - S3fsCurl s3fscurl_abort(true); - int result2 = s3fscurl_abort.AbortMultipartUpload(to, upload_id); - s3fscurl_abort.DestroyCurlHandle(); - if(result2 != 0){ - S3FS_PRN_ERR("error aborting multipart upload(errno=%d).", result2); - } + strrange << "bytes=" << (size - bytes_remaining) << "-" << (size - bytes_remaining + chunk - 1); + meta["x-amz-copy-source-range"] = strrange.str(); + strrange.str(""); + strrange.clear(stringstream::goodbit); - return result; - } + // s3fscurl sub object + S3fsCurl* s3fscurl_para = new S3fsCurl(true); + s3fscurl_para->b_from = SAFESTRPTR(from); + s3fscurl_para->b_meta = meta; + s3fscurl_para->partdata.add_etag_list(&list); - if(0 != (result = CompleteMultipartPostRequest(to, upload_id, list))){ - return result; - } - return 0; -} - -//------------------------------------------------------------------- -// method for S3fsMultiCurl -//------------------------------------------------------------------- -S3fsMultiCurl::S3fsMultiCurl(int maxParallelism) - : maxParallelism(maxParallelism) - , SuccessCallback(NULL) - , RetryCallback(NULL) -{ - int res; - pthread_mutexattr_t attr; - pthread_mutexattr_init(&attr); -#if S3FS_PTHREAD_ERRORCHECK - pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); -#endif - if (0 != (res = pthread_mutex_init(&completed_tids_lock, &attr))) { - S3FS_PRN_ERR("could not initialize completed_tids_lock: %i", res); - } -} - -S3fsMultiCurl::~S3fsMultiCurl() -{ - Clear(); - int res; - if(0 != (res = pthread_mutex_destroy(&completed_tids_lock))){ - S3FS_PRN_ERR("could not destroy completed_tids_lock: %i", res); - } -} - -bool S3fsMultiCurl::ClearEx(bool is_all) -{ - s3fscurllist_t::iterator iter; - for(iter = clist_req.begin(); iter != clist_req.end(); ++iter){ - S3fsCurl* s3fscurl = *iter; - if(s3fscurl){ - s3fscurl->DestroyCurlHandle(); - delete s3fscurl; // with destroy curl handle. 
- } - } - clist_req.clear(); - - if(is_all){ - for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){ - S3fsCurl* s3fscurl = *iter; - s3fscurl->DestroyCurlHandle(); - delete s3fscurl; - } - clist_all.clear(); - } - - S3FS_MALLOCTRIM(0); - - return true; -} - -S3fsMultiSuccessCallback S3fsMultiCurl::SetSuccessCallback(S3fsMultiSuccessCallback function) -{ - S3fsMultiSuccessCallback old = SuccessCallback; - SuccessCallback = function; - return old; -} - -S3fsMultiRetryCallback S3fsMultiCurl::SetRetryCallback(S3fsMultiRetryCallback function) -{ - S3fsMultiRetryCallback old = RetryCallback; - RetryCallback = function; - return old; -} - -bool S3fsMultiCurl::SetS3fsCurlObject(S3fsCurl* s3fscurl) -{ - if(!s3fscurl){ - return false; - } - clist_all.push_back(s3fscurl); - - return true; -} - -int S3fsMultiCurl::MultiPerform() -{ - std::vector threads; - bool success = true; - bool isMultiHead = false; - Semaphore sem(GetMaxParallelism()); - int rc; - - for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ++iter) { - pthread_t thread; - S3fsCurl* s3fscurl = *iter; - if(!s3fscurl){ - continue; - } - - sem.wait(); - - { - AutoLock lock(&completed_tids_lock); - for(std::vector::iterator it = completed_tids.begin(); it != completed_tids.end(); ++it){ - void* retval; - - rc = pthread_join(*it, &retval); - if (rc) { - success = false; - S3FS_PRN_ERR("failed pthread_join - rc(%d) %s", rc, strerror(rc)); - } else { - int int_retval = (int)(intptr_t)(retval); - if (int_retval && !(int_retval == -ENOENT && isMultiHead)) { - S3FS_PRN_WARN("thread failed - rc(%d)", int_retval); - } + // initiate upload part for parallel + if(0 != (result = s3fscurl_para->CopyMultipartPostSetup(from, to, list.size(), upload_id, meta))){ + S3FS_PRN_ERR("failed uploading part setup(%d)", result); + delete s3fscurl_para; + return result; } - } - completed_tids.clear(); - } - s3fscurl->sem = &sem; - s3fscurl->completed_tids_lock = &completed_tids_lock; - 
s3fscurl->completed_tids = &completed_tids; - isMultiHead |= s3fscurl->GetOp() == "HEAD"; - - rc = pthread_create(&thread, NULL, S3fsMultiCurl::RequestPerformWrapper, static_cast(s3fscurl)); - if (rc != 0) { - success = false; - S3FS_PRN_ERR("failed pthread_create - rc(%d)", rc); - break; - } - - threads.push_back(thread); - } - - for(int i = 0; i < sem.get_value(); ++i){ - sem.wait(); - } - - AutoLock lock(&completed_tids_lock); - for (std::vector::iterator titer = completed_tids.begin(); titer != completed_tids.end(); ++titer) { - void* retval; - - rc = pthread_join(*titer, &retval); - if (rc) { - success = false; - S3FS_PRN_ERR("failed pthread_join - rc(%d)", rc); - } else { - int int_retval = (int)(intptr_t)(retval); - if (int_retval && !(int_retval == -ENOENT && isMultiHead)) { - S3FS_PRN_WARN("thread failed - rc(%d)", int_retval); - } - } - } - completed_tids.clear(); - - return success ? 0 : -EIO; -} - -int S3fsMultiCurl::MultiRead() -{ - int result = 0; - - for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ){ - S3fsCurl* s3fscurl = *iter; - - bool isRetry = false; - bool isPostpone = false; - long responseCode = S3FSCURL_RESPONSECODE_NOTSET; - if(s3fscurl->GetResponseCode(responseCode, false)){ - if(S3FSCURL_RESPONSECODE_NOTSET == responseCode){ - // This is a case where the processing result has not yet been updated (should be very rare). 
- isPostpone = true; - }else if(400 > responseCode){ - // add into stat cache - if(SuccessCallback && !SuccessCallback(s3fscurl)){ - S3FS_PRN_WARN("error from callback function(%s).", s3fscurl->url.c_str()); + // set into parallel object + if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){ + S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", to); + delete s3fscurl_para; + return -1; } - }else if(400 == responseCode){ - // as possibly in multipart - S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str()); - isRetry = true; - }else if(404 == responseCode){ - // not found - // HEAD requests on readdir_multi_head can return 404 - if(s3fscurl->GetOp() != "HEAD"){ - S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str()); + } + + // Multi request + if(0 != (result = curlmulti.Request())){ + S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result); + + S3fsCurl s3fscurl_abort(true); + int result2 = s3fscurl_abort.AbortMultipartUpload(to, upload_id); + s3fscurl_abort.DestroyCurlHandle(); + if(result2 != 0){ + S3FS_PRN_ERR("error aborting multipart upload(errno=%d).", result2); } - }else if(500 == responseCode){ - // case of all other result, do retry.(11/13/2013) - // because it was found that s3fs got 500 error from S3, but could success - // to retry it. - S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str()); - isRetry = true; - }else{ - // Retry in other case. - S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str()); - isRetry = true; - } - }else{ - S3FS_PRN_ERR("failed a request(Unknown response code: %s)", s3fscurl->url.c_str()); + return result; } - if(isPostpone){ - clist_req.erase(iter); - clist_req.push_back(s3fscurl); // Re-evaluate at the end - iter = clist_req.begin(); - }else{ - if(!isRetry || 0 != result){ - // If an EIO error has already occurred, it will be terminated - // immediately even if retry processing is required. 
- s3fscurl->DestroyCurlHandle(); - delete s3fscurl; - }else{ - S3fsCurl* retrycurl = NULL; - - // For retry - if(RetryCallback){ - retrycurl = RetryCallback(s3fscurl); - if(NULL != retrycurl){ - clist_all.push_back(retrycurl); - }else{ - // set EIO and wait for other parts. - result = -EIO; - } - } - if(s3fscurl != retrycurl){ - s3fscurl->DestroyCurlHandle(); - delete s3fscurl; - } - } - iter = clist_req.erase(iter); + if(0 != (result = CompleteMultipartPostRequest(to, upload_id, list))){ + return result; } - } - clist_req.clear(); - - if(0 != result){ - // If an EIO error has already occurred, clear all retry objects. - for(s3fscurllist_t::iterator iter = clist_all.begin(); iter != clist_all.end(); ++iter){ - S3fsCurl* s3fscurl = *iter; - s3fscurl->DestroyCurlHandle(); - delete s3fscurl; - } - clist_all.clear(); - } - return result; -} - -int S3fsMultiCurl::Request() -{ - S3FS_PRN_INFO3("[count=%zu]", clist_all.size()); - - // Make request list. - // - // Send multi request loop( with retry ) - // (When many request is sends, sometimes gets "Couldn't connect to server") - // - while(!clist_all.empty()){ - // set curl handle to multi handle - int result; - s3fscurllist_t::iterator iter; - for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){ - S3fsCurl* s3fscurl = *iter; - clist_req.push_back(s3fscurl); - } - clist_all.clear(); - - // Send multi request. 
- if(0 != (result = MultiPerform())){ - Clear(); - return result; - } - - // Read the result - if(0 != (result = MultiRead())){ - Clear(); - return result; - } - - // Cleanup curl handle in multi handle - ClearEx(false); - } - return 0; -} - -// thread function for performing an S3fsCurl request -// -void* S3fsMultiCurl::RequestPerformWrapper(void* arg) -{ - S3fsCurl* s3fscurl= static_cast(arg); - void* result = NULL; - if(!s3fscurl){ - return (void*)(intptr_t)(-EIO); - } - if(s3fscurl->fpLazySetup){ - if(!s3fscurl->fpLazySetup(s3fscurl)){ - S3FS_PRN_ERR("Failed to lazy setup, then respond EIO."); - result = (void*)(intptr_t)(-EIO); - } - } - - if(!result){ - result = (void*)(intptr_t)(s3fscurl->RequestPerform()); - s3fscurl->DestroyCurlHandle(true, false); - } - - AutoLock lock(s3fscurl->completed_tids_lock); - s3fscurl->completed_tids->push_back(pthread_self()); - s3fscurl->sem->post(); - - return result; -} - -//------------------------------------------------------------------- -// Utility functions -//------------------------------------------------------------------- -// -// curl_slist_sort_insert -// This function is like curl_slist_append function, but this adds data by a-sorting. -// Because AWS signature needs sorted header. 
-// -struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data) -{ - if(!data){ - return list; - } - string strkey = data; - string strval; - - string::size_type pos = strkey.find(':', 0); - if(string::npos != pos){ - strval = strkey.substr(pos + 1); - strkey = strkey.substr(0, pos); - } - - return curl_slist_sort_insert(list, strkey.c_str(), strval.c_str()); -} - -struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value) -{ - struct curl_slist* curpos; - struct curl_slist* lastpos; - struct curl_slist* new_item; - - if(!key){ - return list; - } - if(NULL == (new_item = reinterpret_cast(malloc(sizeof(struct curl_slist))))){ - return list; - } - - // key & value are trimmed and lower (only key) - string strkey = trim(string(key)); - string strval = trim(string(value ? value : "")); - string strnew = key + string(": ") + strval; - if(NULL == (new_item->data = strdup(strnew.c_str()))){ - free(new_item); - return list; - } - new_item->next = NULL; - - for(lastpos = NULL, curpos = list; curpos; lastpos = curpos, curpos = curpos->next){ - string strcur = curpos->data; - size_t pos; - if(string::npos != (pos = strcur.find(':', 0))){ - strcur = strcur.substr(0, pos); - } - - int result = strcasecmp(strkey.c_str(), strcur.c_str()); - if(0 == result){ - // same data, so replace it. - if(lastpos){ - lastpos->next = new_item; - }else{ - list = new_item; - } - new_item->next = curpos->next; - free(curpos->data); - free(curpos); - break; - - }else if(0 > result){ - // add data before curpos. 
- if(lastpos){ - lastpos->next = new_item; - }else{ - list = new_item; - } - new_item->next = curpos; - break; - } - } - - if(!curpos){ - // append to last pos - if(lastpos){ - lastpos->next = new_item; - }else{ - // a case of list is null - list = new_item; - } - } - - return list; -} - -string get_sorted_header_keys(const struct curl_slist* list) -{ - string sorted_headers; - - if(!list){ - return sorted_headers; - } - - for( ; list; list = list->next){ - string strkey = list->data; - size_t pos; - if(string::npos != (pos = strkey.find(':', 0))){ - if (trim(strkey.substr(pos + 1)).empty()) { - // skip empty-value headers (as they are discarded by libcurl) - continue; - } - strkey = strkey.substr(0, pos); - } - if(0 < sorted_headers.length()){ - sorted_headers += ";"; - } - sorted_headers += lower(strkey); - } - - return sorted_headers; -} - -string get_header_value(const struct curl_slist* list, const string &key) -{ - if(!list){ - return ""; - } - - for( ; list; list = list->next){ - string strkey = list->data; - size_t pos; - if(string::npos != (pos = strkey.find(':', 0))){ - if(0 == strcasecmp(trim(strkey.substr(0, pos)).c_str(), key.c_str())){ - return trim(strkey.substr(pos+1)); - } - } - } - - return ""; -} - -string get_canonical_headers(const struct curl_slist* list) -{ - string canonical_headers; - - if(!list){ - canonical_headers = "\n"; - return canonical_headers; - } - - for( ; list; list = list->next){ - string strhead = list->data; - size_t pos; - if(string::npos != (pos = strhead.find(':', 0))){ - string strkey = trim(lower(strhead.substr(0, pos))); - string strval = trim(strhead.substr(pos + 1)); - if (strval.empty()) { - // skip empty-value headers (as they are discarded by libcurl) - continue; - } - strhead = strkey.append(":").append(strval); - }else{ - strhead = trim(lower(strhead)); - } - canonical_headers += strhead; - canonical_headers += "\n"; - } - return canonical_headers; -} - -string get_canonical_headers(const struct curl_slist* list, 
bool only_amz) -{ - string canonical_headers; - - if(!list){ - canonical_headers = "\n"; - return canonical_headers; - } - - for( ; list; list = list->next){ - string strhead = list->data; - size_t pos; - if(string::npos != (pos = strhead.find(':', 0))){ - string strkey = trim(lower(strhead.substr(0, pos))); - string strval = trim(strhead.substr(pos + 1)); - if (strval.empty()) { - // skip empty-value headers (as they are discarded by libcurl) - continue; - } - strhead = strkey.append(":").append(strval); - }else{ - strhead = trim(lower(strhead)); - } - if(only_amz && strhead.substr(0, 5) != "x-amz"){ - continue; - } - canonical_headers += strhead; - canonical_headers += "\n"; - } - return canonical_headers; -} - -// function for using global values -bool MakeUrlResource(const char* realpath, string& resourcepath, string& url) -{ - if(!realpath){ - return false; - } - resourcepath = urlEncode(service_path + bucket + realpath); - url = host + resourcepath; - return true; -} - -string prepare_url(const char* url) -{ - S3FS_PRN_INFO3("URL is %s", url); - - string uri; - string hostname; - string path; - string url_str = string(url); - string token = string("/") + bucket; - int bucket_pos; - int bucket_length = token.size(); - int uri_length = 0; - - if(!strncasecmp(url_str.c_str(), "https://", 8)){ - uri_length = 8; - } else if(!strncasecmp(url_str.c_str(), "http://", 7)) { - uri_length = 7; - } - uri = url_str.substr(0, uri_length); - bucket_pos = url_str.find(token, uri_length); - - if(!pathrequeststyle){ - hostname = bucket + "." 
+ url_str.substr(uri_length, bucket_pos - uri_length); - path = url_str.substr((bucket_pos + bucket_length)); - }else{ - hostname = url_str.substr(uri_length, bucket_pos - uri_length); - string part = url_str.substr((bucket_pos + bucket_length)); - if('/' != part[0]){ - part = "/" + part; - } - path = "/" + bucket + part; - } - - url_str = uri + hostname + path; - - S3FS_PRN_INFO3("URL changed is %s", url_str.c_str()); - - return url_str; + return 0; } /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/curl.h b/src/curl.h index 2b60990..058a0cb 100644 --- a/src/curl.h +++ b/src/curl.h @@ -21,10 +21,13 @@ #ifndef S3FS_CURL_H_ #define S3FS_CURL_H_ -#include +#include +#include "curl_handlerpool.h" +#include "bodydata.h" #include "psemaphore.h" -#include "types.h" +#include "metaheader.h" +#include "fdcache_page.h" //---------------------------------------------- // Avoid dependency on libcurl version @@ -42,153 +45,33 @@ // a message is output. 
// #if defined(HAVE_CURLOPT_TCP_KEEPALIVE) && (HAVE_CURLOPT_TCP_KEEPALIVE == 1) - #define S3FS_CURLOPT_TCP_KEEPALIVE CURLOPT_TCP_KEEPALIVE + #define S3FS_CURLOPT_TCP_KEEPALIVE CURLOPT_TCP_KEEPALIVE #else - #define S3FS_CURLOPT_TCP_KEEPALIVE static_cast<CURLoption>(213) + #define S3FS_CURLOPT_TCP_KEEPALIVE static_cast<CURLoption>(213) #endif #if defined(HAVE_CURLOPT_SSL_ENABLE_ALPN) && (HAVE_CURLOPT_SSL_ENABLE_ALPN == 1) - #define S3FS_CURLOPT_SSL_ENABLE_ALPN CURLOPT_SSL_ENABLE_ALPN + #define S3FS_CURLOPT_SSL_ENABLE_ALPN CURLOPT_SSL_ENABLE_ALPN #else - #define S3FS_CURLOPT_SSL_ENABLE_ALPN static_cast<CURLoption>(226) + #define S3FS_CURLOPT_SSL_ENABLE_ALPN static_cast<CURLoption>(226) #endif #if defined(HAVE_CURLOPT_KEEP_SENDING_ON_ERROR) && (HAVE_CURLOPT_KEEP_SENDING_ON_ERROR == 1) - #define S3FS_CURLOPT_KEEP_SENDING_ON_ERROR CURLOPT_KEEP_SENDING_ON_ERROR + #define S3FS_CURLOPT_KEEP_SENDING_ON_ERROR CURLOPT_KEEP_SENDING_ON_ERROR #else - #define S3FS_CURLOPT_KEEP_SENDING_ON_ERROR static_cast<CURLoption>(245) + #define S3FS_CURLOPT_KEEP_SENDING_ON_ERROR static_cast<CURLoption>(245) #endif //---------------------------------------------- -// Symbols +// Structure / Typedefs //---------------------------------------------- -static const int MIN_MULTIPART_SIZE = 5 * 1024 * 1024; - -//---------------------------------------------- -// class BodyData -//---------------------------------------------- -// memory class for curl write memory callback -// -class BodyData -{ - private: - char* text; - size_t lastpos; - size_t bufsize; - - private: - bool IsSafeSize(size_t addbytes) const { - return ((lastpos + addbytes + 1) > bufsize ?
false : true); - } - bool Resize(size_t addbytes); - - public: - BodyData() : text(NULL), lastpos(0), bufsize(0) {} - ~BodyData() { - Clear(); - } - - void Clear(void); - bool Append(void* ptr, size_t bytes); - bool Append(void* ptr, size_t blockSize, size_t numBlocks) { - return Append(ptr, (blockSize * numBlocks)); - } - const char* str() const; - size_t size() const { - return lastpos; - } -}; - -//---------------------------------------------- -// Utility structs & typedefs -//---------------------------------------------- -typedef std::vector<std::string> etaglist_t; - -// Each part information for Multipart upload -struct filepart -{ - bool uploaded; // does finish uploading - std::string etag; // expected etag value - int fd; // base file(temporary full file) descriptor - off_t startpos; // seek fd point for uploading - off_t size; // uploading size - etaglist_t* etaglist; // use only parallel upload - int etagpos; // use only parallel upload - - filepart() : uploaded(false), fd(-1), startpos(0), size(-1), etaglist(NULL), etagpos(-1) {} - ~filepart() - { - clear(); - } - - void clear(void) - { - uploaded = false; - etag = ""; - fd = -1; - startpos = 0; - size = -1; - etaglist = NULL; - etagpos = - 1; - } - - void add_etag_list(etaglist_t* list) - { - if(list){ - list->push_back(std::string("")); - etaglist = list; - etagpos = list->size() - 1; - }else{ - etaglist = NULL; - etagpos = - 1; - } - } -}; - -// for progress -struct case_insensitive_compare_func -{ - bool operator()(const std::string& a, const std::string& b) const { - return strcasecmp(a.c_str(), b.c_str()) < 0; - } -}; -typedef std::map<std::string, std::string, case_insensitive_compare_func> mimes_t; typedef std::pair<double, double> progress_t; typedef std::map<CURL*, time_t> curltime_t; typedef std::map<CURL*, progress_t> curlprogress_t; -class S3fsMultiCurl; - -//---------------------------------------------- -// class CurlHandlerPool -//---------------------------------------------- -typedef std::list<CURL*> hcurllist_t; - -class CurlHandlerPool -{ -public: - explicit CurlHandlerPool(int maxHandlers) :
mMaxHandlers(maxHandlers) - { - assert(maxHandlers > 0); - } - - bool Init(); - bool Destroy(); - - CURL* GetHandler(bool only_pool); - void ReturnHandler(CURL* hCurl, bool restore_pool); - -private: - int mMaxHandlers; - pthread_mutex_t mLock; - hcurllist_t mPool; -}; - //---------------------------------------------- // class S3fsCurl //---------------------------------------------- -#include "fdcache.h" // for fdpage_list_t - class S3fsCurl; // Prototype function for lazy setup options for curl handle @@ -202,377 +85,326 @@ typedef std::list sseckeylist_t; // class S3fsCurl { - friend class S3fsMultiCurl; + friend class S3fsMultiCurl; - private: - enum REQTYPE { - REQTYPE_UNSET = -1, - REQTYPE_DELETE = 0, - REQTYPE_HEAD, - REQTYPE_PUTHEAD, - REQTYPE_PUT, - REQTYPE_GET, - REQTYPE_CHKBUCKET, - REQTYPE_LISTBUCKET, - REQTYPE_PREMULTIPOST, - REQTYPE_COMPLETEMULTIPOST, - REQTYPE_UPLOADMULTIPOST, - REQTYPE_COPYMULTIPOST, - REQTYPE_MULTILIST, - REQTYPE_IAMCRED, - REQTYPE_ABORTMULTIUPLOAD, - REQTYPE_IAMROLE - }; + private: + enum REQTYPE { + REQTYPE_UNSET = -1, + REQTYPE_DELETE = 0, + REQTYPE_HEAD, + REQTYPE_PUTHEAD, + REQTYPE_PUT, + REQTYPE_GET, + REQTYPE_CHKBUCKET, + REQTYPE_LISTBUCKET, + REQTYPE_PREMULTIPOST, + REQTYPE_COMPLETEMULTIPOST, + REQTYPE_UPLOADMULTIPOST, + REQTYPE_COPYMULTIPOST, + REQTYPE_MULTILIST, + REQTYPE_IAMCRED, + REQTYPE_ABORTMULTIUPLOAD, + REQTYPE_IAMROLE + }; - // class variables - static pthread_mutex_t curl_handles_lock; - static struct callback_locks_t { - pthread_mutex_t dns; - pthread_mutex_t ssl_session; - } callback_locks; - static bool is_initglobal_done; - static CurlHandlerPool* sCurlPool; - static int sCurlPoolSize; - static CURLSH* hCurlShare; - static bool is_cert_check; - static bool is_dns_cache; - static bool is_ssl_session_cache; - static long connect_timeout; - static time_t readwrite_timeout; - static int retries; - static bool is_public_bucket; - static acl_t default_acl; - static storage_class_t storage_class; - static 
sseckeylist_t sseckeys; - static std::string ssekmsid; - static sse_type_t ssetype; - static bool is_content_md5; - static bool is_verbose; - static bool is_dump_body; - static std::string AWSAccessKeyId; - static std::string AWSSecretAccessKey; - static std::string AWSAccessToken; - static time_t AWSAccessTokenExpire; - static bool is_ecs; - static bool is_use_session_token; - static bool is_ibm_iam_auth; - static std::string IAM_cred_url; - static size_t IAM_field_count; - static std::string IAM_token_field; - static std::string IAM_expiry_field; - static std::string IAM_role; - static long ssl_verify_hostname; - static curltime_t curl_times; - static curlprogress_t curl_progress; - static std::string curl_ca_bundle; - static mimes_t mimeTypes; - static std::string userAgent; - static int max_parallel_cnt; - static int max_multireq; - static off_t multipart_size; - static bool is_sigv4; - static bool is_ua; // User-Agent - static bool requester_pays; + // class variables + static pthread_mutex_t curl_handles_lock; + static struct callback_locks_t { + pthread_mutex_t dns; + pthread_mutex_t ssl_session; + } callback_locks; + static bool is_initglobal_done; + static CurlHandlerPool* sCurlPool; + static int sCurlPoolSize; + static CURLSH* hCurlShare; + static bool is_cert_check; + static bool is_dns_cache; + static bool is_ssl_session_cache; + static long connect_timeout; + static time_t readwrite_timeout; + static int retries; + static bool is_public_bucket; + static acl_t default_acl; + static storage_class_t storage_class; + static sseckeylist_t sseckeys; + static std::string ssekmsid; + static sse_type_t ssetype; + static bool is_content_md5; + static bool is_verbose; + static bool is_dump_body; + static std::string AWSAccessKeyId; + static std::string AWSSecretAccessKey; + static std::string AWSAccessToken; + static time_t AWSAccessTokenExpire; + static bool is_ecs; + static bool is_use_session_token; + static bool is_ibm_iam_auth; + static std::string 
IAM_cred_url; + static size_t IAM_field_count; + static std::string IAM_token_field; + static std::string IAM_expiry_field; + static std::string IAM_role; + static long ssl_verify_hostname; + static curltime_t curl_times; + static curlprogress_t curl_progress; + static std::string curl_ca_bundle; + static mimes_t mimeTypes; + static std::string userAgent; + static int max_parallel_cnt; + static int max_multireq; + static off_t multipart_size; + static bool is_sigv4; + static bool is_ua; // User-Agent + static bool requester_pays; - // variables - CURL* hCurl; - REQTYPE type; // type of request - std::string path; // target object path - std::string base_path; // base path (for multi curl head request) - std::string saved_path; // saved path = cache key (for multi curl head request) - std::string url; // target object path(url) - struct curl_slist* requestHeaders; - headers_t responseHeaders; // header data by HeaderCallback - BodyData bodydata; // body data by WriteMemoryCallback - BodyData headdata; // header data by WriteMemoryCallback - volatile long LastResponseCode; - const unsigned char* postdata; // use by post method and read callback function. - int postdata_remaining; // use by post method and read callback function. 
- filepart partdata; // use by multipart upload/get object callback - bool is_use_ahbe; // additional header by extension - int retry_count; // retry count for multipart - FILE* b_infile; // backup for retrying - const unsigned char* b_postdata; // backup for retrying - int b_postdata_remaining; // backup for retrying - off_t b_partdata_startpos; // backup for retrying - ssize_t b_partdata_size; // backup for retrying - int b_ssekey_pos; // backup for retrying - std::string b_ssevalue; // backup for retrying - sse_type_t b_ssetype; // backup for retrying - std::string b_from; // backup for retrying(for copy request) - headers_t b_meta; // backup for retrying(for copy request) - std::string op; // the HTTP verb of the request ("PUT", "GET", etc.) - std::string query_string; // request query string - Semaphore *sem; - pthread_mutex_t *completed_tids_lock; - std::vector *completed_tids; - s3fscurl_lazy_setup fpLazySetup; // curl options for lazy setting function + // variables + CURL* hCurl; + REQTYPE type; // type of request + std::string path; // target object path + std::string base_path; // base path (for multi curl head request) + std::string saved_path; // saved path = cache key (for multi curl head request) + std::string url; // target object path(url) + struct curl_slist* requestHeaders; + headers_t responseHeaders; // header data by HeaderCallback + BodyData bodydata; // body data by WriteMemoryCallback + BodyData headdata; // header data by WriteMemoryCallback + volatile long LastResponseCode; + const unsigned char* postdata; // use by post method and read callback function. + int postdata_remaining; // use by post method and read callback function. 
+ filepart partdata; // use by multipart upload/get object callback + bool is_use_ahbe; // additional header by extension + int retry_count; // retry count for multipart + FILE* b_infile; // backup for retrying + const unsigned char* b_postdata; // backup for retrying + int b_postdata_remaining; // backup for retrying + off_t b_partdata_startpos; // backup for retrying + ssize_t b_partdata_size; // backup for retrying + int b_ssekey_pos; // backup for retrying + std::string b_ssevalue; // backup for retrying + sse_type_t b_ssetype; // backup for retrying + std::string b_from; // backup for retrying(for copy request) + headers_t b_meta; // backup for retrying(for copy request) + std::string op; // the HTTP verb of the request ("PUT", "GET", etc.) + std::string query_string; // request query string + Semaphore *sem; + pthread_mutex_t *completed_tids_lock; + std::vector *completed_tids; + s3fscurl_lazy_setup fpLazySetup; // curl options for lazy setting function - public: - // constructor/destructor - explicit S3fsCurl(bool ahbe = false); - ~S3fsCurl(); + public: + static const long S3FSCURL_RESPONSECODE_NOTSET = -1; + static const long S3FSCURL_RESPONSECODE_FATAL_ERROR = -2; + static const int S3FSCURL_PERFORM_RESULT_NOTSET = 1; - private: - // class methods - static bool InitGlobalCurl(void); - static bool DestroyGlobalCurl(void); - static bool InitShareCurl(void); - static bool DestroyShareCurl(void); - static void LockCurlShare(CURL* handle, curl_lock_data nLockData, curl_lock_access laccess, void* useptr); - static void UnlockCurlShare(CURL* handle, curl_lock_data nLockData, void* useptr); - static bool InitCryptMutex(void); - static bool DestroyCryptMutex(void); - static int CurlProgress(void *clientp, double dltotal, double dlnow, double ultotal, double ulnow); + public: + // constructor/destructor + explicit S3fsCurl(bool ahbe = false); + ~S3fsCurl(); - static bool LocateBundle(void); - static size_t HeaderCallback(void *data, size_t blockSize, size_t 
numBlocks, void *userPtr); - static size_t WriteMemoryCallback(void *ptr, size_t blockSize, size_t numBlocks, void *data); - static size_t ReadCallback(void *ptr, size_t size, size_t nmemb, void *userp); - static size_t UploadReadCallback(void *ptr, size_t size, size_t nmemb, void *userp); - static size_t DownloadWriteCallback(void* ptr, size_t size, size_t nmemb, void* userp); + private: + // class methods + static bool InitGlobalCurl(void); + static bool DestroyGlobalCurl(void); + static bool InitShareCurl(void); + static bool DestroyShareCurl(void); + static void LockCurlShare(CURL* handle, curl_lock_data nLockData, curl_lock_access laccess, void* useptr); + static void UnlockCurlShare(CURL* handle, curl_lock_data nLockData, void* useptr); + static bool InitCryptMutex(void); + static bool DestroyCryptMutex(void); + static int CurlProgress(void *clientp, double dltotal, double dlnow, double ultotal, double ulnow); - static bool UploadMultipartPostCallback(S3fsCurl* s3fscurl); - static bool CopyMultipartPostCallback(S3fsCurl* s3fscurl); - static bool MixMultipartPostCallback(S3fsCurl* s3fscurl); - static S3fsCurl* UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl); - static S3fsCurl* CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl); - static S3fsCurl* MixMultipartPostRetryCallback(S3fsCurl* s3fscurl); - static S3fsCurl* ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl); + static bool LocateBundle(void); + static size_t HeaderCallback(void *data, size_t blockSize, size_t numBlocks, void *userPtr); + static size_t WriteMemoryCallback(void *ptr, size_t blockSize, size_t numBlocks, void *data); + static size_t ReadCallback(void *ptr, size_t size, size_t nmemb, void *userp); + static size_t UploadReadCallback(void *ptr, size_t size, size_t nmemb, void *userp); + static size_t DownloadWriteCallback(void* ptr, size_t size, size_t nmemb, void* userp); - // lazy functions for set curl options - static bool UploadMultipartPostSetCurlOpts(S3fsCurl* s3fscurl); - static 
bool CopyMultipartPostSetCurlOpts(S3fsCurl* s3fscurl); - static bool PreGetObjectRequestSetCurlOpts(S3fsCurl* s3fscurl); - static bool PreHeadRequestSetCurlOpts(S3fsCurl* s3fscurl); + static bool UploadMultipartPostCallback(S3fsCurl* s3fscurl); + static bool CopyMultipartPostCallback(S3fsCurl* s3fscurl); + static bool MixMultipartPostCallback(S3fsCurl* s3fscurl); + static S3fsCurl* UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl); + static S3fsCurl* CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl); + static S3fsCurl* MixMultipartPostRetryCallback(S3fsCurl* s3fscurl); + static S3fsCurl* ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl); - static bool ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval); - static bool SetIAMCredentials(const char* response); - static bool ParseIAMRoleFromMetaDataResponse(const char* response, std::string& rolename); - static bool SetIAMRoleFromMetaData(const char* response); - static bool LoadEnvSseCKeys(void); - static bool LoadEnvSseKmsid(void); - static bool PushbackSseKeys(std::string& onekey); - static bool AddUserAgent(CURL* hCurl); + // lazy functions for set curl options + static bool UploadMultipartPostSetCurlOpts(S3fsCurl* s3fscurl); + static bool CopyMultipartPostSetCurlOpts(S3fsCurl* s3fscurl); + static bool PreGetObjectRequestSetCurlOpts(S3fsCurl* s3fscurl); + static bool PreHeadRequestSetCurlOpts(S3fsCurl* s3fscurl); - static int CurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr); - static int CurlDebugBodyInFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr); - static int CurlDebugBodyOutFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr); - static int RawCurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr, curl_infotype datatype); + static bool ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval); + static bool SetIAMCredentials(const char* response); + 
static bool ParseIAMRoleFromMetaDataResponse(const char* response, std::string& rolename); + static bool SetIAMRoleFromMetaData(const char* response); + static bool LoadEnvSseCKeys(void); + static bool LoadEnvSseKmsid(void); + static bool PushbackSseKeys(std::string& onekey); + static bool AddUserAgent(CURL* hCurl); - // methods - bool ResetHandle(bool lock_already_held = false); - bool RemakeHandle(void); - bool ClearInternalData(void); - void insertV4Headers(); - void insertV2Headers(); - void insertIBMIAMHeaders(); - void insertAuthHeaders(); - std::string CalcSignatureV2(const std::string& method, const std::string& strMD5, const std::string& content_type, const std::string& date, const std::string& resource); - std::string CalcSignature(const std::string& method, const std::string& canonical_uri, const std::string& query_string, const std::string& strdate, const std::string& payload_hash, const std::string& date8601); - int GetIAMCredentials(void); + static int CurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr); + static int CurlDebugBodyInFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr); + static int CurlDebugBodyOutFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr); + static int RawCurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr, curl_infotype datatype); - int UploadMultipartPostSetup(const char* tpath, int part_num, const std::string& upload_id); - int CopyMultipartPostSetup(const char* from, const char* to, int part_num, const std::string& upload_id, headers_t& meta); - bool UploadMultipartPostComplete(); - bool CopyMultipartPostComplete(); - bool MixMultipartPostComplete(); + // methods + bool ResetHandle(bool lock_already_held = false); + bool RemakeHandle(void); + bool ClearInternalData(void); + void insertV4Headers(); + void insertV2Headers(); + void insertIBMIAMHeaders(); + void insertAuthHeaders(); + std::string 
CalcSignatureV2(const std::string& method, const std::string& strMD5, const std::string& content_type, const std::string& date, const std::string& resource); + std::string CalcSignature(const std::string& method, const std::string& canonical_uri, const std::string& query_string, const std::string& strdate, const std::string& payload_hash, const std::string& date8601); + int GetIAMCredentials(void); - public: - // class methods - static bool InitS3fsCurl(void); - static bool InitMimeType(const std::string& strFile); - static bool DestroyS3fsCurl(void); - static int ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, int fd); - static int ParallelMixMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, const fdpage_list_t& mixuppages); - static int ParallelGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size); - static bool CheckIAMCredentialUpdate(void); + int UploadMultipartPostSetup(const char* tpath, int part_num, const std::string& upload_id); + int CopyMultipartPostSetup(const char* from, const char* to, int part_num, const std::string& upload_id, headers_t& meta); + bool UploadMultipartPostComplete(); + bool CopyMultipartPostComplete(); + bool MixMultipartPostComplete(); - // class methods(variables) - static std::string LookupMimeType(const std::string& name); - static bool SetCheckCertificate(bool isCertCheck); - static bool SetDnsCache(bool isCache); - static bool SetSslSessionCache(bool isCache); - static long SetConnectTimeout(long timeout); - static time_t SetReadwriteTimeout(time_t timeout); - static time_t GetReadwriteTimeout(void) { return S3fsCurl::readwrite_timeout; } - static int SetRetries(int count); - static bool SetPublicBucket(bool flag); - static bool IsPublicBucket(void) { return S3fsCurl::is_public_bucket; } - static acl_t SetDefaultAcl(acl_t acl); - static acl_t GetDefaultAcl(); - static storage_class_t SetStorageClass(storage_class_t storage_class); - static storage_class_t GetStorageClass() { 
return S3fsCurl::storage_class; } - static bool LoadEnvSse(void) { return (S3fsCurl::LoadEnvSseCKeys() && S3fsCurl::LoadEnvSseKmsid()); } - static sse_type_t SetSseType(sse_type_t type); - static sse_type_t GetSseType(void) { return S3fsCurl::ssetype; } - static bool IsSseDisable(void) { return (sse_type_t::SSE_DISABLE == S3fsCurl::ssetype); } - static bool IsSseS3Type(void) { return (sse_type_t::SSE_S3 == S3fsCurl::ssetype); } - static bool IsSseCType(void) { return (sse_type_t::SSE_C == S3fsCurl::ssetype); } - static bool IsSseKmsType(void) { return (sse_type_t::SSE_KMS == S3fsCurl::ssetype); } - static bool FinalCheckSse(void); - static bool SetSseCKeys(const char* filepath); - static bool SetSseKmsid(const char* kmsid); - static bool IsSetSseKmsId(void) { return !S3fsCurl::ssekmsid.empty(); } - static const char* GetSseKmsId(void) { return S3fsCurl::ssekmsid.c_str(); } - static bool GetSseKey(std::string& md5, std::string& ssekey); - static bool GetSseKeyMd5(int pos, std::string& md5); - static int GetSseKeyCount(void); - static bool SetContentMd5(bool flag); - static bool SetVerbose(bool flag); - static bool GetVerbose(void) { return S3fsCurl::is_verbose; } - static bool SetDumpBody(bool flag); - static bool IsDumpBody(void) { return S3fsCurl::is_dump_body; } - static bool SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey); - static bool SetAccessKeyWithSessionToken(const char* AccessKeyId, const char* SecretAccessKey, const char * SessionToken); - static bool IsSetAccessKeyID(void){ - return (0 < S3fsCurl::AWSAccessKeyId.size()); - } - static bool IsSetAccessKeys(void){ - return (0 < S3fsCurl::IAM_role.size() || ((0 < S3fsCurl::AWSAccessKeyId.size() || S3fsCurl::is_ibm_iam_auth) && 0 < S3fsCurl::AWSSecretAccessKey.size())); - } - static long SetSslVerifyHostname(long value); - static long GetSslVerifyHostname(void) { return S3fsCurl::ssl_verify_hostname; } - // maximum parallel GET and PUT requests - static int SetMaxParallelCount(int value); 
- static int GetMaxParallelCount(void) { return S3fsCurl::max_parallel_cnt; } - // maximum parallel HEAD requests - static int SetMaxMultiRequest(int max); - static int GetMaxMultiRequest(void) { return S3fsCurl::max_multireq; } - static bool SetIsECS(bool flag); - static bool SetIsIBMIAMAuth(bool flag); - static size_t SetIAMFieldCount(size_t field_count); - static std::string SetIAMCredentialsURL(const char* url); - static std::string SetIAMTokenField(const char* token_field); - static std::string SetIAMExpiryField(const char* expiry_field); - static std::string SetIAMRole(const char* role); - static const char* GetIAMRole(void) { return S3fsCurl::IAM_role.c_str(); } - static bool SetMultipartSize(off_t size); - static off_t GetMultipartSize(void) { return S3fsCurl::multipart_size; } - static bool SetSignatureV4(bool isset) { bool bresult = S3fsCurl::is_sigv4; S3fsCurl::is_sigv4 = isset; return bresult; } - static bool IsSignatureV4(void) { return S3fsCurl::is_sigv4; } - static bool SetUserAgentFlag(bool isset) { bool bresult = S3fsCurl::is_ua; S3fsCurl::is_ua = isset; return bresult; } - static bool IsUserAgentFlag(void) { return S3fsCurl::is_ua; } - static void InitUserAgent(void); - static bool SetRequesterPays(bool flag) { bool old_flag = S3fsCurl::requester_pays; S3fsCurl::requester_pays = flag; return old_flag; } - static bool IsRequesterPays(void) { return S3fsCurl::requester_pays; } + public: + // class methods + static bool InitS3fsCurl(void); + static bool InitMimeType(const std::string& strFile); + static bool DestroyS3fsCurl(void); + static int ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, int fd); + static int ParallelMixMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, const fdpage_list_t& mixuppages); + static int ParallelGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size); + static bool CheckIAMCredentialUpdate(void); - // methods - bool CreateCurlHandle(bool only_pool = false, bool remake = 
false); - bool DestroyCurlHandle(bool restore_pool = true, bool clear_internal_data = true); + // class methods(variables) + static std::string LookupMimeType(const std::string& name); + static bool SetCheckCertificate(bool isCertCheck); + static bool SetDnsCache(bool isCache); + static bool SetSslSessionCache(bool isCache); + static long SetConnectTimeout(long timeout); + static time_t SetReadwriteTimeout(time_t timeout); + static time_t GetReadwriteTimeout(void) { return S3fsCurl::readwrite_timeout; } + static int SetRetries(int count); + static bool SetPublicBucket(bool flag); + static bool IsPublicBucket(void) { return S3fsCurl::is_public_bucket; } + static acl_t SetDefaultAcl(acl_t acl); + static acl_t GetDefaultAcl(); + static storage_class_t SetStorageClass(storage_class_t storage_class); + static storage_class_t GetStorageClass() { return S3fsCurl::storage_class; } + static bool LoadEnvSse(void) { return (S3fsCurl::LoadEnvSseCKeys() && S3fsCurl::LoadEnvSseKmsid()); } + static sse_type_t SetSseType(sse_type_t type); + static sse_type_t GetSseType(void) { return S3fsCurl::ssetype; } + static bool IsSseDisable(void) { return (sse_type_t::SSE_DISABLE == S3fsCurl::ssetype); } + static bool IsSseS3Type(void) { return (sse_type_t::SSE_S3 == S3fsCurl::ssetype); } + static bool IsSseCType(void) { return (sse_type_t::SSE_C == S3fsCurl::ssetype); } + static bool IsSseKmsType(void) { return (sse_type_t::SSE_KMS == S3fsCurl::ssetype); } + static bool FinalCheckSse(void); + static bool SetSseCKeys(const char* filepath); + static bool SetSseKmsid(const char* kmsid); + static bool IsSetSseKmsId(void) { return !S3fsCurl::ssekmsid.empty(); } + static const char* GetSseKmsId(void) { return S3fsCurl::ssekmsid.c_str(); } + static bool GetSseKey(std::string& md5, std::string& ssekey); + static bool GetSseKeyMd5(int pos, std::string& md5); + static int GetSseKeyCount(void); + static bool SetContentMd5(bool flag); + static bool SetVerbose(bool flag); + static bool GetVerbose(void) 
{ return S3fsCurl::is_verbose; } + static bool SetDumpBody(bool flag); + static bool IsDumpBody(void) { return S3fsCurl::is_dump_body; } + static bool SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey); + static bool SetAccessKeyWithSessionToken(const char* AccessKeyId, const char* SecretAccessKey, const char * SessionToken); + static bool IsSetAccessKeyID(void) + { + return (0 < S3fsCurl::AWSAccessKeyId.size()); + } + static bool IsSetAccessKeys(void) + { + return (0 < S3fsCurl::IAM_role.size() || ((0 < S3fsCurl::AWSAccessKeyId.size() || S3fsCurl::is_ibm_iam_auth) && 0 < S3fsCurl::AWSSecretAccessKey.size())); + } + static long SetSslVerifyHostname(long value); + static long GetSslVerifyHostname(void) { return S3fsCurl::ssl_verify_hostname; } + // maximum parallel GET and PUT requests + static int SetMaxParallelCount(int value); + static int GetMaxParallelCount(void) { return S3fsCurl::max_parallel_cnt; } + // maximum parallel HEAD requests + static int SetMaxMultiRequest(int max); + static int GetMaxMultiRequest(void) { return S3fsCurl::max_multireq; } + static bool SetIsECS(bool flag); + static bool SetIsIBMIAMAuth(bool flag); + static size_t SetIAMFieldCount(size_t field_count); + static std::string SetIAMCredentialsURL(const char* url); + static std::string SetIAMTokenField(const char* token_field); + static std::string SetIAMExpiryField(const char* expiry_field); + static std::string SetIAMRole(const char* role); + static const char* GetIAMRole(void) { return S3fsCurl::IAM_role.c_str(); } + static bool SetMultipartSize(off_t size); + static off_t GetMultipartSize(void) { return S3fsCurl::multipart_size; } + static bool SetSignatureV4(bool isset) { bool bresult = S3fsCurl::is_sigv4; S3fsCurl::is_sigv4 = isset; return bresult; } + static bool IsSignatureV4(void) { return S3fsCurl::is_sigv4; } + static bool SetUserAgentFlag(bool isset) { bool bresult = S3fsCurl::is_ua; S3fsCurl::is_ua = isset; return bresult; } + static bool IsUserAgentFlag(void) 
{ return S3fsCurl::is_ua; } + static void InitUserAgent(void); + static bool SetRequesterPays(bool flag) { bool old_flag = S3fsCurl::requester_pays; S3fsCurl::requester_pays = flag; return old_flag; } + static bool IsRequesterPays(void) { return S3fsCurl::requester_pays; } - bool LoadIAMRoleFromMetaData(void); - bool AddSseRequestHead(sse_type_t ssetype, std::string& ssevalue, bool is_only_c, bool is_copy); - bool GetResponseCode(long& responseCode, bool from_curl_handle = true); - int RequestPerform(bool dontAddAuthHeaders=false); - int DeleteRequest(const char* tpath); - bool PreHeadRequest(const char* tpath, const char* bpath = NULL, const char* savedpath = NULL, int ssekey_pos = -1); - bool PreHeadRequest(std::string& tpath, std::string& bpath, std::string& savedpath, int ssekey_pos = -1) { - return PreHeadRequest(tpath.c_str(), bpath.c_str(), savedpath.c_str(), ssekey_pos); - } - int HeadRequest(const char* tpath, headers_t& meta); - int PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy); - int PutRequest(const char* tpath, headers_t& meta, int fd); - int PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size, sse_type_t ssetype, std::string& ssevalue); - int GetObjectRequest(const char* tpath, int fd, off_t start = -1, ssize_t size = -1); - int CheckBucket(void); - int ListBucketRequest(const char* tpath, const char* query); - int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy); - int CompleteMultipartPostRequest(const char* tpath, const std::string& upload_id, etaglist_t& parts); - int UploadMultipartPostRequest(const char* tpath, int part_num, const std::string& upload_id); - int MultipartListRequest(std::string& body); - int AbortMultipartUpload(const char* tpath, const std::string& upload_id); - int MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool is_copy); - int MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool is_copy); - int 
MultipartUploadRequest(const std::string& upload_id, const char* tpath, int fd, off_t offset, off_t size, etaglist_t& list); - int MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size); + // methods + bool CreateCurlHandle(bool only_pool = false, bool remake = false); + bool DestroyCurlHandle(bool restore_pool = true, bool clear_internal_data = true); - // methods(variables) - CURL* GetCurlHandle(void) const { return hCurl; } - std::string GetPath(void) const { return path; } - std::string GetBasePath(void) const { return base_path; } - std::string GetSpacialSavedPath(void) const { return saved_path; } - std::string GetUrl(void) const { return url; } - std::string GetOp(void) const { return op; } - headers_t* GetResponseHeaders(void) { return &responseHeaders; } - BodyData* GetBodyData(void) { return &bodydata; } - BodyData* GetHeadData(void) { return &headdata; } - long GetLastResponseCode(void) const { return LastResponseCode; } - bool SetUseAhbe(bool ahbe); - bool EnableUseAhbe(void) { return SetUseAhbe(true); } - bool DisableUseAhbe(void) { return SetUseAhbe(false); } - bool IsUseAhbe(void) const { return is_use_ahbe; } - int GetMultipartRetryCount(void) const { return retry_count; } - void SetMultipartRetryCount(int retrycnt) { retry_count = retrycnt; } - bool IsOverMultipartRetryCount(void) const { return (retry_count >= S3fsCurl::retries); } - int GetLastPreHeadSeecKeyPos(void) const { return b_ssekey_pos; } + bool LoadIAMRoleFromMetaData(void); + bool AddSseRequestHead(sse_type_t ssetype, std::string& ssevalue, bool is_only_c, bool is_copy); + bool GetResponseCode(long& responseCode, bool from_curl_handle = true); + int RequestPerform(bool dontAddAuthHeaders=false); + int DeleteRequest(const char* tpath); + bool PreHeadRequest(const char* tpath, const char* bpath = NULL, const char* savedpath = NULL, int ssekey_pos = -1); + bool PreHeadRequest(std::string& tpath, std::string& bpath, std::string& savedpath, int ssekey_pos = 
-1) { + return PreHeadRequest(tpath.c_str(), bpath.c_str(), savedpath.c_str(), ssekey_pos); + } + int HeadRequest(const char* tpath, headers_t& meta); + int PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy); + int PutRequest(const char* tpath, headers_t& meta, int fd); + int PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size, sse_type_t ssetype, std::string& ssevalue); + int GetObjectRequest(const char* tpath, int fd, off_t start = -1, ssize_t size = -1); + int CheckBucket(void); + int ListBucketRequest(const char* tpath, const char* query); + int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy); + int CompleteMultipartPostRequest(const char* tpath, const std::string& upload_id, etaglist_t& parts); + int UploadMultipartPostRequest(const char* tpath, int part_num, const std::string& upload_id); + int MultipartListRequest(std::string& body); + int AbortMultipartUpload(const char* tpath, const std::string& upload_id); + int MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool is_copy); + int MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool is_copy); + int MultipartUploadRequest(const std::string& upload_id, const char* tpath, int fd, off_t offset, off_t size, etaglist_t& list); + int MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size); + + // methods(variables) + CURL* GetCurlHandle(void) const { return hCurl; } + std::string GetPath(void) const { return path; } + std::string GetBasePath(void) const { return base_path; } + std::string GetSpacialSavedPath(void) const { return saved_path; } + std::string GetUrl(void) const { return url; } + std::string GetOp(void) const { return op; } + headers_t* GetResponseHeaders(void) { return &responseHeaders; } + BodyData* GetBodyData(void) { return &bodydata; } + BodyData* GetHeadData(void) { return &headdata; } + long GetLastResponseCode(void) const { return 
LastResponseCode; } + bool SetUseAhbe(bool ahbe); + bool EnableUseAhbe(void) { return SetUseAhbe(true); } + bool DisableUseAhbe(void) { return SetUseAhbe(false); } + bool IsUseAhbe(void) const { return is_use_ahbe; } + int GetMultipartRetryCount(void) const { return retry_count; } + void SetMultipartRetryCount(int retrycnt) { retry_count = retrycnt; } + bool IsOverMultipartRetryCount(void) const { return (retry_count >= S3fsCurl::retries); } + int GetLastPreHeadSeecKeyPos(void) const { return b_ssekey_pos; } }; -//---------------------------------------------- -// class S3fsMultiCurl -//---------------------------------------------- -// Class for lapping multi curl -// -typedef std::vector s3fscurllist_t; -typedef bool (*S3fsMultiSuccessCallback)(S3fsCurl* s3fscurl); // callback for succeed multi request -typedef S3fsCurl* (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failure and retrying - -class S3fsMultiCurl -{ - private: - const int maxParallelism; - - s3fscurllist_t clist_all; // all of curl requests - s3fscurllist_t clist_req; // curl requests are sent - - S3fsMultiSuccessCallback SuccessCallback; - S3fsMultiRetryCallback RetryCallback; - - pthread_mutex_t completed_tids_lock; - std::vector completed_tids; - - private: - bool ClearEx(bool is_all); - int MultiPerform(void); - int MultiRead(void); - - static void* RequestPerformWrapper(void* arg); - - public: - explicit S3fsMultiCurl(int maxParallelism); - ~S3fsMultiCurl(); - - int GetMaxParallelism() { return maxParallelism; } - - S3fsMultiSuccessCallback SetSuccessCallback(S3fsMultiSuccessCallback function); - S3fsMultiRetryCallback SetRetryCallback(S3fsMultiRetryCallback function); - bool Clear(void) { return ClearEx(true); } - bool SetS3fsCurlObject(S3fsCurl* s3fscurl); - int Request(void); -}; - -//---------------------------------------------- -// Utility Functions -//---------------------------------------------- -std::string GetContentMD5(int fd); -unsigned char* md5hexsum(int fd, off_t 
start, ssize_t size); -std::string md5sum(int fd, off_t start, ssize_t size); -struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data); -struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value); -std::string get_sorted_header_keys(const struct curl_slist* list); -std::string get_canonical_headers(const struct curl_slist* list, bool only_amz = false); -std::string get_header_value(const struct curl_slist* list, const std::string &key); -bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url); -std::string prepare_url(const char* url); -bool get_object_sse_type(const char* path, sse_type_t& ssetype, std::string& ssevalue); // implement in s3fs.cpp - #endif // S3FS_CURL_H_ /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/curl_handlerpool.cpp b/src/curl_handlerpool.cpp new file mode 100644 index 0000000..729ec85 --- /dev/null +++ b/src/curl_handlerpool.cpp @@ -0,0 +1,129 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Randy Rizun + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include +#include + +#include "common.h" +#include "s3fs.h" +#include "curl_handlerpool.h" +#include "autolock.h" + +using namespace std; + +//------------------------------------------------------------------- +// Class CurlHandlerPool +//------------------------------------------------------------------- +bool CurlHandlerPool::Init() +{ + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); +#if S3FS_PTHREAD_ERRORCHECK + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); +#endif + if (0 != pthread_mutex_init(&mLock, &attr)) { + S3FS_PRN_ERR("Init curl handlers lock failed"); + return false; + } + + for(int cnt = 0; cnt < mMaxHandlers; ++cnt){ + CURL* hCurl = curl_easy_init(); + if(!hCurl){ + S3FS_PRN_ERR("Init curl handlers pool failed"); + Destroy(); + return false; + } + mPool.push_back(hCurl); + } + return true; +} + +bool CurlHandlerPool::Destroy() +{ + while(!mPool.empty()){ + CURL* hCurl = mPool.back(); + mPool.pop_back(); + if(hCurl){ + curl_easy_cleanup(hCurl); + } + } + if (0 != pthread_mutex_destroy(&mLock)) { + S3FS_PRN_ERR("Destroy curl handlers lock failed"); + return false; + } + return true; +} + +CURL* CurlHandlerPool::GetHandler(bool only_pool) +{ + CURL* hCurl = NULL; + { + AutoLock lock(&mLock); + + if(!mPool.empty()){ + hCurl = mPool.back(); + mPool.pop_back(); + S3FS_PRN_DBG("Get handler from pool: rest = %d", static_cast(mPool.size())); + } + } + if(only_pool){ + return hCurl; + } + if(!hCurl){ + S3FS_PRN_INFO("Pool empty: force to create new handler"); + hCurl = curl_easy_init(); + } + return hCurl; +} + +void CurlHandlerPool::ReturnHandler(CURL* hCurl, bool restore_pool) +{ + if(!hCurl){ + return; + } + + if(restore_pool){ + AutoLock lock(&mLock); + + S3FS_PRN_DBG("Return handler to pool"); + 
mPool.push_back(hCurl); + + while(mMaxHandlers <= static_cast(mPool.size())){ + CURL* hOldCurl = mPool.front(); + mPool.pop_front(); + if(hOldCurl){ + S3FS_PRN_INFO("Pool full: destroy the oldest handler"); + curl_easy_cleanup(hOldCurl); + } + } + }else{ + S3FS_PRN_INFO("Pool full: destroy the handler"); + curl_easy_cleanup(hCurl); + } +} + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/curl_handlerpool.h b/src/curl_handlerpool.h new file mode 100644 index 0000000..d0df4d5 --- /dev/null +++ b/src/curl_handlerpool.h @@ -0,0 +1,64 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Randy Rizun + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
 */

#ifndef S3FS_CURL_HANDLERPOOL_H_
#define S3FS_CURL_HANDLERPOOL_H_

// NOTE(review): original include targets were lost in extraction; this
// header's code needs <cassert> (assert) and libcurl's CURL type -- confirm.
#include <cassert>
#include <curl/curl.h>

//----------------------------------------------
// Typedefs
//----------------------------------------------
// Pool storage: a list of curl easy handles, used LIFO from the back.
typedef std::list<CURL*> hcurllist_t;

//----------------------------------------------
// class CurlHandlerPool
//----------------------------------------------
// Fixed-capacity pool of reusable CURL easy handles guarded by a pthread
// mutex.  The constructor only records the capacity; Init() must be
// called before use and Destroy() before the object goes away.
class CurlHandlerPool
{
    public:
        // maxHandlers: pool capacity, must be positive
        explicit CurlHandlerPool(int maxHandlers) : mMaxHandlers(maxHandlers)
        {
            assert(maxHandlers > 0);
        }

        bool Init();
        bool Destroy();

        // Returns a handle; NULL when only_pool is true and the pool is empty.
        CURL* GetHandler(bool only_pool);
        // Returns a handle to the pool, or destroys it when restore_pool is false.
        void ReturnHandler(CURL* hCurl, bool restore_pool);

    private:
        int mMaxHandlers;        // pool capacity
        pthread_mutex_t mLock;   // guards mPool (initialized in Init())
        hcurllist_t mPool;       // currently available handles
};

#endif // S3FS_CURL_HANDLERPOOL_H_
 */

// NOTE(review): original include targets were lost in extraction; these
// cover what this file uses directly (printf-logging, strerror, errno) -- confirm.
#include <cstdio>
#include <cstdlib>
#include <cstring>

#include "common.h"
#include "s3fs.h"
#include "curl_multi.h"
#include "curl.h"
#include "autolock.h"

using namespace std;

//-------------------------------------------------------------------
// Class S3fsMultiCurl
//-------------------------------------------------------------------
// Constructor: record the parallelism limit and initialize the mutex that
// protects the list of finished worker thread ids.
S3fsMultiCurl::S3fsMultiCurl(int maxParallelism) : maxParallelism(maxParallelism), SuccessCallback(NULL), RetryCallback(NULL)
{
    int res;
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
#if S3FS_PTHREAD_ERRORCHECK
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
    if (0 != (res = pthread_mutex_init(&completed_tids_lock, &attr))) {
        S3FS_PRN_ERR("could not initialize completed_tids_lock: %i", res);
    }
}

S3fsMultiCurl::~S3fsMultiCurl()
{
    Clear();
    int res;
    if(0 != (res = pthread_mutex_destroy(&completed_tids_lock))){
        S3FS_PRN_ERR("could not destroy completed_tids_lock: %i", res);
    }
}

// Delete every request in clist_req (and also clist_all when is_all).
// Each S3fsCurl owns a curl handle which is destroyed before deletion.
bool S3fsMultiCurl::ClearEx(bool is_all)
{
    s3fscurllist_t::iterator iter;
    for(iter = clist_req.begin(); iter != clist_req.end(); ++iter){
        S3fsCurl* s3fscurl = *iter;
        if(s3fscurl){
            s3fscurl->DestroyCurlHandle();
            delete s3fscurl;  // with destroy curl handle.
        }
    }
    clist_req.clear();

    if(is_all){
        for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){
            S3fsCurl* s3fscurl = *iter;
            s3fscurl->DestroyCurlHandle();
            delete s3fscurl;
        }
        clist_all.clear();
    }

    S3FS_MALLOCTRIM(0);

    return true;
}

// Install the per-request success callback; returns the previous one.
S3fsMultiSuccessCallback S3fsMultiCurl::SetSuccessCallback(S3fsMultiSuccessCallback function)
{
    S3fsMultiSuccessCallback old = SuccessCallback;
    SuccessCallback = function;
    return old;
}

// Install the retry callback; returns the previous one.
S3fsMultiRetryCallback S3fsMultiCurl::SetRetryCallback(S3fsMultiRetryCallback function)
{
    S3fsMultiRetryCallback old = RetryCallback;
    RetryCallback = function;
    return old;
}

// Queue a request for the next Request() run.  Ownership transfers to
// this object (it is deleted by ClearEx/MultiRead).
bool S3fsMultiCurl::SetS3fsCurlObject(S3fsCurl* s3fscurl)
{
    if(!s3fscurl){
        return false;
    }
    clist_all.push_back(s3fscurl);

    return true;
}

// Run every request in clist_req, one worker thread per request,
// throttled to GetMaxParallelism() by a counting semaphore.
// Returns 0 when all threads were created and joined cleanly, -EIO otherwise.
int S3fsMultiCurl::MultiPerform()
{
    std::vector<pthread_t> threads;
    bool success = true;
    bool isMultiHead = false;
    Semaphore sem(GetMaxParallelism());
    int rc;

    for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ++iter) {
        pthread_t thread;
        S3fsCurl* s3fscurl = *iter;
        if(!s3fscurl){
            continue;
        }

        // block until a parallelism slot is free (posted by the worker
        // in RequestPerformWrapper when it finishes)
        sem.wait();

        {
            // reap workers that already finished so their thread ids
            // do not accumulate while we keep spawning
            AutoLock lock(&completed_tids_lock);
            for(std::vector<pthread_t>::iterator it = completed_tids.begin(); it != completed_tids.end(); ++it){
                void* retval;

                rc = pthread_join(*it, &retval);
                if (rc) {
                    success = false;
                    S3FS_PRN_ERR("failed pthread_join - rc(%d) %s", rc, strerror(rc));
                } else {
                    int int_retval = (int)(intptr_t)(retval);
                    // -ENOENT from a HEAD probe is expected (see MultiRead),
                    // so it is not warned about in that case
                    if (int_retval && !(int_retval == -ENOENT && isMultiHead)) {
                        S3FS_PRN_WARN("thread failed - rc(%d)", int_retval);
                    }
                }
            }
            completed_tids.clear();
        }
        // hand the worker the shared semaphore and completion bookkeeping
        s3fscurl->sem = &sem;
        s3fscurl->completed_tids_lock = &completed_tids_lock;
        s3fscurl->completed_tids = &completed_tids;

        // remember whether any request in this batch is a HEAD
        isMultiHead |= s3fscurl->GetOp() == "HEAD";

        rc = pthread_create(&thread, NULL, S3fsMultiCurl::RequestPerformWrapper, static_cast<void*>(s3fscurl));
        if (rc != 0) {
            // NOTE(review): the semaphore slot acquired by sem.wait() above
            // is not released on this failure path -- confirm the drain loop
            // below cannot stall because of it.
            success = false;
            S3FS_PRN_ERR("failed pthread_create - rc(%d)", rc);
            break;
        }
        threads.push_back(thread);
    }

    // drain: wait until all in-flight workers have posted their slots.
    // NOTE(review): sem.get_value() is re-evaluated every iteration, so this
    // relies on the Semaphore's get_value()/wait() semantics -- confirm.
    for(int i = 0; i < sem.get_value(); ++i){
        sem.wait();
    }

    // join whatever workers completed after the loop above
    AutoLock lock(&completed_tids_lock);
    for (std::vector<pthread_t>::iterator titer = completed_tids.begin(); titer != completed_tids.end(); ++titer) {
        void* retval;

        rc = pthread_join(*titer, &retval);
        if (rc) {
            success = false;
            S3FS_PRN_ERR("failed pthread_join - rc(%d)", rc);
        } else {
            int int_retval = (int)(intptr_t)(retval);
            if (int_retval && !(int_retval == -ENOENT && isMultiHead)) {
                S3FS_PRN_WARN("thread failed - rc(%d)", int_retval);
            }
        }
    }
    completed_tids.clear();

    return success ? 0 : -EIO;
}

// Walk the performed requests, classify each by HTTP response code, fire
// the success callback for <400 results and build retry requests (via
// RetryCallback) for retryable failures.  Returns 0 or -EIO.
int S3fsMultiCurl::MultiRead()
{
    int result = 0;

    for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ){
        S3fsCurl* s3fscurl = *iter;

        bool isRetry = false;
        bool isPostpone = false;
        long responseCode = S3fsCurl::S3FSCURL_RESPONSECODE_NOTSET;
        if(s3fscurl->GetResponseCode(responseCode, false)){
            if(S3fsCurl::S3FSCURL_RESPONSECODE_NOTSET == responseCode){
                // This is a case where the processing result has not yet been updated (should be very rare).
                isPostpone = true;
            }else if(400 > responseCode){
                // add into stat cache
                if(SuccessCallback && !SuccessCallback(s3fscurl)){
                    S3FS_PRN_WARN("error from callback function(%s).", s3fscurl->url.c_str());
                }
            }else if(400 == responseCode){
                // as possibly in multipart
                S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
                isRetry = true;
            }else if(404 == responseCode){
                // not found
                // HEAD requests on readdir_multi_head can return 404
                if(s3fscurl->GetOp() != "HEAD"){
                    S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
                }
            }else if(500 == responseCode){
                // case of all other result, do retry.(11/13/2013)
                // because it was found that s3fs got 500 error from S3, but could success
                // to retry it.
                S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
                isRetry = true;
            }else{
                // Retry in other case.
                S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
                isRetry = true;
            }
        }else{
            S3FS_PRN_ERR("failed a request(Unknown response code: %s)", s3fscurl->url.c_str());
        }

        if(isPostpone){
            // result not ready yet: move the request to the back of the
            // queue and restart the scan from the beginning
            clist_req.erase(iter);
            clist_req.push_back(s3fscurl);    // Re-evaluate at the end
            iter = clist_req.begin();
        }else{
            if(!isRetry || 0 != result){
                // If an EIO error has already occurred, it will be terminated
                // immediately even if retry processing is required.
                s3fscurl->DestroyCurlHandle();
                delete s3fscurl;
            }else{
                S3fsCurl* retrycurl = NULL;

                // For retry
                if(RetryCallback){
                    retrycurl = RetryCallback(s3fscurl);
                    if(NULL != retrycurl){
                        clist_all.push_back(retrycurl);
                    }else{
                        // set EIO and wait for other parts.
                        result = -EIO;
                    }
                }
                // the callback may return the same object; only delete it
                // when a distinct retry object (or NULL) came back
                if(s3fscurl != retrycurl){
                    s3fscurl->DestroyCurlHandle();
                    delete s3fscurl;
                }
            }
            iter = clist_req.erase(iter);
        }
    }
    clist_req.clear();

    if(0 != result){
        // If an EIO error has already occurred, clear all retry objects.
        for(s3fscurllist_t::iterator iter = clist_all.begin(); iter != clist_all.end(); ++iter){
            S3fsCurl* s3fscurl = *iter;
            s3fscurl->DestroyCurlHandle();
            delete s3fscurl;
        }
        clist_all.clear();
    }
    return result;
}

// Top-level driver: repeatedly move queued requests into the active list,
// perform them in parallel and evaluate the results, until nothing is
// left (retries re-enter clist_all).  Returns 0 or the first error.
int S3fsMultiCurl::Request()
{
    S3FS_PRN_INFO3("[count=%zu]", clist_all.size());

    // Make request list.
    //
    // Send multi request loop( with retry )
    // (When many request is sends, sometimes gets "Couldn't connect to server")
    //
    while(!clist_all.empty()){
        // set curl handle to multi handle
        int result;
        s3fscurllist_t::iterator iter;
        for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){
            S3fsCurl* s3fscurl = *iter;
            clist_req.push_back(s3fscurl);
        }
        clist_all.clear();

        // Send multi request.
        if(0 != (result = MultiPerform())){
            Clear();
            return result;
        }

        // Read the result
        if(0 != (result = MultiRead())){
            Clear();
            return result;
        }

        // Cleanup curl handle in multi handle
        ClearEx(false);
    }
    return 0;
}

//
// thread function for performing an S3fsCurl request
//
// Returns the request's result encoded as an intptr_t; always records its
// own thread id in completed_tids and posts the parallelism semaphore so
// MultiPerform can reap it.
void* S3fsMultiCurl::RequestPerformWrapper(void* arg)
{
    S3fsCurl* s3fscurl = static_cast<S3fsCurl*>(arg);
    void* result = NULL;
    if(!s3fscurl){
        return (void*)(intptr_t)(-EIO);
    }
    if(s3fscurl->fpLazySetup){
        if(!s3fscurl->fpLazySetup(s3fscurl)){
            S3FS_PRN_ERR("Failed to lazy setup, then respond EIO.");
            result = (void*)(intptr_t)(-EIO);
        }
    }

    if(!result){
        result = (void*)(intptr_t)(s3fscurl->RequestPerform());
        s3fscurl->DestroyCurlHandle(true, false);
    }

    AutoLock lock(s3fscurl->completed_tids_lock);
    s3fscurl->completed_tids->push_back(pthread_self());
    s3fscurl->sem->post();

    return result;
}
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef S3FS_CURL_MULTI_H_
#define S3FS_CURL_MULTI_H_

//----------------------------------------------
// Typedef
//----------------------------------------------
class S3fsCurl;

typedef std::vector<S3fsCurl*> s3fscurllist_t;
typedef bool (*S3fsMultiSuccessCallback)(S3fsCurl* s3fscurl);     // callback for succeed multi request
typedef S3fsCurl* (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl);  // callback for failure and retrying

//----------------------------------------------
// class S3fsMultiCurl
//----------------------------------------------
// Runs a batch of S3fsCurl requests in parallel (one thread per request,
// throttled to maxParallelism) and drives the success/retry callbacks
// over the results.  Queued S3fsCurl objects are owned by this class.
class S3fsMultiCurl
{
    private:
        const int maxParallelism;                    // upper bound on concurrent request threads

        s3fscurllist_t clist_all;                    // all of curl requests
        s3fscurllist_t clist_req;                    // curl requests are sent

        S3fsMultiSuccessCallback SuccessCallback;    // invoked for each response code < 400
        S3fsMultiRetryCallback RetryCallback;        // builds the replacement request for a retryable failure

        pthread_mutex_t completed_tids_lock;         // guards completed_tids
        std::vector<pthread_t> completed_tids;       // finished worker threads waiting to be joined

    private:
        bool ClearEx(bool is_all);
        int MultiPerform(void);
        int MultiRead(void);

        // thread entry point; arg is the S3fsCurl* to perform
        static void* RequestPerformWrapper(void* arg);

    public:
        explicit S3fsMultiCurl(int maxParallelism);
        ~S3fsMultiCurl();

        int GetMaxParallelism() { return maxParallelism; }

        // each setter returns the previously installed callback
        S3fsMultiSuccessCallback SetSuccessCallback(S3fsMultiSuccessCallback function);
        S3fsMultiRetryCallback SetRetryCallback(S3fsMultiRetryCallback function);
        bool Clear(void) { return ClearEx(true); }
        bool SetS3fsCurlObject(S3fsCurl* s3fscurl);
        int Request(void);
};

#endif // S3FS_CURL_MULTI_H_
+ * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Randy Rizun + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include +#include +#include + +#include "common.h" +#include "s3fs.h" +#include "curl_util.h" +#include "string_util.h" +#include "s3fs_auth.h" + +using namespace std; + +//------------------------------------------------------------------- +// Utility Functions +//------------------------------------------------------------------- +// +// curl_slist_sort_insert +// This function is like curl_slist_append function, but this adds data by a-sorting. +// Because AWS signature needs sorted header. 
+// +struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data) +{ + if(!data){ + return list; + } + string strkey = data; + string strval; + + string::size_type pos = strkey.find(':', 0); + if(string::npos != pos){ + strval = strkey.substr(pos + 1); + strkey = strkey.substr(0, pos); + } + + return curl_slist_sort_insert(list, strkey.c_str(), strval.c_str()); +} + +struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value) +{ + struct curl_slist* curpos; + struct curl_slist* lastpos; + struct curl_slist* new_item; + + if(!key){ + return list; + } + if(NULL == (new_item = reinterpret_cast(malloc(sizeof(struct curl_slist))))){ + return list; + } + + // key & value are trimmed and lower (only key) + string strkey = trim(string(key)); + string strval = trim(string(value ? value : "")); + string strnew = key + string(": ") + strval; + if(NULL == (new_item->data = strdup(strnew.c_str()))){ + free(new_item); + return list; + } + new_item->next = NULL; + + for(lastpos = NULL, curpos = list; curpos; lastpos = curpos, curpos = curpos->next){ + string strcur = curpos->data; + size_t pos; + if(string::npos != (pos = strcur.find(':', 0))){ + strcur = strcur.substr(0, pos); + } + + int result = strcasecmp(strkey.c_str(), strcur.c_str()); + if(0 == result){ + // same data, so replace it. + if(lastpos){ + lastpos->next = new_item; + }else{ + list = new_item; + } + new_item->next = curpos->next; + free(curpos->data); + free(curpos); + break; + + }else if(0 > result){ + // add data before curpos. 
+ if(lastpos){ + lastpos->next = new_item; + }else{ + list = new_item; + } + new_item->next = curpos; + break; + } + } + + if(!curpos){ + // append to last pos + if(lastpos){ + lastpos->next = new_item; + }else{ + // a case of list is null + list = new_item; + } + } + return list; +} + +string get_sorted_header_keys(const struct curl_slist* list) +{ + string sorted_headers; + + if(!list){ + return sorted_headers; + } + + for( ; list; list = list->next){ + string strkey = list->data; + size_t pos; + if(string::npos != (pos = strkey.find(':', 0))){ + if (trim(strkey.substr(pos + 1)).empty()) { + // skip empty-value headers (as they are discarded by libcurl) + continue; + } + strkey = strkey.substr(0, pos); + } + if(0 < sorted_headers.length()){ + sorted_headers += ";"; + } + sorted_headers += lower(strkey); + } + + return sorted_headers; +} + +string get_header_value(const struct curl_slist* list, const string &key) +{ + if(!list){ + return ""; + } + + for( ; list; list = list->next){ + string strkey = list->data; + size_t pos; + if(string::npos != (pos = strkey.find(':', 0))){ + if(0 == strcasecmp(trim(strkey.substr(0, pos)).c_str(), key.c_str())){ + return trim(strkey.substr(pos+1)); + } + } + } + + return ""; +} + +string get_canonical_headers(const struct curl_slist* list) +{ + string canonical_headers; + + if(!list){ + canonical_headers = "\n"; + return canonical_headers; + } + + for( ; list; list = list->next){ + string strhead = list->data; + size_t pos; + if(string::npos != (pos = strhead.find(':', 0))){ + string strkey = trim(lower(strhead.substr(0, pos))); + string strval = trim(strhead.substr(pos + 1)); + if (strval.empty()) { + // skip empty-value headers (as they are discarded by libcurl) + continue; + } + strhead = strkey.append(":").append(strval); + }else{ + strhead = trim(lower(strhead)); + } + canonical_headers += strhead; + canonical_headers += "\n"; + } + return canonical_headers; +} + +string get_canonical_headers(const struct curl_slist* list, 
bool only_amz) +{ + string canonical_headers; + + if(!list){ + canonical_headers = "\n"; + return canonical_headers; + } + + for( ; list; list = list->next){ + string strhead = list->data; + size_t pos; + if(string::npos != (pos = strhead.find(':', 0))){ + string strkey = trim(lower(strhead.substr(0, pos))); + string strval = trim(strhead.substr(pos + 1)); + if (strval.empty()) { + // skip empty-value headers (as they are discarded by libcurl) + continue; + } + strhead = strkey.append(":").append(strval); + }else{ + strhead = trim(lower(strhead)); + } + if(only_amz && strhead.substr(0, 5) != "x-amz"){ + continue; + } + canonical_headers += strhead; + canonical_headers += "\n"; + } + return canonical_headers; +} + +// function for using global values +bool MakeUrlResource(const char* realpath, string& resourcepath, string& url) +{ + if(!realpath){ + return false; + } + resourcepath = urlEncode(service_path + bucket + realpath); + url = s3host + resourcepath; + return true; +} + +string prepare_url(const char* url) +{ + S3FS_PRN_INFO3("URL is %s", url); + + string uri; + string hostname; + string path; + string url_str = string(url); + string token = string("/") + bucket; + int bucket_pos; + int bucket_length = token.size(); + int uri_length = 0; + + if(!strncasecmp(url_str.c_str(), "https://", 8)){ + uri_length = 8; + } else if(!strncasecmp(url_str.c_str(), "http://", 7)) { + uri_length = 7; + } + uri = url_str.substr(0, uri_length); + bucket_pos = url_str.find(token, uri_length); + + if(!pathrequeststyle){ + hostname = bucket + "." 
+ url_str.substr(uri_length, bucket_pos - uri_length); + path = url_str.substr((bucket_pos + bucket_length)); + }else{ + hostname = url_str.substr(uri_length, bucket_pos - uri_length); + string part = url_str.substr((bucket_pos + bucket_length)); + if('/' != part[0]){ + part = "/" + part; + } + path = "/" + bucket + part; + } + + url_str = uri + hostname + path; + + S3FS_PRN_INFO3("URL changed is %s", url_str.c_str()); + + return url_str; +} + +// [TODO] +// This function uses temporary file, but should not use it. +// For not using it, we implement function in each auth file(openssl, nss. gnutls). +// +bool make_md5_from_binary(const char* pstr, size_t length, string& md5) +{ + if(!pstr || '\0' == pstr[0]){ + S3FS_PRN_ERR("Parameter is wrong."); + return false; + } + FILE* fp; + if(NULL == (fp = tmpfile())){ + S3FS_PRN_ERR("Could not make tmpfile."); + return false; + } + if(length != fwrite(pstr, sizeof(char), length, fp)){ + S3FS_PRN_ERR("Failed to write tmpfile."); + fclose(fp); + return false; + } + int fd; + if(0 != fflush(fp) || 0 != fseek(fp, 0L, SEEK_SET) || -1 == (fd = fileno(fp))){ + S3FS_PRN_ERR("Failed to make MD5."); + fclose(fp); + return false; + } + // base64 md5 + md5 = s3fs_get_content_md5(fd); + if(0 == md5.length()){ + S3FS_PRN_ERR("Failed to make MD5."); + fclose(fp); + return false; + } + fclose(fp); + return true; +} + +string url_to_host(const string &url) +{ + S3FS_PRN_INFO3("url is %s", url.c_str()); + + static const string http = "http://"; + static const string https = "https://"; + std::string hostname; + + if (url.compare(0, http.size(), http) == 0) { + hostname = url.substr(http.size()); + } else if (url.compare(0, https.size(), https) == 0) { + hostname = url.substr(https.size()); + } else { + S3FS_PRN_EXIT("url does not begin with http:// or https://"); + abort(); + } + + size_t idx; + if ((idx = hostname.find('/')) != string::npos) { + return hostname.substr(0, idx); + } else { + return hostname; + } +} + +string get_bucket_host() 
+{ + if(!pathrequeststyle){ + return bucket + "." + url_to_host(s3host); + } + return url_to_host(s3host); +} + +const char* getCurlDebugHead(curl_infotype type) +{ + const char* unknown = ""; + const char* dataIn = "BODY <"; + const char* dataOut = "BODY >"; + const char* headIn = "<"; + const char* headOut = ">"; + + switch(type){ + case CURLINFO_DATA_IN: + return dataIn; + case CURLINFO_DATA_OUT: + return dataOut; + case CURLINFO_HEADER_IN: + return headIn; + case CURLINFO_HEADER_OUT: + return headOut; + default: + break; + } + return unknown; +} + +// +// compare ETag ignoring quotes and case +// +bool etag_equals(string s1, string s2) +{ + if(s1.length() > 1 && s1[0] == '\"' && s1[s1.length() - 1] == '\"'){ + s1 = s1.substr(1, s1.size() - 2); + } + if(s2.length() > 1 && s2[0] == '\"' && s2[s2.length() - 1] == '\"'){ + s2 = s2.substr(1, s2.size() - 2); + } + return 0 == strcasecmp(s1.c_str(), s2.c_str()); +} + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/curl_util.h b/src/curl_util.h new file mode 100644 index 0000000..82989dc --- /dev/null +++ b/src/curl_util.h @@ -0,0 +1,57 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Randy Rizun + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef S3FS_CURL_UTIL_H_
#define S3FS_CURL_UTIL_H_

// NOTE(review): original include target lost in extraction; <curl/curl.h>
// is required for curl_slist / curl_infotype below -- confirm.
#include <curl/curl.h>

//----------------------------------------------
// Functions
//----------------------------------------------
// MD5 helpers over an open file descriptor (range given by start/size)
std::string GetContentMD5(int fd);
unsigned char* md5hexsum(int fd, off_t start, ssize_t size);
std::string md5sum(int fd, off_t start, ssize_t size);
// sorted-insert helpers for building signed header lists
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data);
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value);
// AWS signature building blocks (SignedHeaders / CanonicalHeaders)
std::string get_sorted_header_keys(const struct curl_slist* list);
std::string get_canonical_headers(const struct curl_slist* list, bool only_amz = false);
std::string get_header_value(const struct curl_slist* list, const std::string &key);
// URL composition from the global bucket/host settings
bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url);
std::string prepare_url(const char* url);
bool get_object_sse_type(const char* path, sse_type_t& ssetype, std::string& ssevalue); // implement in s3fs.cpp

bool make_md5_from_binary(const char* pstr, size_t length, std::string& md5);
std::string url_to_host(const std::string &url);
std::string get_bucket_host(void);
// prefix string for libcurl verbose-debug output
const char* getCurlDebugHead(curl_infotype type);

// compare two ETags ignoring surrounding quotes and case
bool etag_equals(std::string s1, std::string s2);

#endif // S3FS_CURL_UTIL_H_
-#include -#include -#include -#include -#include -#include -#include #include "common.h" #include "s3fs.h" +#include "fdcache.h" #include "s3fs_util.h" #include "string_util.h" -#include "curl.h" -#include "fdcache.h" +#include "autolock.h" using namespace std; //------------------------------------------------ // Symbols //------------------------------------------------ -static const int MAX_MULTIPART_CNT = 10 * 1000; // S3 multipart max count -static const int CHECK_CACHEFILE_PART_SIZE = 1024 * 16; // Buffer size in PageList::CheckZeroAreaInFile() - -// -// [NOTE] -// If the following symbols in lseek whence are undefined, define them. -// If it is not supported by lseek, s3fs judges by the processing result of lseek. -// -#ifndef SEEK_DATA -#define SEEK_DATA 3 -#endif -#ifndef SEEK_HOLE -#define SEEK_HOLE 4 -#endif #define TMPFILE_FOR_CHECK_HOLE "/tmp/.s3fs_hole_check.tmp" // @@ -99,2612 +70,6 @@ static const int CHECK_CACHEFILE_PART_SIZE = 1024 * 16; // Buffer size in PageLi #define CACHEDBG_FMT_CRIT_HEAD2 " -> [C] " #define CACHEDBG_FMT_PROB_BLOCK " 0x%016zx(0x%016zx bytes)" -//------------------------------------------------ -// CacheFileStat class methods -//------------------------------------------------ -string CacheFileStat::GetCacheFileStatTopDir() -{ - string top_path(""); - if(!FdManager::IsCacheDir() || bucket.empty()){ - return top_path; - } - - // stat top dir( "//..stat" ) - top_path += FdManager::GetCacheDir(); - top_path += "/."; - top_path += bucket; - top_path += ".stat"; - return top_path; -} - -bool CacheFileStat::MakeCacheFileStatPath(const char* path, string& sfile_path, bool is_create_dir) -{ - string top_path = CacheFileStat::GetCacheFileStatTopDir(); - if(top_path.empty()){ - S3FS_PRN_ERR("The path to cache top dir is empty."); - return false; - } - - if(is_create_dir){ - int result; - if(0 != (result = mkdirp(top_path + mydirname(path), 0777))){ - S3FS_PRN_ERR("failed to create dir(%s) by errno(%d).", path, result); - return false; - 
} - } - if(!path || '\0' == path[0]){ - sfile_path = top_path; - }else{ - sfile_path = top_path + SAFESTRPTR(path); - } - return true; -} - -bool CacheFileStat::CheckCacheFileStatTopDir() -{ - string top_path = CacheFileStat::GetCacheFileStatTopDir(); - if(top_path.empty()){ - S3FS_PRN_INFO("The path to cache top dir is empty, thus not need to check permission."); - return true; - } - - return check_exist_dir_permission(top_path.c_str()); -} - -bool CacheFileStat::DeleteCacheFileStat(const char* path) -{ - if(!path || '\0' == path[0]){ - return false; - } - // stat path - string sfile_path; - if(!CacheFileStat::MakeCacheFileStatPath(path, sfile_path, false)){ - S3FS_PRN_ERR("failed to create cache stat file path(%s)", path); - return false; - } - if(0 != unlink(sfile_path.c_str())){ - if(ENOENT == errno){ - S3FS_PRN_DBG("failed to delete file(%s): errno=%d", path, errno); - }else{ - S3FS_PRN_ERR("failed to delete file(%s): errno=%d", path, errno); - } - return false; - } - return true; -} - -// [NOTE] -// If remove stat file directory, it should do before removing -// file cache directory. -// -bool CacheFileStat::DeleteCacheFileStatDirectory() -{ - string top_path = CacheFileStat::GetCacheFileStatTopDir(); - if(top_path.empty()){ - S3FS_PRN_INFO("The path to cache top dir is empty, thus not need to remove it."); - return true; - } - return delete_files_in_dir(top_path.c_str(), true); -} - -bool CacheFileStat::RenameCacheFileStat(const char* oldpath, const char* newpath) -{ - if(!oldpath || '\0' == oldpath[0] || !newpath || '\0' == newpath[0]){ - return false; - } - - // stat path - string old_filestat; - string new_filestat; - if(!CacheFileStat::MakeCacheFileStatPath(oldpath, old_filestat, false) || !CacheFileStat::MakeCacheFileStatPath(newpath, new_filestat, false)){ - return false; - } - - // check new stat path - struct stat st; - if(0 == stat(new_filestat.c_str(), &st)){ - // new stat path is existed, then unlink it. 
- if(-1 == unlink(new_filestat.c_str())){ - S3FS_PRN_ERR("failed to unlink new cache file stat path(%s) by errno(%d).", new_filestat.c_str(), errno); - return false; - } - } - - // check old stat path - if(0 != stat(old_filestat.c_str(), &st)){ - // old stat path is not existed, then nothing to do any more. - return true; - } - - // link and unlink - if(-1 == link(old_filestat.c_str(), new_filestat.c_str())){ - S3FS_PRN_ERR("failed to link old cache file stat path(%s) to new cache file stat path(%s) by errno(%d).", old_filestat.c_str(), new_filestat.c_str(), errno); - return false; - } - if(-1 == unlink(old_filestat.c_str())){ - S3FS_PRN_ERR("failed to unlink old cache file stat path(%s) by errno(%d).", old_filestat.c_str(), errno); - return false; - } - - return true; -} - -//------------------------------------------------ -// CacheFileStat methods -//------------------------------------------------ -CacheFileStat::CacheFileStat(const char* tpath) : path(""), fd(-1) -{ - if(tpath && '\0' != tpath[0]){ - SetPath(tpath, true); - } -} - -CacheFileStat::~CacheFileStat() -{ - Release(); -} - -bool CacheFileStat::SetPath(const char* tpath, bool is_open) -{ - if(!tpath || '\0' == tpath[0]){ - return false; - } - if(!Release()){ - // could not close old stat file. 
- return false; - } - path = tpath; - if(!is_open){ - return true; - } - return Open(); -} - -bool CacheFileStat::RawOpen(bool readonly) -{ - if(path.empty()){ - return false; - } - if(-1 != fd){ - // already opened - return true; - } - // stat path - string sfile_path; - if(!CacheFileStat::MakeCacheFileStatPath(path.c_str(), sfile_path, true)){ - S3FS_PRN_ERR("failed to create cache stat file path(%s)", path.c_str()); - return false; - } - // open - if(readonly){ - if(-1 == (fd = open(sfile_path.c_str(), O_RDONLY))){ - S3FS_PRN_ERR("failed to read only open cache stat file path(%s) - errno(%d)", path.c_str(), errno); - return false; - } - }else{ - if(-1 == (fd = open(sfile_path.c_str(), O_CREAT|O_RDWR, 0600))){ - S3FS_PRN_ERR("failed to open cache stat file path(%s) - errno(%d)", path.c_str(), errno); - return false; - } - } - // lock - if(-1 == flock(fd, LOCK_EX)){ - S3FS_PRN_ERR("failed to lock cache stat file(%s) - errno(%d)", path.c_str(), errno); - close(fd); - fd = -1; - return false; - } - // seek top - if(0 != lseek(fd, 0, SEEK_SET)){ - S3FS_PRN_ERR("failed to lseek cache stat file(%s) - errno(%d)", path.c_str(), errno); - flock(fd, LOCK_UN); - close(fd); - fd = -1; - return false; - } - S3FS_PRN_DBG("file locked(%s - %s)", path.c_str(), sfile_path.c_str()); - - return true; -} - -bool CacheFileStat::Open() -{ - return RawOpen(false); -} - -bool CacheFileStat::ReadOnlyOpen() -{ - return RawOpen(true); -} - -bool CacheFileStat::Release() -{ - if(-1 == fd){ - // already release - return true; - } - // unlock - if(-1 == flock(fd, LOCK_UN)){ - S3FS_PRN_ERR("failed to unlock cache stat file(%s) - errno(%d)", path.c_str(), errno); - return false; - } - S3FS_PRN_DBG("file unlocked(%s)", path.c_str()); - - if(-1 == close(fd)){ - S3FS_PRN_ERR("failed to close cache stat file(%s) - errno(%d)", path.c_str(), errno); - return false; - } - fd = -1; - - return true; -} - -//------------------------------------------------ -// fdpage_list_t utility 
-//------------------------------------------------ -// Inline function for repeated processing -inline void raw_add_compress_fdpage_list(fdpage_list_t& pagelist, fdpage& page, bool ignore_load, bool ignore_modify, bool default_load, bool default_modify) -{ - if(0 < page.bytes){ - // [NOTE] - // The page variable is subject to change here. - // - if(ignore_load){ - page.loaded = default_load; - } - if(ignore_modify){ - page.modified = default_modify; - } - pagelist.push_back(page); - } -} - -// Compress the page list -// -// ignore_load: Ignore the flag of loaded member and compress -// ignore_modify: Ignore the flag of modified member and compress -// default_load: loaded flag value in the list after compression when ignore_load=true -// default_modify: modified flag value in the list after compression when default_modify=true -// -// NOTE: ignore_modify and ignore_load cannot both be true. -// -static fdpage_list_t raw_compress_fdpage_list(const fdpage_list_t& pages, bool ignore_load, bool ignore_modify, bool default_load, bool default_modify) -{ - fdpage_list_t compressed_pages; - fdpage tmppage; - bool is_first = true; - for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ - if(!is_first){ - if( (!ignore_load && (tmppage.loaded != iter->loaded )) || - (!ignore_modify && (tmppage.modified != iter->modified)) ) - { - // Different from the previous area, add it to list - raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify); - - // keep current area - tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? 
default_modify : iter->modified)); - }else{ - // Same as the previous area - if(tmppage.next() != iter->offset){ - // These are not contiguous areas, add it to list - raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify); - - // keep current area - tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? default_modify : iter->modified)); - }else{ - // These are contiguous areas - - // add current area - tmppage.bytes += iter->bytes; - } - } - }else{ - // first erea - is_first = false; - - // keep current area - tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? default_modify : iter->modified)); - } - } - // add lastest area - if(!is_first){ - raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify); - } - return compressed_pages; -} - -static fdpage_list_t compress_fdpage_list_ignore_modify(const fdpage_list_t& pages, bool default_modify) -{ - return raw_compress_fdpage_list(pages, /* ignore_load= */ false, /* ignore_modify= */ true, /* default_load= */false, /* default_modify= */default_modify); -} - -static fdpage_list_t compress_fdpage_list_ignore_load(const fdpage_list_t& pages, bool default_load) -{ - return raw_compress_fdpage_list(pages, /* ignore_load= */ true, /* ignore_modify= */ false, /* default_load= */default_load, /* default_modify= */false); -} - -static fdpage_list_t compress_fdpage_list(const fdpage_list_t& pages) -{ - return raw_compress_fdpage_list(pages, /* ignore_load= */ false, /* ignore_modify= */ false, /* default_load= */false, /* default_modify= */false); -} - -static fdpage_list_t parse_partsize_fdpage_list(const fdpage_list_t& pages, off_t max_partsize) -{ - fdpage_list_t parsed_pages; - for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ - if(iter->modified){ - // modified page - 
fdpage tmppage = *iter; - for(off_t start = iter->offset, rest_bytes = iter->bytes; 0 < rest_bytes; ){ - if((max_partsize * 2) < rest_bytes){ - // do parse - tmppage.offset = start; - tmppage.bytes = max_partsize; - parsed_pages.push_back(tmppage); - - start += max_partsize; - rest_bytes -= max_partsize; - }else{ - // Since the number of remaining bytes is less than twice max_partsize, - // one of the divided areas will be smaller than max_partsize. - // Therefore, this area at the end should not be divided. - tmppage.offset = start; - tmppage.bytes = rest_bytes; - parsed_pages.push_back(tmppage); - - start += rest_bytes; - rest_bytes = 0; - } - } - }else{ - // not modified page is not parsed - parsed_pages.push_back(*iter); - } - } - return parsed_pages; -} - -//------------------------------------------------ -// PageList class methods -//------------------------------------------------ -// -// Examine and return the status of each block in the file. -// -// Assuming the file is a sparse file, check the HOLE and DATA areas -// and return it in fdpage_list_t. The loaded flag of each fdpage is -// set to false for HOLE blocks and true for DATA blocks. -// -bool PageList::GetSparseFilePages(int fd, size_t file_size, fdpage_list_t& sparse_list) -{ - // [NOTE] - // Express the status of the cache file using fdpage_list_t. - // There is a hole in the cache file(sparse file), and the - // state of this hole is expressed by the "loaded" member of - // struct fdpage. 
(the "modified" member is not used) - // - if(0 == file_size){ - // file is empty - return true; - } - - bool is_hole = false; - int hole_pos = lseek(fd, 0, SEEK_HOLE); - int data_pos = lseek(fd, 0, SEEK_DATA); - if(-1 == hole_pos && -1 == data_pos){ - S3FS_PRN_ERR("Could not find the first position both HOLE and DATA in the file(fd=%d).", fd); - return false; - }else if(-1 == hole_pos){ - is_hole = false; - }else if(-1 == data_pos){ - is_hole = true; - }else if(hole_pos < data_pos){ - is_hole = true; - }else{ - is_hole = false; - } - - for(int cur_pos = 0, next_pos = 0; 0 <= cur_pos; cur_pos = next_pos, is_hole = !is_hole){ - fdpage page; - page.offset = cur_pos; - page.loaded = !is_hole; - page.modified = false; - - next_pos = lseek(fd, cur_pos, (is_hole ? SEEK_DATA : SEEK_HOLE)); - if(-1 == next_pos){ - page.bytes = static_cast(file_size - cur_pos); - }else{ - page.bytes = next_pos - cur_pos; - } - sparse_list.push_back(page); - } - return true; -} - -// -// Confirm that the specified area is ZERO -// -bool PageList::CheckZeroAreaInFile(int fd, off_t start, size_t bytes) -{ - char* readbuff = new char[CHECK_CACHEFILE_PART_SIZE]; - - for(size_t comp_bytes = 0, check_bytes = 0; comp_bytes < bytes; comp_bytes += check_bytes){ - if(CHECK_CACHEFILE_PART_SIZE < (bytes - comp_bytes)){ - check_bytes = CHECK_CACHEFILE_PART_SIZE; - }else{ - check_bytes = bytes - comp_bytes; - } - bool found_bad_data = false; - ssize_t read_bytes; - if(-1 == (read_bytes = pread(fd, readbuff, check_bytes, (start + comp_bytes)))){ - S3FS_PRN_ERR("Something error is occurred in reading %zu bytes at %lld from file(%d).", check_bytes, static_cast(start + comp_bytes), fd); - found_bad_data = true; - }else{ - check_bytes = static_cast(read_bytes); - for(size_t tmppos = 0; tmppos < check_bytes; ++tmppos){ - if('\0' != readbuff[tmppos]){ - // found not ZERO data. 
- found_bad_data = true; - break; - } - } - } - if(found_bad_data){ - delete[] readbuff; - return false; - } - } - delete[] readbuff; - return true; -} - -// -// Checks that the specified area matches the state of the sparse file. -// -// [Parameters] -// checkpage: This is one state of the cache file, it is loaded from the stats file. -// sparse_list: This is a list of the results of directly checking the cache file status(HOLE/DATA). -// In the HOLE area, the "loaded" flag of fdpage is false. The DATA area has it set to true. -// fd: opened file discriptor to target cache file. -// -bool PageList::CheckAreaInSparseFile(const struct fdpage& checkpage, const fdpage_list_t& sparse_list, int fd, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list) -{ - // Check the block status of a part(Check Area: checkpage) of the target file. - // The elements of sparse_list have 5 patterns that overlap this block area. - // - // File |<---...--------------------------------------...--->| - // Check Area (offset)<-------------------->(offset + bytes - 1) - // Area case(0) <-------> - // Area case(1) <-------> - // Area case(2) <--------> - // Area case(3) <----------> - // Area case(4) <-----------> - // Area case(5) <-----------------------------> - // - bool result = true; - - for(fdpage_list_t::const_iterator iter = sparse_list.begin(); iter != sparse_list.end(); ++iter){ - off_t check_start = 0; - off_t check_bytes = 0; - if((iter->offset + iter->bytes) <= checkpage.offset){ - // case 0 - continue; // next - - }else if((checkpage.offset + checkpage.bytes) <= iter->offset){ - // case 1 - break; // finish - - }else if(iter->offset < checkpage.offset && (iter->offset + iter->bytes) < (checkpage.offset + checkpage.bytes)){ - // case 2 - check_start = checkpage.offset; - check_bytes = iter->bytes - (checkpage.offset - iter->offset); - - }else if(iter->offset < (checkpage.offset + checkpage.bytes) && (checkpage.offset + checkpage.bytes) < (iter->offset + iter->bytes)){ - // 
case 3 - check_start = iter->offset; - check_bytes = checkpage.bytes - (iter->offset - checkpage.offset); - - }else if(checkpage.offset < iter->offset && (iter->offset + iter->bytes) < (checkpage.offset + checkpage.bytes)){ - // case 4 - check_start = iter->offset; - check_bytes = iter->bytes; - - }else{ // (iter->offset <= checkpage.offset && (checkpage.offset + checkpage.bytes) <= (iter->offset + iter->bytes)) - // case 5 - check_start = checkpage.offset; - check_bytes = checkpage.bytes; - } - - // check target area type - if(checkpage.loaded || checkpage.modified){ - // target area must be not HOLE(DATA) area. - if(!iter->loaded){ - // Found bad area, it is HOLE area. - fdpage page(check_start, check_bytes, false, false); - err_area_list.push_back(page); - result = false; - } - }else{ - // target area should be HOLE area.(If it is not a block boundary, it may be a DATA area.) - if(iter->loaded){ - // need to check this area's each data, it should be ZERO. - if(!PageList::CheckZeroAreaInFile(fd, check_start, static_cast(check_bytes))){ - // Discovered an area that has un-initial status data but it probably does not effect bad. 
- fdpage page(check_start, check_bytes, true, false); - warn_area_list.push_back(page); - result = false; - } - } - } - } - return result; -} - -//------------------------------------------------ -// PageList methods -//------------------------------------------------ -void PageList::FreeList(fdpage_list_t& list) -{ - list.clear(); -} - -PageList::PageList(off_t size, bool is_loaded, bool is_modified) -{ - Init(size, is_loaded, is_modified); -} - -PageList::PageList(const PageList& other) -{ - for(fdpage_list_t::const_iterator iter = other.pages.begin(); iter != other.pages.end(); ++iter){ - pages.push_back(*iter); - } -} - -PageList::~PageList() -{ - Clear(); -} - -void PageList::Clear() -{ - PageList::FreeList(pages); -} - -bool PageList::Init(off_t size, bool is_loaded, bool is_modified) -{ - Clear(); - if(0 < size){ - fdpage page(0, size, is_loaded, is_modified); - pages.push_back(page); - } - return true; -} - -off_t PageList::Size() const -{ - if(pages.empty()){ - return 0; - } - fdpage_list_t::const_reverse_iterator riter = pages.rbegin(); - return riter->next(); -} - -bool PageList::Compress() -{ - pages = compress_fdpage_list(pages); - return true; -} - -bool PageList::Parse(off_t new_pos) -{ - for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){ - if(new_pos == iter->offset){ - // nothing to do - return true; - }else if(iter->offset < new_pos && new_pos < iter->next()){ - fdpage page(iter->offset, new_pos - iter->offset, iter->loaded, iter->modified); - iter->bytes -= (new_pos - iter->offset); - iter->offset = new_pos; - pages.insert(iter, page); - return true; - } - } - return false; -} - -bool PageList::Resize(off_t size, bool is_loaded, bool is_modified) -{ - off_t total = Size(); - - if(0 == total){ - Init(size, is_loaded, is_modified); - - }else if(total < size){ - // add new area - fdpage page(total, (size - total), is_loaded, is_modified); - pages.push_back(page); - - }else if(size < total){ - // cut area - 
for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ){ - if(iter->next() <= size){ - ++iter; - }else{ - if(size <= iter->offset){ - iter = pages.erase(iter); - }else{ - iter->bytes = size - iter->offset; - } - } - } - }else{ // total == size - // nothing to do - } - // compress area - return Compress(); -} - -bool PageList::IsPageLoaded(off_t start, off_t size) const -{ - for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ - if(iter->end() < start){ - continue; - } - if(!iter->loaded){ - return false; - } - if(0 != size && start + size <= iter->next()){ - break; - } - } - return true; -} - -bool PageList::SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus, bool is_compress) -{ - off_t now_size = Size(); - bool is_loaded = (PAGE_LOAD_MODIFIED == pstatus || PAGE_LOADED == pstatus); - bool is_modified = (PAGE_LOAD_MODIFIED == pstatus || PAGE_MODIFIED == pstatus); - - if(now_size <= start){ - if(now_size < start){ - // add - Resize(start, false, is_modified); // set modified flag from now end pos to specified start pos. - } - Resize(start + size, is_loaded, is_modified); - - }else if(now_size <= start + size){ - // cut - Resize(start, false, false); // not changed loaded/modified flags in existing area. - // add - Resize(start + size, is_loaded, is_modified); - - }else{ - // start-size are inner pages area - // parse "start", and "start + size" position - Parse(start); - Parse(start + size); - - // set loaded flag - for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){ - if(iter->end() < start){ - continue; - }else if(start + size <= iter->offset){ - break; - }else{ - iter->loaded = is_loaded; - iter->modified = is_modified; - } - } - } - // compress area - return (is_compress ? 
Compress() : true); -} - -bool PageList::FindUnloadedPage(off_t start, off_t& resstart, off_t& ressize) const -{ - for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ - if(start <= iter->end()){ - if(!iter->loaded && !iter->modified){ // Do not load unloaded and modified areas - resstart = iter->offset; - ressize = iter->bytes; - return true; - } - } - } - return false; -} - -off_t PageList::GetTotalUnloadedPageSize(off_t start, off_t size) const -{ - off_t restsize = 0; - off_t next = start + size; - for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ - if(iter->next() <= start){ - continue; - } - if(next <= iter->offset){ - break; - } - if(iter->loaded || iter->modified){ - continue; - } - off_t tmpsize; - if(iter->offset <= start){ - if(iter->next() <= next){ - tmpsize = (iter->next() - start); - }else{ - tmpsize = next - start; // = size - } - }else{ - if(iter->next() <= next){ - tmpsize = iter->next() - iter->offset; // = iter->bytes - }else{ - tmpsize = next - iter->offset; - } - } - restsize += tmpsize; - } - return restsize; -} - -int PageList::GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start, off_t size) const -{ - // If size is 0, it means loading to end. 
- if(0 == size){ - if(start < Size()){ - size = Size() - start; - } - } - off_t next = start + size; - - for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ - if(iter->next() <= start){ - continue; - } - if(next <= iter->offset){ - break; - } - if(iter->loaded || iter->modified){ - continue; // already loaded or modified - } - - // page area - off_t page_start = max(iter->offset, start); - off_t page_next = min(iter->next(), next); - off_t page_size = page_next - page_start; - - // add list - fdpage_list_t::reverse_iterator riter = unloaded_list.rbegin(); - if(riter != unloaded_list.rend() && riter->next() == page_start){ - // merge to before page - riter->bytes += page_size; - }else{ - fdpage page(page_start, page_size, false, false); - unloaded_list.push_back(page); - } - } - return unloaded_list.size(); -} - -// [NOTE] -// This method is called in advance when mixing POST and COPY in multi-part upload. -// The minimum size of each part must be 5 MB, and the data area below this must be -// downloaded from S3. -// This method checks the current PageList status and returns the area that needs -// to be downloaded so that each part is at least 5 MB. 
-// -bool PageList::GetPageListsForMultipartUpload(fdpage_list_t& dlpages, fdpage_list_t& mixuppages, off_t max_partsize) -{ - // compress before this processing - if(!Compress()){ - return false; - } - - // make a list by modified flag - fdpage_list_t modified_pages = compress_fdpage_list_ignore_load(pages, false); - fdpage_list_t download_pages; // A non-contiguous page list showing the areas that need to be downloaded - fdpage_list_t mixupload_pages; // A continuous page list showing only modified flags for mixupload - fdpage prev_page; - for(fdpage_list_t::const_iterator iter = modified_pages.begin(); iter != modified_pages.end(); ++iter){ - if(iter->modified){ - // current is modified area - if(!prev_page.modified){ - // previous is not modified area - if(prev_page.bytes < static_cast(MIN_MULTIPART_SIZE)){ - // previous(not modified) area is too small for one multipart size, - // then all of previous area is needed to download. - download_pages.push_back(prev_page); - - // previous(not modified) area is set upload area. - prev_page.modified = true; - mixupload_pages.push_back(prev_page); - }else{ - // previous(not modified) area is set copy area. - prev_page.modified = false; - mixupload_pages.push_back(prev_page); - } - // set current to previous - prev_page = *iter; - }else{ - // previous is modified area, too - prev_page.bytes += iter->bytes; - } - - }else{ - // current is not modified area - if(!prev_page.modified){ - // previous is not modified area, too - prev_page.bytes += iter->bytes; - - }else{ - // previous is modified area - if(prev_page.bytes < static_cast(MIN_MULTIPART_SIZE)){ - // previous(modified) area is too small for one multipart size, - // then part or all of current area is needed to download. 
- off_t missing_bytes = static_cast(MIN_MULTIPART_SIZE) - prev_page.bytes; - - if((missing_bytes + static_cast(MIN_MULTIPART_SIZE)) < iter-> bytes){ - // The current size is larger than the missing size, and the remainder - // after deducting the missing size is larger than the minimum size. - - fdpage missing_page(iter->offset, missing_bytes, false, false); - download_pages.push_back(missing_page); - - // previous(not modified) area is set upload area. - prev_page.bytes = static_cast(MIN_MULTIPART_SIZE); - mixupload_pages.push_back(prev_page); - - // set current to previous - prev_page = *iter; - prev_page.offset += missing_bytes; - prev_page.bytes -= missing_bytes; - - }else{ - // The current size is less than the missing size, or the remaining - // size less the missing size is less than the minimum size. - download_pages.push_back(*iter); - - // add current to previous - prev_page.bytes += iter->bytes; - } - - }else{ - // previous(modified) area is enough size for one multipart size. 
- mixupload_pages.push_back(prev_page); - - // set current to previous - prev_page = *iter; - } - } - } - } - // lastest area - if(0 < prev_page.bytes){ - mixupload_pages.push_back(prev_page); - } - - // compress - dlpages = compress_fdpage_list_ignore_modify(download_pages, false); - mixuppages = compress_fdpage_list_ignore_load(mixupload_pages, false); - - // parse by max pagesize - dlpages = parse_partsize_fdpage_list(dlpages, max_partsize); - mixuppages = parse_partsize_fdpage_list(mixuppages, max_partsize); - - return true; -} - -bool PageList::IsModified() const -{ - for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ - if(iter->modified){ - return true; - } - } - return false; -} - -bool PageList::ClearAllModified() -{ - for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){ - if(iter->modified){ - iter->modified = false; - } - } - return Compress(); -} - -bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode) -{ - if(!file.Open()){ - return false; - } - if(is_output){ - // - // put to file - // - ostringstream ssall; - ssall << inode << ":" << Size(); - - for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){ - ssall << "\n" << iter->offset << ":" << iter->bytes << ":" << (iter->loaded ? "1" : "0") << ":" << (iter->modified ? "1" : "0"); - } - - if(-1 == ftruncate(file.GetFd(), 0)){ - S3FS_PRN_ERR("failed to truncate file(to 0) for stats(%d)", errno); - return false; - } - string strall = ssall.str(); - if(0 >= pwrite(file.GetFd(), strall.c_str(), strall.length(), 0)){ - S3FS_PRN_ERR("failed to write stats(%d)", errno); - return false; - } - - }else{ - // - // loading from file - // - struct stat st; - memset(&st, 0, sizeof(struct stat)); - if(-1 == fstat(file.GetFd(), &st)){ - S3FS_PRN_ERR("fstat is failed. 
errno(%d)", errno); - return false; - } - if(0 >= st.st_size){ - // nothing - Init(0, false, false); - return true; - } - char* ptmp = new char[st.st_size + 1]; - ptmp[st.st_size] = '\0'; - // read from file - if(0 >= pread(file.GetFd(), ptmp, st.st_size, 0)){ - S3FS_PRN_ERR("failed to read stats(%d)", errno); - delete[] ptmp; - return false; - } - string oneline; - istringstream ssall(ptmp); - - // loaded - Clear(); - - // load head line(for size and inode) - off_t total; - ino_t cache_inode; // if this value is 0, it means old format. - if(!getline(ssall, oneline, '\n')){ - S3FS_PRN_ERR("failed to parse stats."); - delete[] ptmp; - return false; - }else{ - istringstream sshead(oneline); - string strhead1; - string strhead2; - - // get first part in head line. - if(!getline(sshead, strhead1, ':')){ - S3FS_PRN_ERR("failed to parse stats."); - delete[] ptmp; - return false; - } - // get second part in head line. - if(!getline(sshead, strhead2, ':')){ - // old head format is "\n" - total = cvt_strtoofft(strhead1.c_str(), /* base= */10); - cache_inode = 0; - }else{ - // current head format is ":\n" - total = cvt_strtoofft(strhead2.c_str(), /* base= */10); - cache_inode = static_cast(cvt_strtoofft(strhead1.c_str(), /* base= */10)); - if(0 == cache_inode){ - S3FS_PRN_ERR("wrong inode number in parsed cache stats."); - delete[] ptmp; - return false; - } - } - } - // check inode number - if(0 != cache_inode && cache_inode != inode){ - S3FS_PRN_ERR("differ inode and inode number in parsed cache stats."); - delete[] ptmp; - return false; - } - - // load each part - bool is_err = false; - while(getline(ssall, oneline, '\n')){ - string part; - istringstream ssparts(oneline); - // offset - if(!getline(ssparts, part, ':')){ - is_err = true; - break; - } - off_t offset = cvt_strtoofft(part.c_str(), /* base= */10); - // size - if(!getline(ssparts, part, ':')){ - is_err = true; - break; - } - off_t size = cvt_strtoofft(part.c_str(), /* base= */10); - // loaded - 
if(!getline(ssparts, part, ':')){ - is_err = true; - break; - } - bool is_loaded = (1 == cvt_strtoofft(part.c_str(), /* base= */10) ? true : false); - bool is_modified; - if(!getline(ssparts, part, ':')){ - is_modified = false; // old version does not have this part. - }else{ - is_modified = (1 == cvt_strtoofft(part.c_str(), /* base= */10) ? true : false); - } - // add new area - PageList::page_status pstatus = - ( is_loaded && is_modified ? PageList::PAGE_LOAD_MODIFIED : - !is_loaded && is_modified ? PageList::PAGE_MODIFIED : - is_loaded && !is_modified ? PageList::PAGE_LOADED : PageList::PAGE_NOT_LOAD_MODIFIED ); - - SetPageLoadedStatus(offset, size, pstatus); - } - delete[] ptmp; - if(is_err){ - S3FS_PRN_ERR("failed to parse stats."); - Clear(); - return false; - } - - // check size - if(total != Size()){ - S3FS_PRN_ERR("different size(%lld - %lld).", static_cast(total), static_cast(Size())); - Clear(); - return false; - } - } - return true; -} - -void PageList::Dump() const -{ - int cnt = 0; - - S3FS_PRN_DBG("pages = {"); - for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter, ++cnt){ - S3FS_PRN_DBG(" [%08d] -> {%014lld - %014lld : %s / %s}", cnt, static_cast(iter->offset), static_cast(iter->bytes), iter->loaded ? "loaded" : "unloaded", iter->modified ? "modified" : "not modified"); - } - S3FS_PRN_DBG("}"); -} - -// -// Compare the fdpage_list_t pages of the object with the state of the file. -// -// The loaded=true or modified=true area of pages must be a DATA block -// (not a HOLE block) in the file. -// The other area is a HOLE block in the file or is a DATA block(but the -// data of the target area in that block should be ZERO). -// If it is a bad area in the previous case, it will be reported as an error. -// If the latter case does not match, it will be reported as a warning. 
-// -bool PageList::CompareSparseFile(int fd, size_t file_size, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list) -{ - err_area_list.clear(); - warn_area_list.clear(); - - // First, list the block disk allocation area of the cache file. - // The cache file has holes(sparse file) and no disk block areas - // are assigned to any holes. - fdpage_list_t sparse_list; - if(!PageList::GetSparseFilePages(fd, file_size, sparse_list)){ - S3FS_PRN_ERR("Something error is occurred in parsing hole/data of the cache file(%d).", fd); - - fdpage page(0, static_cast(file_size), false, false); - err_area_list.push_back(page); - - return false; - } - - if(sparse_list.empty() && pages.empty()){ - // both file and stats information are empty, it means cache file size is ZERO. - return true; - } - - // Compare each pages and sparse_list - bool result = true; - for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ - if(!PageList::CheckAreaInSparseFile(*iter, sparse_list, fd, err_area_list, warn_area_list)){ - result = false; - } - } - return result; -} - -//------------------------------------------------ -// FdEntity class methods -//------------------------------------------------ -bool FdEntity::mixmultipart = true; - -bool FdEntity::SetNoMixMultipart() -{ - bool old = mixmultipart; - mixmultipart = false; - return old; -} - -int FdEntity::FillFile(int fd, unsigned char byte, off_t size, off_t start) -{ - unsigned char bytes[1024 * 32]; // 32kb - memset(bytes, byte, min(static_cast(sizeof(bytes)), size)); - - for(off_t total = 0, onewrote = 0; total < size; total += onewrote){ - if(-1 == (onewrote = pwrite(fd, bytes, min(static_cast(sizeof(bytes)), size - total), start + total))){ - S3FS_PRN_ERR("pwrite failed. errno(%d)", errno); - return -errno; - } - } - return 0; -} - -// [NOTE] -// If fd is wrong or something error is occurred, return 0. -// The ino_t is allowed zero, but inode 0 is not realistic. 
-// So this method returns 0 on error assuming the correct -// inode is never 0. -// The caller must have exclusive control. -// -ino_t FdEntity::GetInode(int fd) -{ - if(-1 == fd){ - S3FS_PRN_ERR("file descriptor is wrong."); - return 0; - } - - struct stat st; - if(0 != fstat(fd, &st)){ - S3FS_PRN_ERR("could not get stat for file descriptor(%d) by errno(%d).", fd, errno); - return 0; - } - return st.st_ino; -} - -//------------------------------------------------ -// FdEntity methods -//------------------------------------------------ -FdEntity::FdEntity(const char* tpath, const char* cpath) - : is_lock_init(false), refcnt(0), path(SAFESTRPTR(tpath)), - fd(-1), pfile(NULL), inode(0), size_orgmeta(0), upload_id(""), mp_start(0), mp_size(0), - cachepath(SAFESTRPTR(cpath)), mirrorpath("") -{ - pthread_mutexattr_t attr; - pthread_mutexattr_init(&attr); -#if S3FS_PTHREAD_ERRORCHECK - pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); -#endif - int res; - if(0 != (res = pthread_mutex_init(&fdent_lock, &attr))){ - S3FS_PRN_CRIT("failed to init fdent_lock: %d", res); - abort(); - } - if(0 != (res = pthread_mutex_init(&fdent_data_lock, &attr))){ - S3FS_PRN_CRIT("failed to init fdent_data_lock: %d", res); - abort(); - } - is_lock_init = true; -} - -FdEntity::~FdEntity() -{ - Clear(); - - if(is_lock_init){ - int res; - if(0 != (res = pthread_mutex_destroy(&fdent_data_lock))){ - S3FS_PRN_CRIT("failed to destroy fdent_data_lock: %d", res); - abort(); - } - if(0 != (res = pthread_mutex_destroy(&fdent_lock))){ - S3FS_PRN_CRIT("failed to destroy fdent_lock: %d", res); - abort(); - } - is_lock_init = false; - } -} - -void FdEntity::Clear() -{ - AutoLock auto_lock(&fdent_lock); - AutoLock auto_data_lock(&fdent_data_lock); - - if(-1 != fd){ - if(!cachepath.empty()){ - // [NOTE] - // Compare the inode of the existing cache file with the inode of - // the cache file output by this object, and if they are the same, - // serialize the pagelist. 
- // - ino_t cur_inode = GetInode(); - if(0 != cur_inode && cur_inode == inode){ - CacheFileStat cfstat(path.c_str()); - if(!pagelist.Serialize(cfstat, true, inode)){ - S3FS_PRN_WARN("failed to save cache stat file(%s).", path.c_str()); - } - } - } - if(pfile){ - fclose(pfile); - pfile = NULL; - } - fd = -1; - inode = 0; - - if(!mirrorpath.empty()){ - if(-1 == unlink(mirrorpath.c_str())){ - S3FS_PRN_WARN("failed to remove mirror cache file(%s) by errno(%d).", mirrorpath.c_str(), errno); - } - mirrorpath.erase(); - } - } - pagelist.Init(0, false, false); - refcnt = 0; - path = ""; - cachepath = ""; -} - -// [NOTE] -// This method returns the inode of the file in cachepath. -// The return value is the same as the class method GetInode(). -// The caller must have exclusive control. -// -ino_t FdEntity::GetInode() -{ - if(cachepath.empty()){ - S3FS_PRN_INFO("cache file path is empty, then return inode as 0."); - return 0; - } - - struct stat st; - if(0 != stat(cachepath.c_str(), &st)){ - S3FS_PRN_INFO("could not get stat for file(%s) by errno(%d).", cachepath.c_str(), errno); - return 0; - } - return st.st_ino; -} - -void FdEntity::Close() -{ - AutoLock auto_lock(&fdent_lock); - - S3FS_PRN_DBG("[path=%s][fd=%d][refcnt=%d]", path.c_str(), fd, (-1 != fd ? refcnt - 1 : refcnt)); - - if(-1 != fd){ - - if(0 < refcnt){ - refcnt--; - }else{ - S3FS_PRN_EXIT("reference count underflow"); - abort(); - } - if(0 == refcnt){ - AutoLock auto_data_lock(&fdent_data_lock); - if(!cachepath.empty()){ - // [NOTE] - // Compare the inode of the existing cache file with the inode of - // the cache file output by this object, and if they are the same, - // serialize the pagelist. 
- // - ino_t cur_inode = GetInode(); - if(0 != cur_inode && cur_inode == inode){ - CacheFileStat cfstat(path.c_str()); - if(!pagelist.Serialize(cfstat, true, inode)){ - S3FS_PRN_WARN("failed to save cache stat file(%s).", path.c_str()); - } - } - } - if(pfile){ - fclose(pfile); - pfile = NULL; - } - fd = -1; - inode = 0; - - if(!mirrorpath.empty()){ - if(-1 == unlink(mirrorpath.c_str())){ - S3FS_PRN_WARN("failed to remove mirror cache file(%s) by errno(%d).", mirrorpath.c_str(), errno); - } - mirrorpath.erase(); - } - } - } -} - -int FdEntity::Dup(bool lock_already_held) -{ - AutoLock auto_lock(&fdent_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE); - - S3FS_PRN_DBG("[path=%s][fd=%d][refcnt=%d]", path.c_str(), fd, (-1 != fd ? refcnt + 1 : refcnt)); - - if(-1 != fd){ - refcnt++; - } - return fd; -} - -// -// Open mirror file which is linked cache file. -// -int FdEntity::OpenMirrorFile() -{ - if(cachepath.empty()){ - S3FS_PRN_ERR("cache path is empty, why come here"); - return -EIO; - } - - // make temporary directory - string bupdir; - if(!FdManager::MakeCachePath(NULL, bupdir, true, true)){ - S3FS_PRN_ERR("could not make bup cache directory path or create it."); - return -EIO; - } - - // create seed generating mirror file name - unsigned int seed = static_cast(time(NULL)); - int urandom_fd; - if(-1 != (urandom_fd = open("/dev/urandom", O_RDONLY))){ - unsigned int rand_data; - if(sizeof(rand_data) == read(urandom_fd, &rand_data, sizeof(rand_data))){ - seed ^= rand_data; - } - close(urandom_fd); - } - - // try to link mirror file - while(true){ - // make random(temp) file path - // (do not care for threading, because allowed any value returned.) 
- // - char szfile[NAME_MAX + 1]; - sprintf(szfile, "%x.tmp", rand_r(&seed)); - mirrorpath = bupdir + "/" + szfile; - - // link mirror file to cache file - if(0 == link(cachepath.c_str(), mirrorpath.c_str())){ - break; - } - if(EEXIST != errno){ - S3FS_PRN_ERR("could not link mirror file(%s) to cache file(%s) by errno(%d).", mirrorpath.c_str(), cachepath.c_str(), errno); - return -errno; - } - ++seed; - } - - // open mirror file - int mirrorfd; - if(-1 == (mirrorfd = open(mirrorpath.c_str(), O_RDWR))){ - S3FS_PRN_ERR("could not open mirror file(%s) by errno(%d).", mirrorpath.c_str(), errno); - return -errno; - } - return mirrorfd; -} - -int FdEntity::Open(headers_t* pmeta, off_t size, time_t time, bool no_fd_lock_wait) -{ - AutoLock auto_lock(&fdent_lock, no_fd_lock_wait ? AutoLock::NO_WAIT : AutoLock::NONE); - - S3FS_PRN_DBG("[path=%s][fd=%d][size=%lld][time=%lld]", path.c_str(), fd, static_cast(size), static_cast(time)); - - if (!auto_lock.isLockAcquired()) { - // had to wait for fd lock, return - S3FS_PRN_ERR("Could not get lock."); - return -EIO; - } - - AutoLock auto_data_lock(&fdent_data_lock); - if(-1 != fd){ - // already opened, needs to increment refcnt. - Dup(/*lock_already_held=*/ true); - - // check only file size(do not need to save cfs and time. - if(0 <= size && pagelist.Size() != size){ - // truncate temporary file size - if(-1 == ftruncate(fd, size)){ - S3FS_PRN_ERR("failed to truncate temporary file(%d) by errno(%d).", fd, errno); - if(0 < refcnt){ - refcnt--; - } - return -EIO; - } - // resize page list - if(!pagelist.Resize(size, false, true)){ // Areas with increased size are modified - S3FS_PRN_ERR("failed to truncate temporary file information(%d).", fd); - if(0 < refcnt){ - refcnt--; - } - return -EIO; - } - } - // set original headers and set size. - off_t new_size = (0 <= size ? 
size : size_orgmeta); - if(pmeta){ - orgmeta = *pmeta; - new_size = get_size(orgmeta); - } - if(new_size < size_orgmeta){ - size_orgmeta = new_size; - } - return 0; - } - - bool need_save_csf = false; // need to save(reset) cache stat file - bool is_truncate = false; // need to truncate - - if(!cachepath.empty()){ - // using cache - - struct stat st; - if(stat(cachepath.c_str(), &st) == 0){ - if(st.st_mtime < time){ - S3FS_PRN_DBG("cache file stale, removing: %s", cachepath.c_str()); - if(unlink(cachepath.c_str()) != 0){ - return (0 == errno ? -EIO : -errno); - } - } - } - - // open cache and cache stat file, load page info. - CacheFileStat cfstat(path.c_str()); - - // try to open cache file - if( -1 != (fd = open(cachepath.c_str(), O_RDWR)) && - 0 != (inode = FdEntity::GetInode(fd)) && - pagelist.Serialize(cfstat, false, inode) ) - { - // succeed to open cache file and to load stats data - memset(&st, 0, sizeof(struct stat)); - if(-1 == fstat(fd, &st)){ - S3FS_PRN_ERR("fstat is failed. errno(%d)", errno); - fd = -1; - inode = 0; - return (0 == errno ? -EIO : -errno); - } - // check size, st_size, loading stat file - if(-1 == size){ - if(st.st_size != pagelist.Size()){ - pagelist.Resize(st.st_size, false, true); // Areas with increased size are modified - need_save_csf = true; // need to update page info - } - size = st.st_size; - }else{ - if(size != pagelist.Size()){ - pagelist.Resize(size, false, true); // Areas with increased size are modified - need_save_csf = true; // need to update page info - } - if(size != st.st_size){ - is_truncate = true; - } - } - - }else{ - if(-1 != fd){ - close(fd); - } - inode = 0; - - // could not open cache file or could not load stats data, so initialize it. - if(-1 == (fd = open(cachepath.c_str(), O_CREAT|O_RDWR|O_TRUNC, 0600))){ - S3FS_PRN_ERR("failed to open file(%s). 
errno(%d)", cachepath.c_str(), errno); - - // remove cache stat file if it is existed - if(!CacheFileStat::DeleteCacheFileStat(path.c_str())){ - if(ENOENT != errno){ - S3FS_PRN_WARN("failed to delete current cache stat file(%s) by errno(%d), but continue...", path.c_str(), errno); - } - } - return (0 == errno ? -EIO : -errno); - } - need_save_csf = true; // need to update page info - inode = FdEntity::GetInode(fd); - if(-1 == size){ - size = 0; - pagelist.Init(0, false, false); - }else{ - // [NOTE] - // The modify flag must not be set when opening a file, - // if the time parameter(mtime) is specified(not -1) and - // the cache file does not exist. - // If mtime is specified for the file and the cache file - // mtime is older than it, the cache file is removed and - // the processing comes here. - // - pagelist.Resize(size, false, (0 <= time ? false : true)); - - is_truncate = true; - } - } - - // open mirror file - int mirrorfd; - if(0 >= (mirrorfd = OpenMirrorFile())){ - S3FS_PRN_ERR("failed to open mirror file linked cache file(%s).", cachepath.c_str()); - return (0 == mirrorfd ? -EIO : mirrorfd); - } - // switch fd - close(fd); - fd = mirrorfd; - - // make file pointer(for being same tmpfile) - if(NULL == (pfile = fdopen(fd, "wb"))){ - S3FS_PRN_ERR("failed to get fileno(%s). errno(%d)", cachepath.c_str(), errno); - close(fd); - fd = -1; - inode = 0; - return (0 == errno ? -EIO : -errno); - } - - }else{ - // not using cache - inode = 0; - - // open temporary file - if(NULL == (pfile = tmpfile()) || -1 ==(fd = fileno(pfile))){ - S3FS_PRN_ERR("failed to open tmp file. err(%d)", errno); - if(pfile){ - fclose(pfile); - pfile = NULL; - } - return (0 == errno ? -EIO : -errno); - } - if(-1 == size){ - size = 0; - pagelist.Init(0, false, false); - }else{ - // [NOTE] - // The modify flag must not be set when opening a file, - // if the time parameter(mtime) is specified(not -1) and - // the cache file does not exist. 
- // If mtime is specified for the file and the cache file - // mtime is older than it, the cache file is removed and - // the processing comes here. - // - pagelist.Resize(size, false, (0 <= time ? false : true)); - is_truncate = true; - } - } - - // truncate cache(tmp) file - if(is_truncate){ - if(0 != ftruncate(fd, size) || 0 != fsync(fd)){ - S3FS_PRN_ERR("ftruncate(%s) or fsync returned err(%d)", cachepath.c_str(), errno); - fclose(pfile); - pfile = NULL; - fd = -1; - inode = 0; - return (0 == errno ? -EIO : -errno); - } - } - - // reset cache stat file - if(need_save_csf){ - CacheFileStat cfstat(path.c_str()); - if(!pagelist.Serialize(cfstat, true, inode)){ - S3FS_PRN_WARN("failed to save cache stat file(%s), but continue...", path.c_str()); - } - } - - // init internal data - refcnt = 1; - - // set original headers and size in it. - if(pmeta){ - orgmeta = *pmeta; - size_orgmeta = get_size(orgmeta); - }else{ - orgmeta.clear(); - size_orgmeta = 0; - } - - // set mtime(set "x-amz-meta-mtime" in orgmeta) - if(-1 != time){ - if(0 != SetMtime(time, /*lock_already_held=*/ true)){ - S3FS_PRN_ERR("failed to set mtime. errno(%d)", errno); - fclose(pfile); - pfile = NULL; - fd = -1; - inode = 0; - return (0 == errno ? -EIO : -errno); - } - } - - return 0; -} - -// [NOTE] -// This method is called from only nocopyapi functions. -// So we do not check disk space for this option mode, if there is no enough -// disk space this method will be failed. 
-// -bool FdEntity::OpenAndLoadAll(headers_t* pmeta, off_t* size, bool force_load) -{ - AutoLock auto_lock(&fdent_lock); - int result; - - S3FS_PRN_INFO3("[path=%s][fd=%d]", path.c_str(), fd); - - if(-1 == fd){ - if(0 != Open(pmeta)){ - return false; - } - } - AutoLock auto_data_lock(&fdent_data_lock); - - if(force_load){ - SetAllStatusUnloaded(); - } - // - // TODO: possibly do background for delay loading - // - if(0 != (result = Load(/*start=*/ 0, /*size=*/ 0, /*lock_already_held=*/ true))){ - S3FS_PRN_ERR("could not download, result(%d)", result); - return false; - } - if(size){ - *size = pagelist.Size(); - } - return true; -} - -// -// Rename file path. -// -// This method sets the FdManager::fent map registration key to fentmapkey. -// -// [NOTE] -// This method changes the file path of FdEntity. -// Old file is deleted after linking to the new file path, and this works -// without problem because the file descriptor is not affected even if the -// cache file is open. -// The mirror file descriptor is also the same. The mirror file path does -// not need to be changed and will remain as it is. 
-// -bool FdEntity::RenamePath(const string& newpath, string& fentmapkey) -{ - if(!cachepath.empty()){ - // has cache path - - // make new cache path - string newcachepath; - if(!FdManager::MakeCachePath(newpath.c_str(), newcachepath, true)){ - S3FS_PRN_ERR("failed to make cache path for object(%s).", newpath.c_str()); - return false; - } - - // rename cache file - if(-1 == rename(cachepath.c_str(), newcachepath.c_str())){ - S3FS_PRN_ERR("failed to rename old cache path(%s) to new cache path(%s) by errno(%d).", cachepath.c_str(), newcachepath.c_str(), errno); - return false; - } - - // link and unlink cache file stat - if(!CacheFileStat::RenameCacheFileStat(path.c_str(), newpath.c_str())){ - S3FS_PRN_ERR("failed to rename cache file stat(%s to %s).", path.c_str(), newpath.c_str()); - return false; - } - fentmapkey = newpath; - cachepath = newcachepath; - - }else{ - // does not have cache path - fentmapkey.erase(); - FdManager::MakeRandomTempPath(newpath.c_str(), fentmapkey); - } - // set new path - path = newpath; - - return true; -} - -bool FdEntity::IsModified(void) const -{ - AutoLock auto_data_lock(const_cast(&fdent_data_lock)); - return pagelist.IsModified(); -} - -bool FdEntity::GetStats(struct stat& st, bool lock_already_held) -{ - AutoLock auto_lock(&fdent_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE); - if(-1 == fd){ - return false; - } - - memset(&st, 0, sizeof(struct stat)); - if(-1 == fstat(fd, &st)){ - S3FS_PRN_ERR("fstat failed. errno(%d)", errno); - return false; - } - return true; -} - -int FdEntity::SetCtime(time_t time, bool lock_already_held) -{ - AutoLock auto_lock(&fdent_lock, lock_already_held ? 
AutoLock::ALREADY_LOCKED : AutoLock::NONE); - - S3FS_PRN_INFO3("[path=%s][fd=%d][time=%lld]", path.c_str(), fd, static_cast(time)); - - if(-1 == time){ - return 0; - } - orgmeta["x-amz-meta-ctime"] = str(time); - return 0; -} - -int FdEntity::SetMtime(time_t time, bool lock_already_held) -{ - AutoLock auto_lock(&fdent_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE); - - S3FS_PRN_INFO3("[path=%s][fd=%d][time=%lld]", path.c_str(), fd, static_cast(time)); - - if(-1 == time){ - return 0; - } - - if(-1 != fd){ - struct timeval tv[2]; - tv[0].tv_sec = time; - tv[0].tv_usec= 0L; - tv[1].tv_sec = tv[0].tv_sec; - tv[1].tv_usec= 0L; - if(-1 == futimes(fd, tv)){ - S3FS_PRN_ERR("futimes failed. errno(%d)", errno); - return -errno; - } - }else if(!cachepath.empty()){ - // not opened file yet. - struct utimbuf n_mtime; - n_mtime.modtime = time; - n_mtime.actime = time; - if(-1 == utime(cachepath.c_str(), &n_mtime)){ - S3FS_PRN_ERR("utime failed. errno(%d)", errno); - return -errno; - } - } - orgmeta["x-amz-meta-ctime"] = str(time); - orgmeta["x-amz-meta-mtime"] = str(time); - - return 0; -} - -bool FdEntity::UpdateCtime() -{ - AutoLock auto_lock(&fdent_lock); - struct stat st; - if(!GetStats(st, /*lock_already_held=*/ true)){ - return false; - } - orgmeta["x-amz-meta-ctime"] = str(st.st_ctime); - return true; -} - -bool FdEntity::UpdateMtime() -{ - AutoLock auto_lock(&fdent_lock); - struct stat st; - if(!GetStats(st, /*lock_already_held=*/ true)){ - return false; - } - orgmeta["x-amz-meta-ctime"] = str(st.st_ctime); - orgmeta["x-amz-meta-mtime"] = str(st.st_mtime); - return true; -} - -bool FdEntity::GetSize(off_t& size) -{ - AutoLock auto_lock(&fdent_lock); - if(-1 == fd){ - return false; - } - - AutoLock auto_data_lock(&fdent_data_lock); - size = pagelist.Size(); - return true; -} - -bool FdEntity::GetXattr(string& xattr) -{ - AutoLock auto_lock(&fdent_lock); - - headers_t::const_iterator iter = orgmeta.find("x-amz-meta-xattr"); - if(iter == 
orgmeta.end()){ - return false; - } - xattr = iter->second; - return true; -} - -bool FdEntity::SetXattr(const std::string& xattr) -{ - AutoLock auto_lock(&fdent_lock); - orgmeta["x-amz-meta-xattr"] = xattr; - return true; -} - -bool FdEntity::SetMode(mode_t mode) -{ - AutoLock auto_lock(&fdent_lock); - orgmeta["x-amz-meta-mode"] = str(mode); - return true; -} - -bool FdEntity::SetUId(uid_t uid) -{ - AutoLock auto_lock(&fdent_lock); - orgmeta["x-amz-meta-uid"] = str(uid); - return true; -} - -bool FdEntity::SetGId(gid_t gid) -{ - AutoLock auto_lock(&fdent_lock); - orgmeta["x-amz-meta-gid"] = str(gid); - return true; -} - -bool FdEntity::SetContentType(const char* path) -{ - if(!path){ - return false; - } - AutoLock auto_lock(&fdent_lock); - orgmeta["Content-Type"] = S3fsCurl::LookupMimeType(string(path)); - return true; -} - -bool FdEntity::SetAllStatus(bool is_loaded) -{ - S3FS_PRN_INFO3("[path=%s][fd=%d][%s]", path.c_str(), fd, is_loaded ? "loaded" : "unloaded"); - - if(-1 == fd){ - return false; - } - // [NOTE] - // this method is only internal use, and calling after locking. - // so do not lock now. - // - //AutoLock auto_lock(&fdent_lock); - - // get file size - struct stat st; - memset(&st, 0, sizeof(struct stat)); - if(-1 == fstat(fd, &st)){ - S3FS_PRN_ERR("fstat is failed. errno(%d)", errno); - return false; - } - // Reinit - pagelist.Init(st.st_size, is_loaded, false); - - return true; -} - -int FdEntity::Load(off_t start, off_t size, bool lock_already_held, bool is_modified_flag) -{ - AutoLock auto_lock(&fdent_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE); - - S3FS_PRN_DBG("[path=%s][fd=%d][offset=%lld][size=%lld]", path.c_str(), fd, static_cast(start), static_cast(size)); - - if(-1 == fd){ - return -EBADF; - } - AutoLock auto_data_lock(&fdent_data_lock, lock_already_held ? 
AutoLock::ALREADY_LOCKED : AutoLock::NONE); - - int result = 0; - - // check loaded area & load - fdpage_list_t unloaded_list; - if(0 < pagelist.GetUnloadedPages(unloaded_list, start, size)){ - for(fdpage_list_t::iterator iter = unloaded_list.begin(); iter != unloaded_list.end(); ++iter){ - if(0 != size && start + size <= iter->offset){ - // reached end - break; - } - // check loading size - off_t need_load_size = 0; - if(iter->offset < size_orgmeta){ - // original file size(on S3) is smaller than request. - need_load_size = (iter->next() <= size_orgmeta ? iter->bytes : (size_orgmeta - iter->offset)); - } - - // download - if(S3fsCurl::GetMultipartSize() <= need_load_size && !nomultipart){ - // parallel request - result = S3fsCurl::ParallelGetObjectRequest(path.c_str(), fd, iter->offset, need_load_size); - }else{ - // single request - if(0 < need_load_size){ - S3fsCurl s3fscurl; - result = s3fscurl.GetObjectRequest(path.c_str(), fd, iter->offset, need_load_size); - }else{ - result = 0; - } - } - if(0 != result){ - break; - } - // Set loaded flag - pagelist.SetPageLoadedStatus(iter->offset, iter->bytes, (is_modified_flag ? PageList::PAGE_LOAD_MODIFIED : PageList::PAGE_LOADED)); - } - PageList::FreeList(unloaded_list); - } - return result; -} - -// [NOTE] -// At no disk space for caching object. -// This method is downloading by dividing an object of the specified range -// and uploading by multipart after finishing downloading it. -// -// [NOTICE] -// Need to lock before calling this method. -// -int FdEntity::NoCacheLoadAndPost(off_t start, off_t size) -{ - int result = 0; - - S3FS_PRN_INFO3("[path=%s][fd=%d][offset=%lld][size=%lld]", path.c_str(), fd, static_cast(start), static_cast(size)); - - if(-1 == fd){ - return -EBADF; - } - - // [NOTE] - // This method calling means that the cache file is never used no more. 
- // - if(!cachepath.empty()){ - // remove cache files(and cache stat file) - FdManager::DeleteCacheFile(path.c_str()); - // cache file path does not use no more. - cachepath.erase(); - mirrorpath.erase(); - } - - // Change entity key in manager mapping - FdManager::get()->ChangeEntityToTempPath(this, path.c_str()); - - // open temporary file - FILE* ptmpfp; - int tmpfd; - if(NULL == (ptmpfp = tmpfile()) || -1 ==(tmpfd = fileno(ptmpfp))){ - S3FS_PRN_ERR("failed to open tmp file. err(%d)", errno); - if(ptmpfp){ - fclose(ptmpfp); - } - return (0 == errno ? -EIO : -errno); - } - - // loop uploading by multipart - for(fdpage_list_t::iterator iter = pagelist.pages.begin(); iter != pagelist.pages.end(); ++iter){ - if(iter->end() < start){ - continue; - } - if(0 != size && start + size <= iter->offset){ - break; - } - // download each multipart size(default 10MB) in unit - for(off_t oneread = 0, totalread = (iter->offset < start ? start : 0); totalread < static_cast(iter->bytes); totalread += oneread){ - int upload_fd = fd; - off_t offset = iter->offset + totalread; - oneread = min(static_cast(iter->bytes) - totalread, S3fsCurl::GetMultipartSize()); - - // check rest size is over minimum part size - // - // [NOTE] - // If the final part size is smaller than 5MB, it is not allowed by S3 API. - // For this case, if the previous part of the final part is not over 5GB, - // we incorporate the final part to the previous part. If the previous part - // is over 5GB, we want to even out the last part and the previous part. 
- // - if((iter->bytes - totalread - oneread) < MIN_MULTIPART_SIZE){ - if(FIVE_GB < iter->bytes - totalread){ - oneread = (iter->bytes - totalread) / 2; - }else{ - oneread = iter->bytes - totalread; - } - } - - if(!iter->loaded){ - // - // loading or initializing - // - upload_fd = tmpfd; - - // load offset & size - size_t need_load_size = 0; - if(size_orgmeta <= offset){ - // all area is over of original size - need_load_size = 0; - }else{ - if(size_orgmeta < (offset + oneread)){ - // original file size(on S3) is smaller than request. - need_load_size = size_orgmeta - offset; - }else{ - need_load_size = oneread; - } - } - size_t over_size = oneread - need_load_size; - - // [NOTE] - // truncate file to zero and set length to part offset + size - // after this, file length is (offset + size), but file does not use any disk space. - // - if(-1 == ftruncate(tmpfd, 0) || -1 == ftruncate(tmpfd, (offset + oneread))){ - S3FS_PRN_ERR("failed to truncate temporary file(%d).", tmpfd); - result = -EIO; - break; - } - - // single area get request - if(0 < need_load_size){ - S3fsCurl s3fscurl; - if(0 != (result = s3fscurl.GetObjectRequest(path.c_str(), tmpfd, offset, oneread))){ - S3FS_PRN_ERR("failed to get object(start=%lld, size=%lld) for file(%d).", static_cast(offset), static_cast(oneread), tmpfd); - break; - } - } - // initialize fd without loading - if(0 < over_size){ - if(0 != (result = FdEntity::FillFile(tmpfd, 0, over_size, offset + need_load_size))){ - S3FS_PRN_ERR("failed to fill rest bytes for fd(%d). 
errno(%d)", tmpfd, result); - break; - } - } - - }else{ - // already loaded area - } - - // single area upload by multipart post - if(0 != (result = NoCacheMultipartPost(upload_fd, offset, oneread))){ - S3FS_PRN_ERR("failed to multipart post(start=%lld, size=%lld) for file(%d).", static_cast(offset), static_cast(oneread), upload_fd); - break; - } - } - if(0 != result){ - break; - } - - // set loaded flag - if(!iter->loaded){ - if(iter->offset < start){ - fdpage page(iter->offset, start - iter->offset, iter->loaded, false); - iter->bytes -= (start - iter->offset); - iter->offset = start; - pagelist.pages.insert(iter, page); - } - if(0 != size && start + size < iter->next()){ - fdpage page(iter->offset, start + size - iter->offset, true, false); - iter->bytes -= (start + size - iter->offset); - iter->offset = start + size; - pagelist.pages.insert(iter, page); - }else{ - iter->loaded = true; - iter->modified = false; - } - } - } - if(0 == result){ - // compress pagelist - pagelist.Compress(); - - // fd data do empty - if(-1 == ftruncate(fd, 0)){ - S3FS_PRN_ERR("failed to truncate file(%d), but continue...", fd); - } - } - - // close temporary - fclose(ptmpfp); - - return result; -} - -// [NOTE] -// At no disk space for caching object. -// This method is starting multipart uploading. -// -int FdEntity::NoCachePreMultipartPost() -{ - // initialize multipart upload values - upload_id.erase(); - etaglist.clear(); - pending_headers.clear(); - - S3fsCurl s3fscurl(true); - int result; - if(0 != (result = s3fscurl.PreMultipartPostRequest(path.c_str(), orgmeta, upload_id, false))){ - return result; - } - s3fscurl.DestroyCurlHandle(); - return 0; -} - -// [NOTE] -// At no disk space for caching object. -// This method is uploading one part of multipart. 
-// -int FdEntity::NoCacheMultipartPost(int tgfd, off_t start, off_t size) -{ - if(-1 == tgfd || upload_id.empty()){ - S3FS_PRN_ERR("Need to initialize for multipart post."); - return -EIO; - } - S3fsCurl s3fscurl(true); - return s3fscurl.MultipartUploadRequest(upload_id, path.c_str(), tgfd, start, size, etaglist); -} - -// [NOTE] -// At no disk space for caching object. -// This method is finishing multipart uploading. -// -int FdEntity::NoCacheCompleteMultipartPost() -{ - if(upload_id.empty() || etaglist.empty()){ - S3FS_PRN_ERR("There is no upload id or etag list."); - return -EIO; - } - - S3fsCurl s3fscurl(true); - int result; - if(0 != (result = s3fscurl.CompleteMultipartPostRequest(path.c_str(), upload_id, etaglist))){ - return result; - } - s3fscurl.DestroyCurlHandle(); - - // reset values - upload_id.erase(); - etaglist.clear(); - mp_start = 0; - mp_size = 0; - - return 0; -} - -int FdEntity::RowFlush(const char* tpath, bool force_sync) -{ - int result = 0; - - std::string tmppath; - headers_t tmporgmeta; - { - AutoLock auto_lock(&fdent_lock); - tmppath = path; - tmporgmeta = orgmeta; - } - - S3FS_PRN_INFO3("[tpath=%s][path=%s][fd=%d]", SAFESTRPTR(tpath), tmppath.c_str(), fd); - - if(-1 == fd){ - return -EBADF; - } - AutoLock auto_lock(&fdent_data_lock); - - if(!force_sync && !pagelist.IsModified()){ - // nothing to update. - return 0; - } - - // If there is no loading all of the area, loading all area. 
- off_t restsize = pagelist.GetTotalUnloadedPageSize(); - if(0 < restsize){ - if(0 == upload_id.length()){ - // check disk space - if(ReserveDiskSpace(restsize)){ - // enough disk space - // Load all uninitialized area(no mix multipart uploading) - if(!FdEntity::mixmultipart){ - result = Load(/*start=*/ 0, /*size=*/ 0, /*lock_already_held=*/ true); - } - FdManager::FreeReservedDiskSpace(restsize); - if(0 != result){ - S3FS_PRN_ERR("failed to upload all area(errno=%d)", result); - return static_cast(result); - } - }else{ - // no enough disk space - // upload all by multipart uploading - if(0 != (result = NoCacheLoadAndPost())){ - S3FS_PRN_ERR("failed to upload all area by multipart uploading(errno=%d)", result); - return static_cast(result); - } - } - }else{ - // already start multipart uploading - } - } - - if(0 == upload_id.length()){ - // normal uploading - /* - * Make decision to do multi upload (or not) based upon file size - * - * According to the AWS spec: - * - 1 to 10,000 parts are allowed - * - minimum size of parts is 5MB (expect for the last part) - * - * For our application, we will define minimum part size to be 10MB (10 * 2^20 Bytes) - * minimum file size will be 64 GB - 2 ** 36 - * - * Initially uploads will be done serially - * - * If file is > 20MB, then multipart will kick in - */ - if(pagelist.Size() > MAX_MULTIPART_CNT * S3fsCurl::GetMultipartSize()){ - // close f ? - S3FS_PRN_ERR("Part count exceeds %d. Increase multipart size and try again.", MAX_MULTIPART_CNT); - return -ENOTSUP; - } - - // seek to head of file. 
- if(0 != lseek(fd, 0, SEEK_SET)){ - S3FS_PRN_ERR("lseek error(%d)", errno); - return -errno; - } - // backup upload file size - struct stat st; - memset(&st, 0, sizeof(struct stat)); - if(-1 == fstat(fd, &st)){ - S3FS_PRN_ERR("fstat is failed by errno(%d), but continue...", errno); - } - - if(pagelist.Size() >= S3fsCurl::GetMultipartSize() && !nomultipart){ - if(FdEntity::mixmultipart){ - // multipart uploading can use copy api - - // This is to ensure that each part is 5MB or more. - // If the part is less than 5MB, download it. - fdpage_list_t dlpages; - fdpage_list_t mixuppages; - if(!pagelist.GetPageListsForMultipartUpload(dlpages, mixuppages, S3fsCurl::GetMultipartSize())){ - S3FS_PRN_ERR("something error occurred during getting download pagelist."); - return -1; - } - - // [TODO] should use parallel downloading - // - for(fdpage_list_t::const_iterator iter = dlpages.begin(); iter != dlpages.end(); ++iter){ - if(0 != (result = Load(iter->offset, iter->bytes, /*lock_already_held=*/ true, /*is_modified_flag=*/ true))){ // set loaded and modified flag - S3FS_PRN_ERR("failed to get parts(start=%lld, size=%lld) before uploading.", static_cast(iter->offset), static_cast(iter->bytes)); - return result; - } - } - - // multipart uploading with copy api - result = S3fsCurl::ParallelMixMultipartUploadRequest(tpath ? tpath : tmppath.c_str(), tmporgmeta, fd, mixuppages); - - }else{ - // multipart uploading not using copy api - result = S3fsCurl::ParallelMultipartUploadRequest(tpath ? tpath : tmppath.c_str(), tmporgmeta, fd); - } - }else{ - // If there are unloaded pages, they are loaded at here. - if(0 != (result = Load(/*start=*/ 0, /*size=*/ 0, /*lock_already_held=*/ true))){ - S3FS_PRN_ERR("failed to load parts before uploading object(%d)", result); - return result; - } - - S3fsCurl s3fscurl(true); - result = s3fscurl.PutRequest(tpath ? tpath : tmppath.c_str(), tmporgmeta, fd); - } - - // seek to head of file. 
- if(0 == result && 0 != lseek(fd, 0, SEEK_SET)){ - S3FS_PRN_ERR("lseek error(%d)", errno); - return -errno; - } - - // reset uploaded file size - size_orgmeta = st.st_size; - }else{ - // upload rest data - if(0 < mp_size){ - if(0 != (result = NoCacheMultipartPost(fd, mp_start, mp_size))){ - S3FS_PRN_ERR("failed to multipart post(start=%lld, size=%lld) for file(%d).", static_cast(mp_start), static_cast(mp_size), fd); - return result; - } - mp_start = 0; - mp_size = 0; - } - // complete multipart uploading. - if(0 != (result = NoCacheCompleteMultipartPost())){ - S3FS_PRN_ERR("failed to complete(finish) multipart post for file(%d).", fd); - return result; - } - // truncate file to zero - if(-1 == ftruncate(fd, 0)){ - // So the file has already been removed, skip error. - S3FS_PRN_ERR("failed to truncate file(%d) to zero, but continue...", fd); - } - - // put pading headers - if(0 != (result = UploadPendingMeta())){ - return result; - } - } - - if(0 == result){ - pagelist.ClearAllModified(); - } - return result; -} - -// [NOTICE] -// Need to lock before calling this method. -bool FdEntity::ReserveDiskSpace(off_t size) -{ - if(FdManager::ReserveDiskSpace(size)){ - return true; - } - - if(!pagelist.IsModified()){ - // try to clear all cache for this fd. 
- pagelist.Init(pagelist.Size(), false, false); - if(-1 == ftruncate(fd, 0) || -1 == ftruncate(fd, pagelist.Size())){ - S3FS_PRN_ERR("failed to truncate temporary file(%d).", fd); - return false; - } - - if(FdManager::ReserveDiskSpace(size)){ - return true; - } - } - - FdManager::get()->CleanupCacheDir(); - - return FdManager::ReserveDiskSpace(size); -} - -ssize_t FdEntity::Read(char* bytes, off_t start, size_t size, bool force_load) -{ - S3FS_PRN_DBG("[path=%s][fd=%d][offset=%lld][size=%zu]", path.c_str(), fd, static_cast(start), size); - - if(-1 == fd){ - return -EBADF; - } - AutoLock auto_lock(&fdent_data_lock); - - if(force_load){ - pagelist.SetPageLoadedStatus(start, size, PageList::PAGE_NOT_LOAD_MODIFIED); - } - - ssize_t rsize; - - // check disk space - if(0 < pagelist.GetTotalUnloadedPageSize(start, size)){ - // load size(for prefetch) - size_t load_size = size; - if(start + static_cast(size) < pagelist.Size()){ - ssize_t prefetch_max_size = max(static_cast(size), S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount()); - - if(start + prefetch_max_size < pagelist.Size()){ - load_size = prefetch_max_size; - }else{ - load_size = pagelist.Size() - start; - } - } - - if(!ReserveDiskSpace(load_size)){ - S3FS_PRN_WARN("could not reserve disk space for pre-fetch download"); - load_size = size; - if(!ReserveDiskSpace(load_size)){ - S3FS_PRN_ERR("could not reserve disk space for pre-fetch download"); - return -ENOSPC; - } - } - - // Loading - int result = 0; - if(0 < size){ - result = Load(start, load_size, /*lock_already_held=*/ true); - } - - FdManager::FreeReservedDiskSpace(load_size); - - if(0 != result){ - S3FS_PRN_ERR("could not download. start(%lld), size(%zu), errno(%d)", static_cast(start), size, result); - return -EIO; - } - } - // Reading - if(-1 == (rsize = pread(fd, bytes, size, start))){ - S3FS_PRN_ERR("pread failed. 
errno(%d)", errno); - return -errno; - } - return rsize; -} - -ssize_t FdEntity::Write(const char* bytes, off_t start, size_t size) -{ - S3FS_PRN_DBG("[path=%s][fd=%d][offset=%lld][size=%zu]", path.c_str(), fd, static_cast(start), size); - - if(-1 == fd){ - return -EBADF; - } - // check if not enough disk space left BEFORE locking fd - if(FdManager::IsCacheDir() && !FdManager::IsSafeDiskSpace(NULL, size)){ - FdManager::get()->CleanupCacheDir(); - } - AutoLock auto_lock(&fdent_data_lock); - - // check file size - if(pagelist.Size() < start){ - // grow file size - if(-1 == ftruncate(fd, start)){ - S3FS_PRN_ERR("failed to truncate temporary file(%d).", fd); - return -EIO; - } - // add new area - pagelist.SetPageLoadedStatus(pagelist.Size(), start - pagelist.Size(), PageList::PAGE_MODIFIED); - } - - int result = 0; - ssize_t wsize; - - if(0 == upload_id.length()){ - // check disk space - off_t restsize = pagelist.GetTotalUnloadedPageSize(0, start) + size; - if(ReserveDiskSpace(restsize)){ - // enough disk space - - // Load uninitialized area which starts from 0 to (start + size) before writing. 
- if(!FdEntity::mixmultipart){ - if(0 < start){ - result = Load(0, start, /*lock_already_held=*/ true); - } - } - - FdManager::FreeReservedDiskSpace(restsize); - if(0 != result){ - S3FS_PRN_ERR("failed to load uninitialized area before writing(errno=%d)", result); - return static_cast(result); - } - }else{ - // no enough disk space - if(0 != (result = NoCachePreMultipartPost())){ - S3FS_PRN_ERR("failed to switch multipart uploading with no cache(errno=%d)", result); - return static_cast(result); - } - // start multipart uploading - if(0 != (result = NoCacheLoadAndPost(0, start))){ - S3FS_PRN_ERR("failed to load uninitialized area and multipart uploading it(errno=%d)", result); - return static_cast(result); - } - mp_start = start; - mp_size = 0; - } - }else{ - // already start multipart uploading - } - - // Writing - if(-1 == (wsize = pwrite(fd, bytes, size, start))){ - S3FS_PRN_ERR("pwrite failed. errno(%d)", errno); - return -errno; - } - if(0 < wsize){ - pagelist.SetPageLoadedStatus(start, wsize, PageList::PAGE_LOAD_MODIFIED); - } - - // Load uninitialized area which starts from (start + size) to EOF after writing. - if(!FdEntity::mixmultipart){ - if(pagelist.Size() > start + static_cast(size)){ - result = Load(start + size, pagelist.Size(), /*lock_already_held=*/ true); - if(0 != result){ - S3FS_PRN_ERR("failed to load uninitialized area after writing(errno=%d)", result); - return static_cast(result); - } - } - } - - // check multipart uploading - if(0 < upload_id.length()){ - mp_size += wsize; - if(S3fsCurl::GetMultipartSize() <= mp_size){ - // over one multipart size - if(0 != (result = NoCacheMultipartPost(fd, mp_start, mp_size))){ - S3FS_PRN_ERR("failed to multipart post(start=%lld, size=%lld) for file(%d).", static_cast(mp_start), static_cast(mp_size), fd); - return result; - } - // [NOTE] - // truncate file to zero and set length to part offset + size - // after this, file length is (offset + size), but file does not use any disk space. 
- // - if(-1 == ftruncate(fd, 0) || -1 == ftruncate(fd, (mp_start + mp_size))){ - S3FS_PRN_ERR("failed to truncate file(%d).", fd); - return -EIO; - } - mp_start += mp_size; - mp_size = 0; - } - } - return wsize; -} - -// [NOTE] -// Returns true if merged to orgmeta. -// If true is returned, the caller can update the header. -// If it is false, do not update the header because multipart upload is in progress. -// In this case, the header is pending internally and is updated after the upload -// is complete(flush file). -// -bool FdEntity::MergeOrgMeta(headers_t& updatemeta) -{ - AutoLock auto_lock(&fdent_lock); - - bool is_pending; - if(upload_id.empty()){ - // merge update meta - headers_t mergedmeta = orgmeta; - - merge_headers(orgmeta, updatemeta, false); // overwrite existing keys only - merge_headers(mergedmeta, updatemeta, true); // overwrite all keys - updatemeta = mergedmeta; // swap - - is_pending = false; - }else{ - // could not update meta because uploading now, then put pending. - pending_headers.push_back(updatemeta); - is_pending = true; - } - return is_pending; -} - -// global function in s3fs.cpp -int put_headers(const char* path, headers_t& meta, bool is_copy); - -int FdEntity::UploadPendingMeta() -{ - AutoLock auto_lock(&fdent_lock); - - int result = 0; - for(headers_list_t::const_iterator iter = pending_headers.begin(); iter != pending_headers.end(); ++iter){ - // [NOTE] - // orgmeta will be updated sequentially. - headers_t putmeta = orgmeta; - merge_headers(putmeta, *iter, true); // overwrite all keys - merge_headers(orgmeta, *iter, false); // overwrite existing keys only - - // [NOTE] - // this is special cases, we remove the key which has empty values. 
- for(headers_t::iterator hiter = putmeta.begin(); hiter != putmeta.end(); ){ - if(hiter->second.empty()){ - if(orgmeta.end() != orgmeta.find(hiter->first)){ - orgmeta.erase(hiter->first); - } - putmeta.erase(hiter++); - }else{ - ++hiter; - } - } - - // update ctime/mtime - time_t updatetime = get_mtime((*iter), false); // not overcheck - if(0 != updatetime){ - SetMtime(updatetime, true); - } - updatetime = get_ctime((*iter), false); // not overcheck - if(0 != updatetime){ - SetCtime(updatetime, true); - } - - // put headers - int one_result = put_headers(path.c_str(), putmeta, true); - if(0 != one_result){ - S3FS_PRN_ERR("failed to put header after flushing file(%s) by(%d).", path.c_str(), one_result); - result = one_result; // keep lastest result code - } - } - pending_headers.clear(); - return result; -} - -//------------------------------------------------ -// FdManager symbol -//------------------------------------------------ // [NOTE] // NOCACHE_PATH_PREFIX symbol needs for not using cache mode. 
// Now s3fs I/F functions in s3fs.cpp has left the processing @@ -2740,244 +105,244 @@ bool FdManager::have_lseek_hole(false); //------------------------------------------------ bool FdManager::SetCacheDir(const char* dir) { - if(!dir || '\0' == dir[0]){ - cache_dir = ""; - }else{ - cache_dir = dir; - } - return true; + if(!dir || '\0' == dir[0]){ + cache_dir = ""; + }else{ + cache_dir = dir; + } + return true; } bool FdManager::SetCacheCheckOutput(const char* path) { - if(!path || '\0' == path[0]){ - check_cache_output.erase(); - }else{ - check_cache_output = path; - } - return true; + if(!path || '\0' == path[0]){ + check_cache_output.erase(); + }else{ + check_cache_output = path; + } + return true; } bool FdManager::DeleteCacheDirectory() { - if(FdManager::cache_dir.empty()){ + if(FdManager::cache_dir.empty()){ + return true; + } + + string cache_path; + if(!FdManager::MakeCachePath(NULL, cache_path, false)){ + return false; + } + if(!delete_files_in_dir(cache_path.c_str(), true)){ + return false; + } + + string mirror_path = FdManager::cache_dir + "/." + bucket + ".mirror"; + if(!delete_files_in_dir(mirror_path.c_str(), true)){ + return false; + } + return true; - } - - string cache_path; - if(!FdManager::MakeCachePath(NULL, cache_path, false)){ - return false; - } - if(!delete_files_in_dir(cache_path.c_str(), true)){ - return false; - } - - string mirror_path = FdManager::cache_dir + "/." 
+ bucket + ".mirror"; - if(!delete_files_in_dir(mirror_path.c_str(), true)){ - return false; - } - - return true; } int FdManager::DeleteCacheFile(const char* path) { - S3FS_PRN_INFO3("[path=%s]", SAFESTRPTR(path)); + S3FS_PRN_INFO3("[path=%s]", SAFESTRPTR(path)); - if(!path){ - return -EIO; - } - if(FdManager::cache_dir.empty()){ - return 0; - } - string cache_path; - if(!FdManager::MakeCachePath(path, cache_path, false)){ - return 0; - } - int result = 0; - if(0 != unlink(cache_path.c_str())){ - if(ENOENT == errno){ - S3FS_PRN_DBG("failed to delete file(%s): errno=%d", path, errno); - }else{ - S3FS_PRN_ERR("failed to delete file(%s): errno=%d", path, errno); + if(!path){ + return -EIO; } - result = -errno; - } - if(!CacheFileStat::DeleteCacheFileStat(path)){ - if(ENOENT == errno){ - S3FS_PRN_DBG("failed to delete stat file(%s): errno=%d", path, errno); - }else{ - S3FS_PRN_ERR("failed to delete stat file(%s): errno=%d", path, errno); + if(FdManager::cache_dir.empty()){ + return 0; } - if(0 != errno){ - result = -errno; - }else{ - result = -EIO; + string cache_path; + if(!FdManager::MakeCachePath(path, cache_path, false)){ + return 0; } - } - return result; + int result = 0; + if(0 != unlink(cache_path.c_str())){ + if(ENOENT == errno){ + S3FS_PRN_DBG("failed to delete file(%s): errno=%d", path, errno); + }else{ + S3FS_PRN_ERR("failed to delete file(%s): errno=%d", path, errno); + } + result = -errno; + } + if(!CacheFileStat::DeleteCacheFileStat(path)){ + if(ENOENT == errno){ + S3FS_PRN_DBG("failed to delete stat file(%s): errno=%d", path, errno); + }else{ + S3FS_PRN_ERR("failed to delete stat file(%s): errno=%d", path, errno); + } + if(0 != errno){ + result = -errno; + }else{ + result = -EIO; + } + } + return result; } bool FdManager::MakeCachePath(const char* path, string& cache_path, bool is_create_dir, bool is_mirror_path) { - if(FdManager::cache_dir.empty()){ - cache_path = ""; - return true; - } - - string resolved_path(FdManager::cache_dir); - 
if(!is_mirror_path){ - resolved_path += "/"; - resolved_path += bucket; - }else{ - resolved_path += "/."; - resolved_path += bucket; - resolved_path += ".mirror"; - } - - if(is_create_dir){ - int result; - if(0 != (result = mkdirp(resolved_path + mydirname(path), 0777))){ - S3FS_PRN_ERR("failed to create dir(%s) by errno(%d).", path, result); - return false; + if(FdManager::cache_dir.empty()){ + cache_path = ""; + return true; } - } - if(!path || '\0' == path[0]){ - cache_path = resolved_path; - }else{ - cache_path = resolved_path + SAFESTRPTR(path); - } - return true; + + string resolved_path(FdManager::cache_dir); + if(!is_mirror_path){ + resolved_path += "/"; + resolved_path += bucket; + }else{ + resolved_path += "/."; + resolved_path += bucket; + resolved_path += ".mirror"; + } + + if(is_create_dir){ + int result; + if(0 != (result = mkdirp(resolved_path + mydirname(path), 0777))){ + S3FS_PRN_ERR("failed to create dir(%s) by errno(%d).", path, result); + return false; + } + } + if(!path || '\0' == path[0]){ + cache_path = resolved_path; + }else{ + cache_path = resolved_path + SAFESTRPTR(path); + } + return true; } bool FdManager::CheckCacheTopDir() { - if(FdManager::cache_dir.empty()){ - return true; - } - string toppath(FdManager::cache_dir + "/" + bucket); + if(FdManager::cache_dir.empty()){ + return true; + } + string toppath(FdManager::cache_dir + "/" + bucket); - return check_exist_dir_permission(toppath.c_str()); + return check_exist_dir_permission(toppath.c_str()); } bool FdManager::MakeRandomTempPath(const char* path, string& tmppath) { - char szBuff[64]; + char szBuff[64]; - sprintf(szBuff, NOCACHE_PATH_PREFIX_FORM, random()); // worry for performance, but maybe don't worry. - tmppath = szBuff; - tmppath += path ? path : ""; - return true; + sprintf(szBuff, NOCACHE_PATH_PREFIX_FORM, random()); // worry for performance, but maybe don't worry. + tmppath = szBuff; + tmppath += path ? 
path : ""; + return true; } bool FdManager::SetCheckCacheDirExist(bool is_check) { - bool old = FdManager::check_cache_dir_exist; - FdManager::check_cache_dir_exist = is_check; - return old; + bool old = FdManager::check_cache_dir_exist; + FdManager::check_cache_dir_exist = is_check; + return old; } bool FdManager::CheckCacheDirExist() { - if(!FdManager::check_cache_dir_exist){ + if(!FdManager::check_cache_dir_exist){ + return true; + } + if(FdManager::cache_dir.empty()){ + return true; + } + // check the directory + struct stat st; + if(0 != stat(cache_dir.c_str(), &st)){ + S3FS_PRN_ERR("could not access to cache directory(%s) by errno(%d).", cache_dir.c_str(), errno); + return false; + } + if(!S_ISDIR(st.st_mode)){ + S3FS_PRN_ERR("the cache directory(%s) is not directory.", cache_dir.c_str()); + return false; + } return true; - } - if(FdManager::cache_dir.empty()){ - return true; - } - // check the directory - struct stat st; - if(0 != stat(cache_dir.c_str(), &st)){ - S3FS_PRN_ERR("could not access to cache directory(%s) by errno(%d).", cache_dir.c_str(), errno); - return false; - } - if(!S_ISDIR(st.st_mode)){ - S3FS_PRN_ERR("the cache directory(%s) is not directory.", cache_dir.c_str()); - return false; - } - return true; } off_t FdManager::GetEnsureFreeDiskSpace() { - AutoLock auto_lock(&FdManager::reserved_diskspace_lock); - return FdManager::free_disk_space; + AutoLock auto_lock(&FdManager::reserved_diskspace_lock); + return FdManager::free_disk_space; } off_t FdManager::SetEnsureFreeDiskSpace(off_t size) { - AutoLock auto_lock(&FdManager::reserved_diskspace_lock); - off_t old = FdManager::free_disk_space; - FdManager::free_disk_space = size; - return old; + AutoLock auto_lock(&FdManager::reserved_diskspace_lock); + off_t old = FdManager::free_disk_space; + FdManager::free_disk_space = size; + return old; } off_t FdManager::GetFreeDiskSpace(const char* path) { - struct statvfs vfsbuf; - string ctoppath; - if(!FdManager::cache_dir.empty()){ - ctoppath = 
FdManager::cache_dir + "/"; - ctoppath = get_exist_directory_path(ctoppath); // existed directory - if(ctoppath != "/"){ - ctoppath += "/"; + struct statvfs vfsbuf; + string ctoppath; + if(!FdManager::cache_dir.empty()){ + ctoppath = FdManager::cache_dir + "/"; + ctoppath = get_exist_directory_path(ctoppath); // existed directory + if(ctoppath != "/"){ + ctoppath += "/"; + } + }else{ + ctoppath = TMPFILE_DIR_0PATH "/"; } - }else{ - ctoppath = TMPFILE_DIR_0PATH "/"; - } - if(path && '\0' != *path){ - ctoppath += path; - }else{ - ctoppath += "."; - } - if(-1 == statvfs(ctoppath.c_str(), &vfsbuf)){ - S3FS_PRN_ERR("could not get vfs stat by errno(%d)", errno); - return 0; - } - return (vfsbuf.f_bavail * vfsbuf.f_frsize); + if(path && '\0' != *path){ + ctoppath += path; + }else{ + ctoppath += "."; + } + if(-1 == statvfs(ctoppath.c_str(), &vfsbuf)){ + S3FS_PRN_ERR("could not get vfs stat by errno(%d)", errno); + return 0; + } + return (vfsbuf.f_bavail * vfsbuf.f_frsize); } bool FdManager::IsSafeDiskSpace(const char* path, off_t size) { - off_t fsize = FdManager::GetFreeDiskSpace(path); - return size + FdManager::GetEnsureFreeDiskSpace() <= fsize; + off_t fsize = FdManager::GetFreeDiskSpace(path); + return size + FdManager::GetEnsureFreeDiskSpace() <= fsize; } bool FdManager::HaveLseekHole(void) { - if(FdManager::checked_lseek){ - return FdManager::have_lseek_hole; - } + if(FdManager::checked_lseek){ + return FdManager::have_lseek_hole; + } + + // create tempolary file + int fd; + if(-1 == (fd = open(TMPFILE_FOR_CHECK_HOLE, O_CREAT|O_RDWR, 0600))){ + S3FS_PRN_ERR("failed to open tempolary file(%s) - errno(%d)", TMPFILE_FOR_CHECK_HOLE, errno); + FdManager::checked_lseek = true; + FdManager::have_lseek_hole = false; + return FdManager::have_lseek_hole; + } + + // check SEEK_DATA/SEEK_HOLE options + bool result = true; + if(-1 == lseek(fd, 0, SEEK_DATA)){ + if(EINVAL == errno){ + S3FS_PRN_ERR("lseek does not support SEEK_DATA"); + result = false; + } + } + if(result && -1 == 
lseek(fd, 0, SEEK_HOLE)){ + if(EINVAL == errno){ + S3FS_PRN_ERR("lseek does not support SEEK_HOLE"); + result = false; + } + } + close(fd); + unlink(TMPFILE_FOR_CHECK_HOLE); - // create tempolary file - int fd; - if(-1 == (fd = open(TMPFILE_FOR_CHECK_HOLE, O_CREAT|O_RDWR, 0600))){ - S3FS_PRN_ERR("failed to open tempolary file(%s) - errno(%d)", TMPFILE_FOR_CHECK_HOLE, errno); FdManager::checked_lseek = true; - FdManager::have_lseek_hole = false; + FdManager::have_lseek_hole = result; return FdManager::have_lseek_hole; - } - - // check SEEK_DATA/SEEK_HOLE options - bool result = true; - if(-1 == lseek(fd, 0, SEEK_DATA)){ - if(EINVAL == errno){ - S3FS_PRN_ERR("lseek does not support SEEK_DATA"); - result = false; - } - } - if(result && -1 == lseek(fd, 0, SEEK_HOLE)){ - if(EINVAL == errno){ - S3FS_PRN_ERR("lseek does not support SEEK_HOLE"); - result = false; - } - } - close(fd); - unlink(TMPFILE_FOR_CHECK_HOLE); - - FdManager::checked_lseek = true; - FdManager::have_lseek_hole = result; - return FdManager::have_lseek_hole; } //------------------------------------------------ @@ -2985,368 +350,368 @@ bool FdManager::HaveLseekHole(void) //------------------------------------------------ FdManager::FdManager() { - if(this == FdManager::get()){ - pthread_mutexattr_t attr; - pthread_mutexattr_init(&attr); + if(this == FdManager::get()){ + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); #if S3FS_PTHREAD_ERRORCHECK - pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); #endif - int res; - if(0 != (res = pthread_mutex_init(&FdManager::fd_manager_lock, &attr))){ - S3FS_PRN_CRIT("failed to init fd_manager_lock: %d", res); - abort(); + int res; + if(0 != (res = pthread_mutex_init(&FdManager::fd_manager_lock, &attr))){ + S3FS_PRN_CRIT("failed to init fd_manager_lock: %d", res); + abort(); + } + if(0 != (res = pthread_mutex_init(&FdManager::cache_cleanup_lock, &attr))){ + S3FS_PRN_CRIT("failed to init 
cache_cleanup_lock: %d", res); + abort(); + } + if(0 != (res = pthread_mutex_init(&FdManager::reserved_diskspace_lock, &attr))){ + S3FS_PRN_CRIT("failed to init reserved_diskspace_lock: %d", res); + abort(); + } + FdManager::is_lock_init = true; + }else{ + abort(); } - if(0 != (res = pthread_mutex_init(&FdManager::cache_cleanup_lock, &attr))){ - S3FS_PRN_CRIT("failed to init cache_cleanup_lock: %d", res); - abort(); - } - if(0 != (res = pthread_mutex_init(&FdManager::reserved_diskspace_lock, &attr))){ - S3FS_PRN_CRIT("failed to init reserved_diskspace_lock: %d", res); - abort(); - } - FdManager::is_lock_init = true; - }else{ - abort(); - } } FdManager::~FdManager() { - if(this == FdManager::get()){ - for(fdent_map_t::iterator iter = fent.begin(); fent.end() != iter; ++iter){ - FdEntity* ent = (*iter).second; - delete ent; - } - fent.clear(); + if(this == FdManager::get()){ + for(fdent_map_t::iterator iter = fent.begin(); fent.end() != iter; ++iter){ + FdEntity* ent = (*iter).second; + delete ent; + } + fent.clear(); - if(FdManager::is_lock_init){ - int res; - if(0 != (res = pthread_mutex_destroy(&FdManager::fd_manager_lock))){ - S3FS_PRN_CRIT("failed to destroy fd_manager_lock: %d", res); + if(FdManager::is_lock_init){ + int res; + if(0 != (res = pthread_mutex_destroy(&FdManager::fd_manager_lock))){ + S3FS_PRN_CRIT("failed to destroy fd_manager_lock: %d", res); + abort(); + } + if(0 != (res = pthread_mutex_destroy(&FdManager::cache_cleanup_lock))){ + S3FS_PRN_CRIT("failed to destroy cache_cleanup_lock: %d", res); + abort(); + } + if(0 != (res = pthread_mutex_destroy(&FdManager::reserved_diskspace_lock))){ + S3FS_PRN_CRIT("failed to destroy reserved_diskspace_lock: %d", res); + abort(); + } + FdManager::is_lock_init = false; + } + }else{ abort(); - } - if(0 != (res = pthread_mutex_destroy(&FdManager::cache_cleanup_lock))){ - S3FS_PRN_CRIT("failed to destroy cache_cleanup_lock: %d", res); - abort(); - } - if(0 != (res = 
pthread_mutex_destroy(&FdManager::reserved_diskspace_lock))){ - S3FS_PRN_CRIT("failed to destroy reserved_diskspace_lock: %d", res); - abort(); - } - FdManager::is_lock_init = false; } - }else{ - abort(); - } } FdEntity* FdManager::GetFdEntity(const char* path, int existfd) { - S3FS_PRN_INFO3("[path=%s][fd=%d]", SAFESTRPTR(path), existfd); + S3FS_PRN_INFO3("[path=%s][fd=%d]", SAFESTRPTR(path), existfd); - if(!path || '\0' == path[0]){ - return NULL; - } - AutoLock auto_lock(&FdManager::fd_manager_lock); - - fdent_map_t::iterator iter = fent.find(string(path)); - if(fent.end() != iter && (-1 == existfd || (*iter).second->GetFd() == existfd)){ - iter->second->Dup(); - return (*iter).second; - } - - if(-1 != existfd){ - for(iter = fent.begin(); iter != fent.end(); ++iter){ - if((*iter).second && (*iter).second->GetFd() == existfd){ - // found opened fd in map - if(0 == strcmp((*iter).second->GetPath(), path)){ - iter->second->Dup(); - return (*iter).second; - } - // found fd, but it is used another file(file descriptor is recycled) - // so returns NULL. - break; - } + if(!path || '\0' == path[0]){ + return NULL; } - } - return NULL; + AutoLock auto_lock(&FdManager::fd_manager_lock); + + fdent_map_t::iterator iter = fent.find(string(path)); + if(fent.end() != iter && (-1 == existfd || (*iter).second->GetFd() == existfd)){ + iter->second->Dup(); + return (*iter).second; + } + + if(-1 != existfd){ + for(iter = fent.begin(); iter != fent.end(); ++iter){ + if((*iter).second && (*iter).second->GetFd() == existfd){ + // found opened fd in map + if(0 == strcmp((*iter).second->GetPath(), path)){ + iter->second->Dup(); + return (*iter).second; + } + // found fd, but it is used another file(file descriptor is recycled) + // so returns NULL. 
+ break; + } + } + } + return NULL; } FdEntity* FdManager::Open(const char* path, headers_t* pmeta, off_t size, time_t time, bool force_tmpfile, bool is_create, bool no_fd_lock_wait) { - S3FS_PRN_DBG("[path=%s][size=%lld][time=%lld]", SAFESTRPTR(path), static_cast(size), static_cast(time)); + S3FS_PRN_DBG("[path=%s][size=%lld][time=%lld]", SAFESTRPTR(path), static_cast(size), static_cast(time)); - if(!path || '\0' == path[0]){ - return NULL; - } - bool close = false; - FdEntity* ent; - - AutoLock auto_lock(&FdManager::fd_manager_lock); - - // search in mapping by key(path) - fdent_map_t::iterator iter = fent.find(string(path)); - - if(fent.end() == iter && !force_tmpfile && !FdManager::IsCacheDir()){ - // If the cache directory is not specified, s3fs opens a temporary file - // when the file is opened. - // Then if it could not find a entity in map for the file, s3fs should - // search a entity in all which opened the temporary file. - // - for(iter = fent.begin(); iter != fent.end(); ++iter){ - if((*iter).second && (*iter).second->IsOpen() && 0 == strcmp((*iter).second->GetPath(), path)){ - break; // found opened fd in mapping - } + if(!path || '\0' == path[0]){ + return NULL; } - } + bool close = false; + FdEntity* ent; - if(fent.end() != iter){ - // found - ent = (*iter).second; - ent->Dup(); - if(ent->IsModified()){ - // If the file is being modified, it will not be resized. - size = -1; + AutoLock auto_lock(&FdManager::fd_manager_lock); + + // search in mapping by key(path) + fdent_map_t::iterator iter = fent.find(string(path)); + + if(fent.end() == iter && !force_tmpfile && !FdManager::IsCacheDir()){ + // If the cache directory is not specified, s3fs opens a temporary file + // when the file is opened. + // Then if it could not find a entity in map for the file, s3fs should + // search a entity in all which opened the temporary file. 
+ // + for(iter = fent.begin(); iter != fent.end(); ++iter){ + if((*iter).second && (*iter).second->IsOpen() && 0 == strcmp((*iter).second->GetPath(), path)){ + break; // found opened fd in mapping + } + } } - close = true; - }else if(is_create){ - // not found - string cache_path; - if(!force_tmpfile && !FdManager::MakeCachePath(path, cache_path, true)){ - S3FS_PRN_ERR("failed to make cache path for object(%s).", path); - return NULL; - } - // make new obj - ent = new FdEntity(path, cache_path.c_str()); + if(fent.end() != iter){ + // found + ent = (*iter).second; + ent->Dup(); + if(ent->IsModified()){ + // If the file is being modified, it will not be resized. + size = -1; + } + close = true; - if(!cache_path.empty()){ - // using cache - fent[string(path)] = ent; + }else if(is_create){ + // not found + string cache_path; + if(!force_tmpfile && !FdManager::MakeCachePath(path, cache_path, true)){ + S3FS_PRN_ERR("failed to make cache path for object(%s).", path); + return NULL; + } + // make new obj + ent = new FdEntity(path, cache_path.c_str()); + + if(!cache_path.empty()){ + // using cache + fent[string(path)] = ent; + }else{ + // not using cache, so the key of fdentity is set not really existing path. + // (but not strictly unexisting path.) + // + // [NOTE] + // The reason why this process here, please look at the definition of the + // comments of NOCACHE_PATH_PREFIX_FORM symbol. + // + string tmppath; + FdManager::MakeRandomTempPath(path, tmppath); + fent[tmppath] = ent; + } }else{ - // not using cache, so the key of fdentity is set not really existing path. - // (but not strictly unexisting path.) - // - // [NOTE] - // The reason why this process here, please look at the definition of the - // comments of NOCACHE_PATH_PREFIX_FORM symbol. 
- // - string tmppath; - FdManager::MakeRandomTempPath(path, tmppath); - fent[tmppath] = ent; + return NULL; } - }else{ - return NULL; - } - // open - if(0 != ent->Open(pmeta, size, time, no_fd_lock_wait)){ - if(close){ - ent->Close(); + // open + if(0 != ent->Open(pmeta, size, time, no_fd_lock_wait)){ + if(close){ + ent->Close(); + } + return NULL; } - return NULL; - } - if(close){ - ent->Close(); - } - return ent; + if(close){ + ent->Close(); + } + return ent; } FdEntity* FdManager::ExistOpen(const char* path, int existfd, bool ignore_existfd) { - S3FS_PRN_DBG("[path=%s][fd=%d][ignore_existfd=%s]", SAFESTRPTR(path), existfd, ignore_existfd ? "true" : "false"); + S3FS_PRN_DBG("[path=%s][fd=%d][ignore_existfd=%s]", SAFESTRPTR(path), existfd, ignore_existfd ? "true" : "false"); - // search by real path - FdEntity* ent = Open(path, NULL, -1, -1, false, false); + // search by real path + FdEntity* ent = Open(path, NULL, -1, -1, false, false); - if(!ent && (ignore_existfd || (-1 != existfd))){ - // search from all fdentity because of not using cache. - AutoLock auto_lock(&FdManager::fd_manager_lock); + if(!ent && (ignore_existfd || (-1 != existfd))){ + // search from all fdentity because of not using cache. + AutoLock auto_lock(&FdManager::fd_manager_lock); - for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){ - if((*iter).second && (*iter).second->IsOpen() && (ignore_existfd || ((*iter).second->GetFd() == existfd))){ - // found opened fd in map - if(0 == strcmp((*iter).second->GetPath(), path)){ - ent = (*iter).second; - ent->Dup(); - }else{ - // found fd, but it is used another file(file descriptor is recycled) - // so returns NULL. 
+ for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){ + if((*iter).second && (*iter).second->IsOpen() && (ignore_existfd || ((*iter).second->GetFd() == existfd))){ + // found opened fd in map + if(0 == strcmp((*iter).second->GetPath(), path)){ + ent = (*iter).second; + ent->Dup(); + }else{ + // found fd, but it is used another file(file descriptor is recycled) + // so returns NULL. + } + break; + } } - break; - } } - } - return ent; + return ent; } void FdManager::Rename(const std::string &from, const std::string &to) { - AutoLock auto_lock(&FdManager::fd_manager_lock); + AutoLock auto_lock(&FdManager::fd_manager_lock); - fdent_map_t::iterator iter = fent.find(from); - if(fent.end() == iter && !FdManager::IsCacheDir()){ - // If the cache directory is not specified, s3fs opens a temporary file - // when the file is opened. - // Then if it could not find a entity in map for the file, s3fs should - // search a entity in all which opened the temporary file. - // - for(iter = fent.begin(); iter != fent.end(); ++iter){ - if((*iter).second && (*iter).second->IsOpen() && 0 == strcmp((*iter).second->GetPath(), from.c_str())){ - break; // found opened fd in mapping - } - } - } - - if(fent.end() != iter){ - // found - S3FS_PRN_DBG("[from=%s][to=%s]", from.c_str(), to.c_str()); - - FdEntity* ent = (*iter).second; - - // retrieve old fd entity from map - fent.erase(iter); - - // rename path and caches in fd entity - string fentmapkey; - if(!ent->RenamePath(to, fentmapkey)){ - S3FS_PRN_ERR("Failed to rename FdEntity object for %s to %s", from.c_str(), to.c_str()); - return; + fdent_map_t::iterator iter = fent.find(from); + if(fent.end() == iter && !FdManager::IsCacheDir()){ + // If the cache directory is not specified, s3fs opens a temporary file + // when the file is opened. + // Then if it could not find a entity in map for the file, s3fs should + // search a entity in all which opened the temporary file. 
+ // + for(iter = fent.begin(); iter != fent.end(); ++iter){ + if((*iter).second && (*iter).second->IsOpen() && 0 == strcmp((*iter).second->GetPath(), from.c_str())){ + break; // found opened fd in mapping + } + } } - // set new fd entity to map - fent[fentmapkey] = ent; - } + if(fent.end() != iter){ + // found + S3FS_PRN_DBG("[from=%s][to=%s]", from.c_str(), to.c_str()); + + FdEntity* ent = (*iter).second; + + // retrieve old fd entity from map + fent.erase(iter); + + // rename path and caches in fd entity + string fentmapkey; + if(!ent->RenamePath(to, fentmapkey)){ + S3FS_PRN_ERR("Failed to rename FdEntity object for %s to %s", from.c_str(), to.c_str()); + return; + } + + // set new fd entity to map + fent[fentmapkey] = ent; + } } bool FdManager::Close(FdEntity* ent) { - S3FS_PRN_DBG("[ent->file=%s][ent->fd=%d]", ent ? ent->GetPath() : "", ent ? ent->GetFd() : -1); + S3FS_PRN_DBG("[ent->file=%s][ent->fd=%d]", ent ? ent->GetPath() : "", ent ? ent->GetFd() : -1); - if(!ent){ - return true; // returns success - } - - AutoLock auto_lock(&FdManager::fd_manager_lock); - - for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){ - if((*iter).second == ent){ - ent->Close(); - if(!ent->IsOpen()){ - // remove found entity from map. - fent.erase(iter++); - - // check another key name for entity value to be on the safe side - for(; iter != fent.end(); ){ - if((*iter).second == ent){ - fent.erase(iter++); - }else{ - ++iter; - } - } - delete ent; - } - return true; + if(!ent){ + return true; // returns success } - } - return false; + + AutoLock auto_lock(&FdManager::fd_manager_lock); + + for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){ + if((*iter).second == ent){ + ent->Close(); + if(!ent->IsOpen()){ + // remove found entity from map. 
+ fent.erase(iter++); + + // check another key name for entity value to be on the safe side + for(; iter != fent.end(); ){ + if((*iter).second == ent){ + fent.erase(iter++); + }else{ + ++iter; + } + } + delete ent; + } + return true; + } + } + return false; } bool FdManager::ChangeEntityToTempPath(FdEntity* ent, const char* path) { - AutoLock auto_lock(&FdManager::fd_manager_lock); + AutoLock auto_lock(&FdManager::fd_manager_lock); - for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ){ - if((*iter).second == ent){ - fent.erase(iter++); + for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ){ + if((*iter).second == ent){ + fent.erase(iter++); - string tmppath; - FdManager::MakeRandomTempPath(path, tmppath); - fent[tmppath] = ent; - }else{ - ++iter; + string tmppath; + FdManager::MakeRandomTempPath(path, tmppath); + fent[tmppath] = ent; + }else{ + ++iter; + } } - } - return false; + return false; } void FdManager::CleanupCacheDir() { - //S3FS_PRN_DBG("cache cleanup requested"); + //S3FS_PRN_DBG("cache cleanup requested"); - if(!FdManager::IsCacheDir()){ - return; - } + if(!FdManager::IsCacheDir()){ + return; + } - AutoLock auto_lock_no_wait(&FdManager::cache_cleanup_lock, AutoLock::NO_WAIT); + AutoLock auto_lock_no_wait(&FdManager::cache_cleanup_lock, AutoLock::NO_WAIT); - if(auto_lock_no_wait.isLockAcquired()){ - //S3FS_PRN_DBG("cache cleanup started"); - CleanupCacheDirInternal(""); - //S3FS_PRN_DBG("cache cleanup ended"); - }else{ - // wait for other thread to finish cache cleanup - AutoLock auto_lock(&FdManager::cache_cleanup_lock); - } + if(auto_lock_no_wait.isLockAcquired()){ + //S3FS_PRN_DBG("cache cleanup started"); + CleanupCacheDirInternal(""); + //S3FS_PRN_DBG("cache cleanup ended"); + }else{ + // wait for other thread to finish cache cleanup + AutoLock auto_lock(&FdManager::cache_cleanup_lock); + } } void FdManager::CleanupCacheDirInternal(const std::string &path) { - DIR* dp; - struct dirent* dent; - std::string abs_path = 
cache_dir + "/" + bucket + path; + DIR* dp; + struct dirent* dent; + std::string abs_path = cache_dir + "/" + bucket + path; - if(NULL == (dp = opendir(abs_path.c_str()))){ - S3FS_PRN_ERR("could not open cache dir(%s) - errno(%d)", abs_path.c_str(), errno); - return; - } + if(NULL == (dp = opendir(abs_path.c_str()))){ + S3FS_PRN_ERR("could not open cache dir(%s) - errno(%d)", abs_path.c_str(), errno); + return; + } - for(dent = readdir(dp); dent; dent = readdir(dp)){ - if(0 == strcmp(dent->d_name, "..") || 0 == strcmp(dent->d_name, ".")){ - continue; + for(dent = readdir(dp); dent; dent = readdir(dp)){ + if(0 == strcmp(dent->d_name, "..") || 0 == strcmp(dent->d_name, ".")){ + continue; + } + string fullpath = abs_path; + fullpath += "/"; + fullpath += dent->d_name; + struct stat st; + if(0 != lstat(fullpath.c_str(), &st)){ + S3FS_PRN_ERR("could not get stats of file(%s) - errno(%d)", fullpath.c_str(), errno); + closedir(dp); + return; + } + string next_path = path + "/" + dent->d_name; + if(S_ISDIR(st.st_mode)){ + CleanupCacheDirInternal(next_path); + }else{ + AutoLock auto_lock(&FdManager::fd_manager_lock, AutoLock::NO_WAIT); + if (!auto_lock.isLockAcquired()) { + S3FS_PRN_ERR("could not get fd_manager_lock when clean up file(%s)", next_path.c_str()); + continue; + } + fdent_map_t::iterator iter = fent.find(next_path); + if(fent.end() == iter) { + S3FS_PRN_DBG("cleaned up: %s", next_path.c_str()); + FdManager::DeleteCacheFile(next_path.c_str()); + } + } } - string fullpath = abs_path; - fullpath += "/"; - fullpath += dent->d_name; - struct stat st; - if(0 != lstat(fullpath.c_str(), &st)){ - S3FS_PRN_ERR("could not get stats of file(%s) - errno(%d)", fullpath.c_str(), errno); - closedir(dp); - return; - } - string next_path = path + "/" + dent->d_name; - if(S_ISDIR(st.st_mode)){ - CleanupCacheDirInternal(next_path); - }else{ - AutoLock auto_lock(&FdManager::fd_manager_lock, AutoLock::NO_WAIT); - if (!auto_lock.isLockAcquired()) { - S3FS_PRN_ERR("could not get 
fd_manager_lock when clean up file(%s)", next_path.c_str()); - continue; - } - fdent_map_t::iterator iter = fent.find(next_path); - if(fent.end() == iter) { - S3FS_PRN_DBG("cleaned up: %s", next_path.c_str()); - FdManager::DeleteCacheFile(next_path.c_str()); - } - } - } - closedir(dp); + closedir(dp); } bool FdManager::ReserveDiskSpace(off_t size) { - if(IsSafeDiskSpace(NULL, size)){ - AutoLock auto_lock(&FdManager::reserved_diskspace_lock); - free_disk_space += size; - return true; - } - return false; + if(IsSafeDiskSpace(NULL, size)){ + AutoLock auto_lock(&FdManager::reserved_diskspace_lock); + free_disk_space += size; + return true; + } + return false; } void FdManager::FreeReservedDiskSpace(off_t size) { - AutoLock auto_lock(&FdManager::reserved_diskspace_lock); - free_disk_space -= size; + AutoLock auto_lock(&FdManager::reserved_diskspace_lock); + free_disk_space -= size; } // @@ -3379,192 +744,191 @@ void FdManager::FreeReservedDiskSpace(off_t size) // bool FdManager::RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const char* sub_path, int& total_file_cnt, int& err_file_cnt, int& err_dir_cnt) { - if(!cache_stat_top_dir || '\0' == cache_stat_top_dir[0] || !sub_path || '\0' == sub_path[0]){ - S3FS_PRN_ERR("Parameter cache_stat_top_dir is empty."); - return false; - } - - // open directory of cache file's stats - DIR* statsdir; - string target_dir = cache_stat_top_dir; - target_dir += sub_path; - if(NULL == (statsdir = opendir(target_dir.c_str()))){ - S3FS_PRN_ERR("Could not open directory(%s) by errno(%d)", target_dir.c_str(), errno); - return false; - } - - // loop in directory of cache file's stats - struct dirent* pdirent = NULL; - while(NULL != (pdirent = readdir(statsdir))){ - if(DT_DIR == pdirent->d_type){ - // found directory - if(0 == strcmp(pdirent->d_name, ".") || 0 == strcmp(pdirent->d_name, "..")){ - continue; - } - - // reentrant for sub directory - string subdir_path = sub_path; - subdir_path += pdirent->d_name; - subdir_path += '/'; - 
if(!RawCheckAllCache(fp, cache_stat_top_dir, subdir_path.c_str(), total_file_cnt, err_file_cnt, err_dir_cnt)){ - // put error message for this dir. - ++err_dir_cnt; - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_DIR_PROB, subdir_path.c_str()); - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD, "Something error is occurred in checking this directory"); - } - - }else{ - ++total_file_cnt; - - // make cache file path - string strOpenedWarn; - string cache_path; - string object_file_path = sub_path; - object_file_path += pdirent->d_name; - if(!FdManager::MakeCachePath(object_file_path.c_str(), cache_path, false, false) || cache_path.empty()){ - ++err_file_cnt; - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str()); - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD, "Could not make cache file path"); - continue; - } - - // check if the target file is currently in operation. - { - AutoLock auto_lock(&FdManager::fd_manager_lock); - - fdent_map_t::iterator iter = fent.find(object_file_path); - if(fent.end() != iter){ - // This file is opened now, then we need to put warning message. - strOpenedWarn = CACHEDBG_FMT_WARN_OPEN; - } - } - - // open cache file - int cache_file_fd; - if(-1 == (cache_file_fd = open(cache_path.c_str(), O_RDONLY))){ - ++err_file_cnt; - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str()); - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD, "Could not open cache file"); - continue; - } - - // get inode number for cache file - struct stat st; - if(0 != fstat(cache_file_fd, &st)){ - ++err_file_cnt; - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str()); - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD, "Could not get file inode number for cache file"); - - close(cache_file_fd); - continue; - } - ino_t cache_file_inode = st.st_ino; - - // open cache stat file and load page info. 
- PageList pagelist; - CacheFileStat cfstat(object_file_path.c_str()); - if(!cfstat.ReadOnlyOpen() || !pagelist.Serialize(cfstat, false, cache_file_inode)){ - ++err_file_cnt; - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str()); - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD, "Could not load cache file stats information"); - - close(cache_file_fd); - continue; - } - cfstat.Release(); - - // compare cache file size and stats information - if(st.st_size != pagelist.Size()){ - ++err_file_cnt; - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str()); - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD2 "The cache file size(%lld) and the value(%lld) from cache file stats are different", static_cast(st.st_size), static_cast(pagelist.Size())); - - close(cache_file_fd); - continue; - } - - // compare cache file stats and cache file blocks - fdpage_list_t err_area_list; - fdpage_list_t warn_area_list; - if(!pagelist.CompareSparseFile(cache_file_fd, st.st_size, err_area_list, warn_area_list)){ - // Found some error or warning - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str()); - if(!warn_area_list.empty()){ - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_WARN_HEAD); - for(fdpage_list_t::const_iterator witer = warn_area_list.begin(); witer != warn_area_list.end(); ++witer){ - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_PROB_BLOCK, witer->offset, witer->bytes); - } - } - if(!err_area_list.empty()){ - ++err_file_cnt; - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_ERR_HEAD); - for(fdpage_list_t::const_iterator eiter = err_area_list.begin(); eiter != err_area_list.end(); ++eiter){ - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_PROB_BLOCK, eiter->offset, eiter->bytes); - } - } - }else{ - // There is no problem! 
- if(!strOpenedWarn.empty()){ - strOpenedWarn += "\n "; - } - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_OK, object_file_path.c_str(), strOpenedWarn.c_str()); - } - err_area_list.clear(); - warn_area_list.clear(); - close(cache_file_fd); + if(!cache_stat_top_dir || '\0' == cache_stat_top_dir[0] || !sub_path || '\0' == sub_path[0]){ + S3FS_PRN_ERR("Parameter cache_stat_top_dir is empty."); + return false; } - } - closedir(statsdir); + // open directory of cache file's stats + DIR* statsdir; + string target_dir = cache_stat_top_dir; + target_dir += sub_path; + if(NULL == (statsdir = opendir(target_dir.c_str()))){ + S3FS_PRN_ERR("Could not open directory(%s) by errno(%d)", target_dir.c_str(), errno); + return false; + } - return true; + // loop in directory of cache file's stats + struct dirent* pdirent = NULL; + while(NULL != (pdirent = readdir(statsdir))){ + if(DT_DIR == pdirent->d_type){ + // found directory + if(0 == strcmp(pdirent->d_name, ".") || 0 == strcmp(pdirent->d_name, "..")){ + continue; + } + + // reentrant for sub directory + string subdir_path = sub_path; + subdir_path += pdirent->d_name; + subdir_path += '/'; + if(!RawCheckAllCache(fp, cache_stat_top_dir, subdir_path.c_str(), total_file_cnt, err_file_cnt, err_dir_cnt)){ + // put error message for this dir. 
+ ++err_dir_cnt; + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_DIR_PROB, subdir_path.c_str()); + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD, "Something error is occurred in checking this directory"); + } + + }else{ + ++total_file_cnt; + + // make cache file path + string strOpenedWarn; + string cache_path; + string object_file_path = sub_path; + object_file_path += pdirent->d_name; + if(!FdManager::MakeCachePath(object_file_path.c_str(), cache_path, false, false) || cache_path.empty()){ + ++err_file_cnt; + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str()); + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD, "Could not make cache file path"); + continue; + } + + // check if the target file is currently in operation. + { + AutoLock auto_lock(&FdManager::fd_manager_lock); + + fdent_map_t::iterator iter = fent.find(object_file_path); + if(fent.end() != iter){ + // This file is opened now, then we need to put warning message. + strOpenedWarn = CACHEDBG_FMT_WARN_OPEN; + } + } + + // open cache file + int cache_file_fd; + if(-1 == (cache_file_fd = open(cache_path.c_str(), O_RDONLY))){ + ++err_file_cnt; + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str()); + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD, "Could not open cache file"); + continue; + } + + // get inode number for cache file + struct stat st; + if(0 != fstat(cache_file_fd, &st)){ + ++err_file_cnt; + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str()); + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD, "Could not get file inode number for cache file"); + + close(cache_file_fd); + continue; + } + ino_t cache_file_inode = st.st_ino; + + // open cache stat file and load page info. 
+ PageList pagelist; + CacheFileStat cfstat(object_file_path.c_str()); + if(!cfstat.ReadOnlyOpen() || !pagelist.Serialize(cfstat, false, cache_file_inode)){ + ++err_file_cnt; + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str()); + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD, "Could not load cache file stats information"); + + close(cache_file_fd); + continue; + } + cfstat.Release(); + + // compare cache file size and stats information + if(st.st_size != pagelist.Size()){ + ++err_file_cnt; + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str()); + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD2 "The cache file size(%lld) and the value(%lld) from cache file stats are different", static_cast(st.st_size), static_cast(pagelist.Size())); + + close(cache_file_fd); + continue; + } + + // compare cache file stats and cache file blocks + fdpage_list_t err_area_list; + fdpage_list_t warn_area_list; + if(!pagelist.CompareSparseFile(cache_file_fd, st.st_size, err_area_list, warn_area_list)){ + // Found some error or warning + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str()); + if(!warn_area_list.empty()){ + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_WARN_HEAD); + for(fdpage_list_t::const_iterator witer = warn_area_list.begin(); witer != warn_area_list.end(); ++witer){ + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_PROB_BLOCK, static_cast(witer->offset), static_cast(witer->bytes)); + } + } + if(!err_area_list.empty()){ + ++err_file_cnt; + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_ERR_HEAD); + for(fdpage_list_t::const_iterator eiter = err_area_list.begin(); eiter != err_area_list.end(); ++eiter){ + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_PROB_BLOCK, static_cast(eiter->offset), static_cast(eiter->bytes)); + } + } + }else{ + // There is no problem! 
+ if(!strOpenedWarn.empty()){ + strOpenedWarn += "\n "; + } + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_OK, object_file_path.c_str(), strOpenedWarn.c_str()); + } + err_area_list.clear(); + warn_area_list.clear(); + close(cache_file_fd); + } + } + closedir(statsdir); + + return true; } bool FdManager::CheckAllCache() { - if(!FdManager::HaveLseekHole()){ - S3FS_PRN_ERR("lseek does not support SEEK_DATA/SEEK_HOLE, then could not check cache."); - return false; - } - - FILE* fp; - if(FdManager::check_cache_output.empty()){ - fp = stdout; - }else{ - if(NULL == (fp = fopen(FdManager::check_cache_output.c_str(), "a+"))){ - S3FS_PRN_ERR("Could not open(create) output file(%s) for checking all cache by errno(%d)", FdManager::check_cache_output.c_str(), errno); - return false; + if(!FdManager::HaveLseekHole()){ + S3FS_PRN_ERR("lseek does not support SEEK_DATA/SEEK_HOLE, then could not check cache."); + return false; } - } - // print head message - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_HEAD); + FILE* fp; + if(FdManager::check_cache_output.empty()){ + fp = stdout; + }else{ + if(NULL == (fp = fopen(FdManager::check_cache_output.c_str(), "a+"))){ + S3FS_PRN_ERR("Could not open(create) output file(%s) for checking all cache by errno(%d)", FdManager::check_cache_output.c_str(), errno); + return false; + } + } - // Loop in directory of cache file's stats - string top_path = CacheFileStat::GetCacheFileStatTopDir(); - int total_file_cnt = 0; - int err_file_cnt = 0; - int err_dir_cnt = 0; - bool result = RawCheckAllCache(fp, top_path.c_str(), "/", total_file_cnt, err_file_cnt, err_dir_cnt); - if(!result){ - S3FS_PRN_ERR("Processing failed due to some problem."); - } + // print head message + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_HEAD); - // print foot message - S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FOOT, total_file_cnt, err_file_cnt, err_dir_cnt); + // Loop in directory of cache file's stats + string top_path = CacheFileStat::GetCacheFileStatTopDir(); + int total_file_cnt = 0; + int err_file_cnt = 0; + int 
err_dir_cnt = 0; + bool result = RawCheckAllCache(fp, top_path.c_str(), "/", total_file_cnt, err_file_cnt, err_dir_cnt); + if(!result){ + S3FS_PRN_ERR("Processing failed due to some problem."); + } - if(stdout != fp){ - fclose(fp); - } + // print foot message + S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FOOT, total_file_cnt, err_file_cnt, err_dir_cnt); - return result; + if(stdout != fp){ + fclose(fp); + } + + return result; } /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/fdcache.h b/src/fdcache.h index 607ba1f..3bd3b6e 100644 --- a/src/fdcache.h +++ b/src/fdcache.h @@ -17,205 +17,11 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ -#ifndef FD_CACHE_H_ -#define FD_CACHE_H_ -#include +#ifndef S3FS_FDCACHE_H_ +#define S3FS_FDCACHE_H_ -//------------------------------------------------ -// CacheFileStat -//------------------------------------------------ -class CacheFileStat -{ - private: - std::string path; - int fd; - - private: - static bool MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir = true); - - bool RawOpen(bool readonly); - - public: - static std::string GetCacheFileStatTopDir(void); - static bool DeleteCacheFileStat(const char* path); - static bool CheckCacheFileStatTopDir(void); - static bool DeleteCacheFileStatDirectory(void); - static bool RenameCacheFileStat(const char* oldpath, const char* newpath); - - explicit CacheFileStat(const char* tpath = NULL); - ~CacheFileStat(); - - bool Open(void); - bool ReadOnlyOpen(void); - bool Release(void); - bool SetPath(const char* tpath, bool is_open = true); - int GetFd(void) const { return fd; } -}; - -//------------------------------------------------ -// fdpage & 
PageList -//------------------------------------------------ -// page block information -struct fdpage -{ - off_t offset; - off_t bytes; - bool loaded; - bool modified; - - fdpage(off_t start = 0, off_t size = 0, bool is_loaded = false, bool is_modified = false) - : offset(start), bytes(size), loaded(is_loaded), modified(is_modified) {} - - off_t next(void) const { return (offset + bytes); } - off_t end(void) const { return (0 < bytes ? offset + bytes - 1 : 0); } -}; -typedef std::list fdpage_list_t; - -class FdEntity; - -// -// Management of loading area/modifying -// -// cppcheck-suppress copyCtorAndEqOperator -class PageList -{ - friend class FdEntity; // only one method access directly pages. - - private: - fdpage_list_t pages; - - public: - enum page_status{ - PAGE_NOT_LOAD_MODIFIED = 0, - PAGE_LOADED, - PAGE_MODIFIED, - PAGE_LOAD_MODIFIED - }; - - private: - static bool GetSparseFilePages(int fd, size_t file_size, fdpage_list_t& sparse_list); - static bool CheckZeroAreaInFile(int fd, off_t start, size_t bytes); - static bool CheckAreaInSparseFile(const struct fdpage& checkpage, const fdpage_list_t& sparse_list, int fd, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list); - - void Clear(void); - bool Compress(); - bool Parse(off_t new_pos); - - public: - static void FreeList(fdpage_list_t& list); - - explicit PageList(off_t size = 0, bool is_loaded = false, bool is_modified = false); - explicit PageList(const PageList& other); - ~PageList(); - - bool Init(off_t size, bool is_loaded, bool is_modified); - off_t Size(void) const; - bool Resize(off_t size, bool is_loaded, bool is_modified); - - bool IsPageLoaded(off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list - bool SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus = PAGE_LOADED, bool is_compress = true); - bool FindUnloadedPage(off_t start, off_t& resstart, off_t& ressize) const; - off_t GetTotalUnloadedPageSize(off_t start = 0, off_t size = 0) const; 
// size=0 is checking to end of list - int GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list - bool GetPageListsForMultipartUpload(fdpage_list_t& dlpages, fdpage_list_t& mixuppages, off_t max_partsize); - - bool IsModified(void) const; - bool ClearAllModified(void); - - bool Serialize(CacheFileStat& file, bool is_output, ino_t inode); - void Dump(void) const; - bool CompareSparseFile(int fd, size_t file_size, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list); -}; - -//------------------------------------------------ -// class FdEntity -//------------------------------------------------ -typedef std::list headers_list_t; - -class FdEntity -{ - private: - static bool mixmultipart; // whether multipart uploading can use copy api. - - pthread_mutex_t fdent_lock; - bool is_lock_init; - int refcnt; // reference count - std::string path; // object path - int fd; // file descriptor(tmp file or cache file) - FILE* pfile; // file pointer(tmp file or cache file) - ino_t inode; // inode number for cache file - headers_t orgmeta; // original headers at opening - off_t size_orgmeta; // original file size in original headers - - pthread_mutex_t fdent_data_lock;// protects the following members - PageList pagelist; - std::string upload_id; // for no cached multipart uploading when no disk space - etaglist_t etaglist; // for no cached multipart uploading when no disk space - off_t mp_start; // start position for no cached multipart(write method only) - off_t mp_size; // size for no cached multipart(write method only) - std::string cachepath; // local cache file path - // (if this is empty, does not load/save pagelist.) 
- std::string mirrorpath; // mirror file path to local cache file path - headers_list_t pending_headers;// pending update headers - - private: - static int FillFile(int fd, unsigned char byte, off_t size, off_t start); - static ino_t GetInode(int fd); - - void Clear(void); - ino_t GetInode(void); - int OpenMirrorFile(void); - bool SetAllStatus(bool is_loaded); // [NOTE] not locking - bool SetAllStatusUnloaded(void) { return SetAllStatus(false); } - int UploadPendingMeta(void); - - public: - static bool SetNoMixMultipart(void); - - explicit FdEntity(const char* tpath = NULL, const char* cpath = NULL); - ~FdEntity(); - - void Close(void); - bool IsOpen(void) const { return (-1 != fd); } - int Open(headers_t* pmeta = NULL, off_t size = -1, time_t time = -1, bool no_fd_lock_wait = false); - bool OpenAndLoadAll(headers_t* pmeta = NULL, off_t* size = NULL, bool force_load = false); - int Dup(bool lock_already_held = false); - - const char* GetPath(void) const { return path.c_str(); } - bool RenamePath(const std::string& newpath, std::string& fentmapkey); - int GetFd(void) const { return fd; } - bool IsModified(void) const; - bool MergeOrgMeta(headers_t& updatemeta); - - bool GetStats(struct stat& st, bool lock_already_held = false); - int SetCtime(time_t time, bool lock_already_held = false); - int SetMtime(time_t time, bool lock_already_held = false); - bool UpdateCtime(void); - bool UpdateMtime(void); - bool GetSize(off_t& size); - bool GetXattr(std::string& xattr); - bool SetXattr(const std::string& xattr); - bool SetMode(mode_t mode); - bool SetUId(uid_t uid); - bool SetGId(gid_t gid); - bool SetContentType(const char* path); - - int Load(off_t start = 0, off_t size = 0, bool lock_already_held = false, bool is_modified_flag = false); // size=0 means loading to end - int NoCacheLoadAndPost(off_t start = 0, off_t size = 0); // size=0 means loading to end - int NoCachePreMultipartPost(void); - int NoCacheMultipartPost(int tgfd, off_t start, off_t size); - int 
NoCacheCompleteMultipartPost(void); - - int RowFlush(const char* tpath, bool force_sync = false); - int Flush(bool force_sync = false) { return RowFlush(NULL, force_sync); } - - ssize_t Read(char* bytes, off_t start, size_t size, bool force_load = false); - ssize_t Write(const char* bytes, off_t start, size_t size); - - bool ReserveDiskSpace(off_t size); -}; -typedef std::map fdent_map_t; // key=path, value=FdEntity* +#include "fdcache_entity.h" //------------------------------------------------ // class FdManager @@ -223,71 +29,71 @@ typedef std::map fdent_map_t; // key=path, value class FdManager { private: - static FdManager singleton; - static pthread_mutex_t fd_manager_lock; - static pthread_mutex_t cache_cleanup_lock; - static pthread_mutex_t reserved_diskspace_lock; - static bool is_lock_init; - static std::string cache_dir; - static bool check_cache_dir_exist; - static off_t free_disk_space; // limit free disk space - static std::string check_cache_output; - static bool checked_lseek; - static bool have_lseek_hole; + static FdManager singleton; + static pthread_mutex_t fd_manager_lock; + static pthread_mutex_t cache_cleanup_lock; + static pthread_mutex_t reserved_diskspace_lock; + static bool is_lock_init; + static std::string cache_dir; + static bool check_cache_dir_exist; + static off_t free_disk_space; // limit free disk space + static std::string check_cache_output; + static bool checked_lseek; + static bool have_lseek_hole; - fdent_map_t fent; + fdent_map_t fent; private: - static off_t GetFreeDiskSpace(const char* path); - void CleanupCacheDirInternal(const std::string &path = ""); - bool RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const char* sub_path, int& total_file_cnt, int& err_file_cnt, int& err_dir_cnt); + static off_t GetFreeDiskSpace(const char* path); + void CleanupCacheDirInternal(const std::string &path = ""); + bool RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const char* sub_path, int& total_file_cnt, int& 
err_file_cnt, int& err_dir_cnt); public: - FdManager(); - ~FdManager(); + FdManager(); + ~FdManager(); - // Reference singleton - static FdManager* get(void) { return &singleton; } + // Reference singleton + static FdManager* get(void) { return &singleton; } - static bool DeleteCacheDirectory(void); - static int DeleteCacheFile(const char* path); - static bool SetCacheDir(const char* dir); - static bool IsCacheDir(void) { return !FdManager::cache_dir.empty(); } - static const char* GetCacheDir(void) { return FdManager::cache_dir.c_str(); } - static bool SetCacheCheckOutput(const char* path); - static const char* GetCacheCheckOutput(void) { return FdManager::check_cache_output.c_str(); } - static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true, bool is_mirror_path = false); - static bool CheckCacheTopDir(void); - static bool MakeRandomTempPath(const char* path, std::string& tmppath); - static bool SetCheckCacheDirExist(bool is_check); - static bool CheckCacheDirExist(void); + static bool DeleteCacheDirectory(void); + static int DeleteCacheFile(const char* path); + static bool SetCacheDir(const char* dir); + static bool IsCacheDir(void) { return !FdManager::cache_dir.empty(); } + static const char* GetCacheDir(void) { return FdManager::cache_dir.c_str(); } + static bool SetCacheCheckOutput(const char* path); + static const char* GetCacheCheckOutput(void) { return FdManager::check_cache_output.c_str(); } + static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true, bool is_mirror_path = false); + static bool CheckCacheTopDir(void); + static bool MakeRandomTempPath(const char* path, std::string& tmppath); + static bool SetCheckCacheDirExist(bool is_check); + static bool CheckCacheDirExist(void); - static off_t GetEnsureFreeDiskSpace(); - static off_t SetEnsureFreeDiskSpace(off_t size); - static bool IsSafeDiskSpace(const char* path, off_t size); - static void FreeReservedDiskSpace(off_t size); 
- static bool ReserveDiskSpace(off_t size); - static bool HaveLseekHole(void); + static off_t GetEnsureFreeDiskSpace(); + static off_t SetEnsureFreeDiskSpace(off_t size); + static bool IsSafeDiskSpace(const char* path, off_t size); + static void FreeReservedDiskSpace(off_t size); + static bool ReserveDiskSpace(off_t size); + static bool HaveLseekHole(void); - // Return FdEntity associated with path, returning NULL on error. This operation increments the reference count; callers must decrement via Close after use. - FdEntity* GetFdEntity(const char* path, int existfd = -1); - FdEntity* Open(const char* path, headers_t* pmeta = NULL, off_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true, bool no_fd_lock_wait = false); - FdEntity* ExistOpen(const char* path, int existfd = -1, bool ignore_existfd = false); - void Rename(const std::string &from, const std::string &to); - bool Close(FdEntity* ent); - bool ChangeEntityToTempPath(FdEntity* ent, const char* path); - void CleanupCacheDir(); + // Return FdEntity associated with path, returning NULL on error. This operation increments the reference count; callers must decrement via Close after use. 
+ FdEntity* GetFdEntity(const char* path, int existfd = -1); + FdEntity* Open(const char* path, headers_t* pmeta = NULL, off_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true, bool no_fd_lock_wait = false); + FdEntity* ExistOpen(const char* path, int existfd = -1, bool ignore_existfd = false); + void Rename(const std::string &from, const std::string &to); + bool Close(FdEntity* ent); + bool ChangeEntityToTempPath(FdEntity* ent, const char* path); + void CleanupCacheDir(); - bool CheckAllCache(void); + bool CheckAllCache(void); }; -#endif // FD_CACHE_H_ +#endif // S3FS_FDCACHE_H_ /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/fdcache_entity.cpp b/src/fdcache_entity.cpp new file mode 100644 index 0000000..ba58a5f --- /dev/null +++ b/src/fdcache_entity.cpp @@ -0,0 +1,1536 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Takeshi Nakatani + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "common.h" +#include "s3fs.h" +#include "fdcache_entity.h" +#include "fdcache.h" +#include "string_util.h" +#include "autolock.h" +#include "curl.h" + +using namespace std; + +//------------------------------------------------ +// Symbols +//------------------------------------------------ +static const int MAX_MULTIPART_CNT = 10 * 1000; // S3 multipart max count + +//------------------------------------------------ +// FdEntity class variables +//------------------------------------------------ +bool FdEntity::mixmultipart = true; + +//------------------------------------------------ +// FdEntity class methods +//------------------------------------------------ +bool FdEntity::SetNoMixMultipart() +{ + bool old = mixmultipart; + mixmultipart = false; + return old; +} + +int FdEntity::FillFile(int fd, unsigned char byte, off_t size, off_t start) +{ + unsigned char bytes[1024 * 32]; // 32kb + memset(bytes, byte, min(static_cast(sizeof(bytes)), size)); + + for(off_t total = 0, onewrote = 0; total < size; total += onewrote){ + if(-1 == (onewrote = pwrite(fd, bytes, min(static_cast(sizeof(bytes)), size - total), start + total))){ + S3FS_PRN_ERR("pwrite failed. errno(%d)", errno); + return -errno; + } + } + return 0; +} + +// [NOTE] +// If fd is wrong or something error is occurred, return 0. +// The ino_t is allowed zero, but inode 0 is not realistic. +// So this method returns 0 on error assuming the correct +// inode is never 0. +// The caller must have exclusive control. 
+// +ino_t FdEntity::GetInode(int fd) +{ + if(-1 == fd){ + S3FS_PRN_ERR("file descriptor is wrong."); + return 0; + } + + struct stat st; + if(0 != fstat(fd, &st)){ + S3FS_PRN_ERR("could not get stat for file descriptor(%d) by errno(%d).", fd, errno); + return 0; + } + return st.st_ino; +} + +//------------------------------------------------ +// FdEntity methods +//------------------------------------------------ +FdEntity::FdEntity(const char* tpath, const char* cpath) : + is_lock_init(false), refcnt(0), path(SAFESTRPTR(tpath)), + fd(-1), pfile(NULL), inode(0), size_orgmeta(0), upload_id(""), mp_start(0), mp_size(0), + cachepath(SAFESTRPTR(cpath)), mirrorpath("") +{ + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); +#if S3FS_PTHREAD_ERRORCHECK + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); +#endif + int res; + if(0 != (res = pthread_mutex_init(&fdent_lock, &attr))){ + S3FS_PRN_CRIT("failed to init fdent_lock: %d", res); + abort(); + } + if(0 != (res = pthread_mutex_init(&fdent_data_lock, &attr))){ + S3FS_PRN_CRIT("failed to init fdent_data_lock: %d", res); + abort(); + } + is_lock_init = true; +} + +FdEntity::~FdEntity() +{ + Clear(); + + if(is_lock_init){ + int res; + if(0 != (res = pthread_mutex_destroy(&fdent_data_lock))){ + S3FS_PRN_CRIT("failed to destroy fdent_data_lock: %d", res); + abort(); + } + if(0 != (res = pthread_mutex_destroy(&fdent_lock))){ + S3FS_PRN_CRIT("failed to destroy fdent_lock: %d", res); + abort(); + } + is_lock_init = false; + } +} + +void FdEntity::Clear() +{ + AutoLock auto_lock(&fdent_lock); + AutoLock auto_data_lock(&fdent_data_lock); + + if(-1 != fd){ + if(!cachepath.empty()){ + // [NOTE] + // Compare the inode of the existing cache file with the inode of + // the cache file output by this object, and if they are the same, + // serialize the pagelist. 
+ // + ino_t cur_inode = GetInode(); + if(0 != cur_inode && cur_inode == inode){ + CacheFileStat cfstat(path.c_str()); + if(!pagelist.Serialize(cfstat, true, inode)){ + S3FS_PRN_WARN("failed to save cache stat file(%s).", path.c_str()); + } + } + } + if(pfile){ + fclose(pfile); + pfile = NULL; + } + fd = -1; + inode = 0; + + if(!mirrorpath.empty()){ + if(-1 == unlink(mirrorpath.c_str())){ + S3FS_PRN_WARN("failed to remove mirror cache file(%s) by errno(%d).", mirrorpath.c_str(), errno); + } + mirrorpath.erase(); + } + } + pagelist.Init(0, false, false); + refcnt = 0; + path = ""; + cachepath = ""; +} + +// [NOTE] +// This method returns the inode of the file in cachepath. +// The return value is the same as the class method GetInode(). +// The caller must have exclusive control. +// +ino_t FdEntity::GetInode() +{ + if(cachepath.empty()){ + S3FS_PRN_INFO("cache file path is empty, then return inode as 0."); + return 0; + } + + struct stat st; + if(0 != stat(cachepath.c_str(), &st)){ + S3FS_PRN_INFO("could not get stat for file(%s) by errno(%d).", cachepath.c_str(), errno); + return 0; + } + return st.st_ino; +} + +void FdEntity::Close() +{ + AutoLock auto_lock(&fdent_lock); + + S3FS_PRN_DBG("[path=%s][fd=%d][refcnt=%d]", path.c_str(), fd, (-1 != fd ? refcnt - 1 : refcnt)); + + if(-1 != fd){ + if(0 < refcnt){ + refcnt--; + }else{ + S3FS_PRN_EXIT("reference count underflow"); + abort(); + } + if(0 == refcnt){ + AutoLock auto_data_lock(&fdent_data_lock); + if(!cachepath.empty()){ + // [NOTE] + // Compare the inode of the existing cache file with the inode of + // the cache file output by this object, and if they are the same, + // serialize the pagelist. 
+ // + ino_t cur_inode = GetInode(); + if(0 != cur_inode && cur_inode == inode){ + CacheFileStat cfstat(path.c_str()); + if(!pagelist.Serialize(cfstat, true, inode)){ + S3FS_PRN_WARN("failed to save cache stat file(%s).", path.c_str()); + } + } + } + if(pfile){ + fclose(pfile); + pfile = NULL; + } + fd = -1; + inode = 0; + + if(!mirrorpath.empty()){ + if(-1 == unlink(mirrorpath.c_str())){ + S3FS_PRN_WARN("failed to remove mirror cache file(%s) by errno(%d).", mirrorpath.c_str(), errno); + } + mirrorpath.erase(); + } + } + } +} + +int FdEntity::Dup(bool lock_already_held) +{ + AutoLock auto_lock(&fdent_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE); + + S3FS_PRN_DBG("[path=%s][fd=%d][refcnt=%d]", path.c_str(), fd, (-1 != fd ? refcnt + 1 : refcnt)); + + if(-1 != fd){ + refcnt++; + } + return fd; +} + +// +// Open mirror file which is linked cache file. +// +int FdEntity::OpenMirrorFile() +{ + if(cachepath.empty()){ + S3FS_PRN_ERR("cache path is empty, why come here"); + return -EIO; + } + + // make temporary directory + string bupdir; + if(!FdManager::MakeCachePath(NULL, bupdir, true, true)){ + S3FS_PRN_ERR("could not make bup cache directory path or create it."); + return -EIO; + } + + // create seed generating mirror file name + unsigned int seed = static_cast(time(NULL)); + int urandom_fd; + if(-1 != (urandom_fd = open("/dev/urandom", O_RDONLY))){ + unsigned int rand_data; + if(sizeof(rand_data) == read(urandom_fd, &rand_data, sizeof(rand_data))){ + seed ^= rand_data; + } + close(urandom_fd); + } + + // try to link mirror file + while(true){ + // make random(temp) file path + // (do not care for threading, because allowed any value returned.) 
+ // + char szfile[NAME_MAX + 1]; + sprintf(szfile, "%x.tmp", rand_r(&seed)); + mirrorpath = bupdir + "/" + szfile; + + // link mirror file to cache file + if(0 == link(cachepath.c_str(), mirrorpath.c_str())){ + break; + } + if(EEXIST != errno){ + S3FS_PRN_ERR("could not link mirror file(%s) to cache file(%s) by errno(%d).", mirrorpath.c_str(), cachepath.c_str(), errno); + return -errno; + } + ++seed; + } + + // open mirror file + int mirrorfd; + if(-1 == (mirrorfd = open(mirrorpath.c_str(), O_RDWR))){ + S3FS_PRN_ERR("could not open mirror file(%s) by errno(%d).", mirrorpath.c_str(), errno); + return -errno; + } + return mirrorfd; +} + +int FdEntity::Open(headers_t* pmeta, off_t size, time_t time, bool no_fd_lock_wait) +{ + AutoLock auto_lock(&fdent_lock, no_fd_lock_wait ? AutoLock::NO_WAIT : AutoLock::NONE); + + S3FS_PRN_DBG("[path=%s][fd=%d][size=%lld][time=%lld]", path.c_str(), fd, static_cast(size), static_cast(time)); + + if (!auto_lock.isLockAcquired()) { + // had to wait for fd lock, return + S3FS_PRN_ERR("Could not get lock."); + return -EIO; + } + + AutoLock auto_data_lock(&fdent_data_lock); + if(-1 != fd){ + // already opened, needs to increment refcnt. + Dup(/*lock_already_held=*/ true); + + // check only file size(do not need to save cfs and time. + if(0 <= size && pagelist.Size() != size){ + // truncate temporary file size + if(-1 == ftruncate(fd, size)){ + S3FS_PRN_ERR("failed to truncate temporary file(%d) by errno(%d).", fd, errno); + if(0 < refcnt){ + refcnt--; + } + return -EIO; + } + // resize page list + if(!pagelist.Resize(size, false, true)){ // Areas with increased size are modified + S3FS_PRN_ERR("failed to truncate temporary file information(%d).", fd); + if(0 < refcnt){ + refcnt--; + } + return -EIO; + } + } + // set original headers and set size. + off_t new_size = (0 <= size ? 
size : size_orgmeta); + if(pmeta){ + orgmeta = *pmeta; + new_size = get_size(orgmeta); + } + if(new_size < size_orgmeta){ + size_orgmeta = new_size; + } + return 0; + } + + bool need_save_csf = false; // need to save(reset) cache stat file + bool is_truncate = false; // need to truncate + + if(!cachepath.empty()){ + // using cache + + struct stat st; + if(stat(cachepath.c_str(), &st) == 0){ + if(st.st_mtime < time){ + S3FS_PRN_DBG("cache file stale, removing: %s", cachepath.c_str()); + if(unlink(cachepath.c_str()) != 0){ + return (0 == errno ? -EIO : -errno); + } + } + } + + // open cache and cache stat file, load page info. + CacheFileStat cfstat(path.c_str()); + + // try to open cache file + if( -1 != (fd = open(cachepath.c_str(), O_RDWR)) && + 0 != (inode = FdEntity::GetInode(fd)) && + pagelist.Serialize(cfstat, false, inode) ) + { + // succeed to open cache file and to load stats data + memset(&st, 0, sizeof(struct stat)); + if(-1 == fstat(fd, &st)){ + S3FS_PRN_ERR("fstat is failed. errno(%d)", errno); + fd = -1; + inode = 0; + return (0 == errno ? -EIO : -errno); + } + // check size, st_size, loading stat file + if(-1 == size){ + if(st.st_size != pagelist.Size()){ + pagelist.Resize(st.st_size, false, true); // Areas with increased size are modified + need_save_csf = true; // need to update page info + } + size = st.st_size; + }else{ + if(size != pagelist.Size()){ + pagelist.Resize(size, false, true); // Areas with increased size are modified + need_save_csf = true; // need to update page info + } + if(size != st.st_size){ + is_truncate = true; + } + } + + }else{ + if(-1 != fd){ + close(fd); + } + inode = 0; + + // could not open cache file or could not load stats data, so initialize it. + if(-1 == (fd = open(cachepath.c_str(), O_CREAT|O_RDWR|O_TRUNC, 0600))){ + S3FS_PRN_ERR("failed to open file(%s). 
errno(%d)", cachepath.c_str(), errno); + + // remove cache stat file if it is existed + if(!CacheFileStat::DeleteCacheFileStat(path.c_str())){ + if(ENOENT != errno){ + S3FS_PRN_WARN("failed to delete current cache stat file(%s) by errno(%d), but continue...", path.c_str(), errno); + } + } + return (0 == errno ? -EIO : -errno); + } + need_save_csf = true; // need to update page info + inode = FdEntity::GetInode(fd); + if(-1 == size){ + size = 0; + pagelist.Init(0, false, false); + }else{ + // [NOTE] + // The modify flag must not be set when opening a file, + // if the time parameter(mtime) is specified(not -1) and + // the cache file does not exist. + // If mtime is specified for the file and the cache file + // mtime is older than it, the cache file is removed and + // the processing comes here. + // + pagelist.Resize(size, false, (0 <= time ? false : true)); + + is_truncate = true; + } + } + + // open mirror file + int mirrorfd; + if(0 >= (mirrorfd = OpenMirrorFile())){ + S3FS_PRN_ERR("failed to open mirror file linked cache file(%s).", cachepath.c_str()); + return (0 == mirrorfd ? -EIO : mirrorfd); + } + // switch fd + close(fd); + fd = mirrorfd; + + // make file pointer(for being same tmpfile) + if(NULL == (pfile = fdopen(fd, "wb"))){ + S3FS_PRN_ERR("failed to get fileno(%s). errno(%d)", cachepath.c_str(), errno); + close(fd); + fd = -1; + inode = 0; + return (0 == errno ? -EIO : -errno); + } + + }else{ + // not using cache + inode = 0; + + // open temporary file + if(NULL == (pfile = tmpfile()) || -1 ==(fd = fileno(pfile))){ + S3FS_PRN_ERR("failed to open tmp file. err(%d)", errno); + if(pfile){ + fclose(pfile); + pfile = NULL; + } + return (0 == errno ? -EIO : -errno); + } + if(-1 == size){ + size = 0; + pagelist.Init(0, false, false); + }else{ + // [NOTE] + // The modify flag must not be set when opening a file, + // if the time parameter(mtime) is specified(not -1) and + // the cache file does not exist. 
+ // If mtime is specified for the file and the cache file + // mtime is older than it, the cache file is removed and + // the processing comes here. + // + pagelist.Resize(size, false, (0 <= time ? false : true)); + is_truncate = true; + } + } + + // truncate cache(tmp) file + if(is_truncate){ + if(0 != ftruncate(fd, size) || 0 != fsync(fd)){ + S3FS_PRN_ERR("ftruncate(%s) or fsync returned err(%d)", cachepath.c_str(), errno); + fclose(pfile); + pfile = NULL; + fd = -1; + inode = 0; + return (0 == errno ? -EIO : -errno); + } + } + + // reset cache stat file + if(need_save_csf){ + CacheFileStat cfstat(path.c_str()); + if(!pagelist.Serialize(cfstat, true, inode)){ + S3FS_PRN_WARN("failed to save cache stat file(%s), but continue...", path.c_str()); + } + } + + // init internal data + refcnt = 1; + + // set original headers and size in it. + if(pmeta){ + orgmeta = *pmeta; + size_orgmeta = get_size(orgmeta); + }else{ + orgmeta.clear(); + size_orgmeta = 0; + } + + // set mtime(set "x-amz-meta-mtime" in orgmeta) + if(-1 != time){ + if(0 != SetMtime(time, /*lock_already_held=*/ true)){ + S3FS_PRN_ERR("failed to set mtime. errno(%d)", errno); + fclose(pfile); + pfile = NULL; + fd = -1; + inode = 0; + return (0 == errno ? -EIO : -errno); + } + } + return 0; +} + +// [NOTE] +// This method is called from only nocopyapi functions. +// So we do not check disk space for this option mode, if there is no enough +// disk space this method will be failed. 
//
// Open the entity (when not opened yet) and download the whole object into
// the local cache/temporary file. Returns true on success; *size (when
// given) receives the total file size.
bool FdEntity::OpenAndLoadAll(headers_t* pmeta, off_t* size, bool force_load)
{
    AutoLock auto_lock(&fdent_lock);
    int result;

    S3FS_PRN_INFO3("[path=%s][fd=%d]", path.c_str(), fd);

    if(-1 == fd){
        // not opened yet: open with the given original headers
        if(0 != Open(pmeta)){
            return false;
        }
    }
    AutoLock auto_data_lock(&fdent_data_lock);

    if(force_load){
        // mark every page unloaded so Load() re-downloads the whole object
        SetAllStatusUnloaded();
    }
    //
    // TODO: possibly do background for delay loading
    //
    if(0 != (result = Load(/*start=*/ 0, /*size=*/ 0, /*lock_already_held=*/ true))){
        S3FS_PRN_ERR("could not download, result(%d)", result);
        return false;
    }
    if(size){
        *size = pagelist.Size();
    }
    return true;
}

//
// Rename file path.
//
// This method sets the FdManager::fent map registration key to fentmapkey.
//
// [NOTE]
// This method changes the file path of FdEntity.
// Old file is deleted after linking to the new file path, and this works
// without problem because the file descriptor is not affected even if the
// cache file is open.
// The mirror file descriptor is also the same. The mirror file path does
// not need to be changed and will remain as it is.
+// +bool FdEntity::RenamePath(const string& newpath, string& fentmapkey) +{ + if(!cachepath.empty()){ + // has cache path + + // make new cache path + string newcachepath; + if(!FdManager::MakeCachePath(newpath.c_str(), newcachepath, true)){ + S3FS_PRN_ERR("failed to make cache path for object(%s).", newpath.c_str()); + return false; + } + + // rename cache file + if(-1 == rename(cachepath.c_str(), newcachepath.c_str())){ + S3FS_PRN_ERR("failed to rename old cache path(%s) to new cache path(%s) by errno(%d).", cachepath.c_str(), newcachepath.c_str(), errno); + return false; + } + + // link and unlink cache file stat + if(!CacheFileStat::RenameCacheFileStat(path.c_str(), newpath.c_str())){ + S3FS_PRN_ERR("failed to rename cache file stat(%s to %s).", path.c_str(), newpath.c_str()); + return false; + } + fentmapkey = newpath; + cachepath = newcachepath; + + }else{ + // does not have cache path + fentmapkey.erase(); + FdManager::MakeRandomTempPath(newpath.c_str(), fentmapkey); + } + // set new path + path = newpath; + + return true; +} + +bool FdEntity::IsModified(void) const +{ + AutoLock auto_data_lock(const_cast(&fdent_data_lock)); + return pagelist.IsModified(); +} + +bool FdEntity::GetStats(struct stat& st, bool lock_already_held) +{ + AutoLock auto_lock(&fdent_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE); + if(-1 == fd){ + return false; + } + + memset(&st, 0, sizeof(struct stat)); + if(-1 == fstat(fd, &st)){ + S3FS_PRN_ERR("fstat failed. errno(%d)", errno); + return false; + } + return true; +} + +int FdEntity::SetCtime(time_t time, bool lock_already_held) +{ + AutoLock auto_lock(&fdent_lock, lock_already_held ? 
AutoLock::ALREADY_LOCKED : AutoLock::NONE); + + S3FS_PRN_INFO3("[path=%s][fd=%d][time=%lld]", path.c_str(), fd, static_cast(time)); + + if(-1 == time){ + return 0; + } + orgmeta["x-amz-meta-ctime"] = str(time); + return 0; +} + +int FdEntity::SetMtime(time_t time, bool lock_already_held) +{ + AutoLock auto_lock(&fdent_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE); + + S3FS_PRN_INFO3("[path=%s][fd=%d][time=%lld]", path.c_str(), fd, static_cast(time)); + + if(-1 == time){ + return 0; + } + + if(-1 != fd){ + struct timeval tv[2]; + tv[0].tv_sec = time; + tv[0].tv_usec= 0L; + tv[1].tv_sec = tv[0].tv_sec; + tv[1].tv_usec= 0L; + if(-1 == futimes(fd, tv)){ + S3FS_PRN_ERR("futimes failed. errno(%d)", errno); + return -errno; + } + }else if(!cachepath.empty()){ + // not opened file yet. + struct utimbuf n_mtime; + n_mtime.modtime = time; + n_mtime.actime = time; + if(-1 == utime(cachepath.c_str(), &n_mtime)){ + S3FS_PRN_ERR("utime failed. errno(%d)", errno); + return -errno; + } + } + orgmeta["x-amz-meta-ctime"] = str(time); + orgmeta["x-amz-meta-mtime"] = str(time); + + return 0; +} + +bool FdEntity::UpdateCtime() +{ + AutoLock auto_lock(&fdent_lock); + struct stat st; + if(!GetStats(st, /*lock_already_held=*/ true)){ + return false; + } + orgmeta["x-amz-meta-ctime"] = str(st.st_ctime); + return true; +} + +bool FdEntity::UpdateMtime() +{ + AutoLock auto_lock(&fdent_lock); + struct stat st; + if(!GetStats(st, /*lock_already_held=*/ true)){ + return false; + } + orgmeta["x-amz-meta-ctime"] = str(st.st_ctime); + orgmeta["x-amz-meta-mtime"] = str(st.st_mtime); + return true; +} + +bool FdEntity::GetSize(off_t& size) +{ + AutoLock auto_lock(&fdent_lock); + if(-1 == fd){ + return false; + } + + AutoLock auto_data_lock(&fdent_data_lock); + size = pagelist.Size(); + return true; +} + +bool FdEntity::GetXattr(string& xattr) +{ + AutoLock auto_lock(&fdent_lock); + + headers_t::const_iterator iter = orgmeta.find("x-amz-meta-xattr"); + if(iter == 
orgmeta.end()){ + return false; + } + xattr = iter->second; + return true; +} + +bool FdEntity::SetXattr(const std::string& xattr) +{ + AutoLock auto_lock(&fdent_lock); + orgmeta["x-amz-meta-xattr"] = xattr; + return true; +} + +bool FdEntity::SetMode(mode_t mode) +{ + AutoLock auto_lock(&fdent_lock); + orgmeta["x-amz-meta-mode"] = str(mode); + return true; +} + +bool FdEntity::SetUId(uid_t uid) +{ + AutoLock auto_lock(&fdent_lock); + orgmeta["x-amz-meta-uid"] = str(uid); + return true; +} + +bool FdEntity::SetGId(gid_t gid) +{ + AutoLock auto_lock(&fdent_lock); + orgmeta["x-amz-meta-gid"] = str(gid); + return true; +} + +bool FdEntity::SetContentType(const char* path) +{ + if(!path){ + return false; + } + AutoLock auto_lock(&fdent_lock); + orgmeta["Content-Type"] = S3fsCurl::LookupMimeType(string(path)); + return true; +} + +bool FdEntity::SetAllStatus(bool is_loaded) +{ + S3FS_PRN_INFO3("[path=%s][fd=%d][%s]", path.c_str(), fd, is_loaded ? "loaded" : "unloaded"); + + if(-1 == fd){ + return false; + } + // [NOTE] + // this method is only internal use, and calling after locking. + // so do not lock now. + // + //AutoLock auto_lock(&fdent_lock); + + // get file size + struct stat st; + memset(&st, 0, sizeof(struct stat)); + if(-1 == fstat(fd, &st)){ + S3FS_PRN_ERR("fstat is failed. errno(%d)", errno); + return false; + } + // Reinit + pagelist.Init(st.st_size, is_loaded, false); + + return true; +} + +int FdEntity::Load(off_t start, off_t size, bool lock_already_held, bool is_modified_flag) +{ + AutoLock auto_lock(&fdent_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE); + + S3FS_PRN_DBG("[path=%s][fd=%d][offset=%lld][size=%lld]", path.c_str(), fd, static_cast(start), static_cast(size)); + + if(-1 == fd){ + return -EBADF; + } + AutoLock auto_data_lock(&fdent_data_lock, lock_already_held ? 
AutoLock::ALREADY_LOCKED : AutoLock::NONE); + + int result = 0; + + // check loaded area & load + fdpage_list_t unloaded_list; + if(0 < pagelist.GetUnloadedPages(unloaded_list, start, size)){ + for(fdpage_list_t::iterator iter = unloaded_list.begin(); iter != unloaded_list.end(); ++iter){ + if(0 != size && start + size <= iter->offset){ + // reached end + break; + } + // check loading size + off_t need_load_size = 0; + if(iter->offset < size_orgmeta){ + // original file size(on S3) is smaller than request. + need_load_size = (iter->next() <= size_orgmeta ? iter->bytes : (size_orgmeta - iter->offset)); + } + + // download + if(S3fsCurl::GetMultipartSize() <= need_load_size && !nomultipart){ + // parallel request + result = S3fsCurl::ParallelGetObjectRequest(path.c_str(), fd, iter->offset, need_load_size); + }else{ + // single request + if(0 < need_load_size){ + S3fsCurl s3fscurl; + result = s3fscurl.GetObjectRequest(path.c_str(), fd, iter->offset, need_load_size); + }else{ + result = 0; + } + } + if(0 != result){ + break; + } + // Set loaded flag + pagelist.SetPageLoadedStatus(iter->offset, iter->bytes, (is_modified_flag ? PageList::PAGE_LOAD_MODIFIED : PageList::PAGE_LOADED)); + } + PageList::FreeList(unloaded_list); + } + return result; +} + +// [NOTE] +// At no disk space for caching object. +// This method is downloading by dividing an object of the specified range +// and uploading by multipart after finishing downloading it. +// +// [NOTICE] +// Need to lock before calling this method. +// +int FdEntity::NoCacheLoadAndPost(off_t start, off_t size) +{ + int result = 0; + + S3FS_PRN_INFO3("[path=%s][fd=%d][offset=%lld][size=%lld]", path.c_str(), fd, static_cast(start), static_cast(size)); + + if(-1 == fd){ + return -EBADF; + } + + // [NOTE] + // This method calling means that the cache file is never used no more. 
+ // + if(!cachepath.empty()){ + // remove cache files(and cache stat file) + FdManager::DeleteCacheFile(path.c_str()); + // cache file path does not use no more. + cachepath.erase(); + mirrorpath.erase(); + } + + // Change entity key in manager mapping + FdManager::get()->ChangeEntityToTempPath(this, path.c_str()); + + // open temporary file + FILE* ptmpfp; + int tmpfd; + if(NULL == (ptmpfp = tmpfile()) || -1 ==(tmpfd = fileno(ptmpfp))){ + S3FS_PRN_ERR("failed to open tmp file. err(%d)", errno); + if(ptmpfp){ + fclose(ptmpfp); + } + return (0 == errno ? -EIO : -errno); + } + + // loop uploading by multipart + for(fdpage_list_t::iterator iter = pagelist.pages.begin(); iter != pagelist.pages.end(); ++iter){ + if(iter->end() < start){ + continue; + } + if(0 != size && start + size <= iter->offset){ + break; + } + // download each multipart size(default 10MB) in unit + for(off_t oneread = 0, totalread = (iter->offset < start ? start : 0); totalread < static_cast(iter->bytes); totalread += oneread){ + int upload_fd = fd; + off_t offset = iter->offset + totalread; + oneread = min(static_cast(iter->bytes) - totalread, S3fsCurl::GetMultipartSize()); + + // check rest size is over minimum part size + // + // [NOTE] + // If the final part size is smaller than 5MB, it is not allowed by S3 API. + // For this case, if the previous part of the final part is not over 5GB, + // we incorporate the final part to the previous part. If the previous part + // is over 5GB, we want to even out the last part and the previous part. 
+ // + if((iter->bytes - totalread - oneread) < MIN_MULTIPART_SIZE){ + if(FIVE_GB < iter->bytes - totalread){ + oneread = (iter->bytes - totalread) / 2; + }else{ + oneread = iter->bytes - totalread; + } + } + + if(!iter->loaded){ + // + // loading or initializing + // + upload_fd = tmpfd; + + // load offset & size + size_t need_load_size = 0; + if(size_orgmeta <= offset){ + // all area is over of original size + need_load_size = 0; + }else{ + if(size_orgmeta < (offset + oneread)){ + // original file size(on S3) is smaller than request. + need_load_size = size_orgmeta - offset; + }else{ + need_load_size = oneread; + } + } + size_t over_size = oneread - need_load_size; + + // [NOTE] + // truncate file to zero and set length to part offset + size + // after this, file length is (offset + size), but file does not use any disk space. + // + if(-1 == ftruncate(tmpfd, 0) || -1 == ftruncate(tmpfd, (offset + oneread))){ + S3FS_PRN_ERR("failed to truncate temporary file(%d).", tmpfd); + result = -EIO; + break; + } + + // single area get request + if(0 < need_load_size){ + S3fsCurl s3fscurl; + if(0 != (result = s3fscurl.GetObjectRequest(path.c_str(), tmpfd, offset, oneread))){ + S3FS_PRN_ERR("failed to get object(start=%lld, size=%lld) for file(%d).", static_cast(offset), static_cast(oneread), tmpfd); + break; + } + } + // initialize fd without loading + if(0 < over_size){ + if(0 != (result = FdEntity::FillFile(tmpfd, 0, over_size, offset + need_load_size))){ + S3FS_PRN_ERR("failed to fill rest bytes for fd(%d). 
errno(%d)", tmpfd, result); + break; + } + } + }else{ + // already loaded area + } + + // single area upload by multipart post + if(0 != (result = NoCacheMultipartPost(upload_fd, offset, oneread))){ + S3FS_PRN_ERR("failed to multipart post(start=%lld, size=%lld) for file(%d).", static_cast(offset), static_cast(oneread), upload_fd); + break; + } + } + if(0 != result){ + break; + } + + // set loaded flag + if(!iter->loaded){ + if(iter->offset < start){ + fdpage page(iter->offset, start - iter->offset, iter->loaded, false); + iter->bytes -= (start - iter->offset); + iter->offset = start; + pagelist.pages.insert(iter, page); + } + if(0 != size && start + size < iter->next()){ + fdpage page(iter->offset, start + size - iter->offset, true, false); + iter->bytes -= (start + size - iter->offset); + iter->offset = start + size; + pagelist.pages.insert(iter, page); + }else{ + iter->loaded = true; + iter->modified = false; + } + } + } + if(0 == result){ + // compress pagelist + pagelist.Compress(); + + // fd data do empty + if(-1 == ftruncate(fd, 0)){ + S3FS_PRN_ERR("failed to truncate file(%d), but continue...", fd); + } + } + + // close temporary + fclose(ptmpfp); + + return result; +} + +// [NOTE] +// At no disk space for caching object. +// This method is starting multipart uploading. +// +int FdEntity::NoCachePreMultipartPost() +{ + // initialize multipart upload values + upload_id.erase(); + etaglist.clear(); + pending_headers.clear(); + + S3fsCurl s3fscurl(true); + int result; + if(0 != (result = s3fscurl.PreMultipartPostRequest(path.c_str(), orgmeta, upload_id, false))){ + return result; + } + s3fscurl.DestroyCurlHandle(); + return 0; +} + +// [NOTE] +// At no disk space for caching object. +// This method is uploading one part of multipart. 
+// +int FdEntity::NoCacheMultipartPost(int tgfd, off_t start, off_t size) +{ + if(-1 == tgfd || upload_id.empty()){ + S3FS_PRN_ERR("Need to initialize for multipart post."); + return -EIO; + } + S3fsCurl s3fscurl(true); + return s3fscurl.MultipartUploadRequest(upload_id, path.c_str(), tgfd, start, size, etaglist); +} + +// [NOTE] +// At no disk space for caching object. +// This method is finishing multipart uploading. +// +int FdEntity::NoCacheCompleteMultipartPost() +{ + if(upload_id.empty() || etaglist.empty()){ + S3FS_PRN_ERR("There is no upload id or etag list."); + return -EIO; + } + + S3fsCurl s3fscurl(true); + int result; + if(0 != (result = s3fscurl.CompleteMultipartPostRequest(path.c_str(), upload_id, etaglist))){ + return result; + } + s3fscurl.DestroyCurlHandle(); + + // reset values + upload_id.erase(); + etaglist.clear(); + mp_start = 0; + mp_size = 0; + + return 0; +} + +int FdEntity::RowFlush(const char* tpath, bool force_sync) +{ + int result = 0; + + std::string tmppath; + headers_t tmporgmeta; + { + AutoLock auto_lock(&fdent_lock); + tmppath = path; + tmporgmeta = orgmeta; + } + + S3FS_PRN_INFO3("[tpath=%s][path=%s][fd=%d]", SAFESTRPTR(tpath), tmppath.c_str(), fd); + + if(-1 == fd){ + return -EBADF; + } + AutoLock auto_lock(&fdent_data_lock); + + if(!force_sync && !pagelist.IsModified()){ + // nothing to update. + return 0; + } + + // If there is no loading all of the area, loading all area. 
+ off_t restsize = pagelist.GetTotalUnloadedPageSize(); + if(0 < restsize){ + if(0 == upload_id.length()){ + // check disk space + if(ReserveDiskSpace(restsize)){ + // enough disk space + // Load all uninitialized area(no mix multipart uploading) + if(!FdEntity::mixmultipart){ + result = Load(/*start=*/ 0, /*size=*/ 0, /*lock_already_held=*/ true); + } + FdManager::FreeReservedDiskSpace(restsize); + if(0 != result){ + S3FS_PRN_ERR("failed to upload all area(errno=%d)", result); + return static_cast(result); + } + }else{ + // no enough disk space + // upload all by multipart uploading + if(0 != (result = NoCacheLoadAndPost())){ + S3FS_PRN_ERR("failed to upload all area by multipart uploading(errno=%d)", result); + return static_cast(result); + } + } + }else{ + // already start multipart uploading + } + } + + if(0 == upload_id.length()){ + // normal uploading + // + // Make decision to do multi upload (or not) based upon file size + // + // According to the AWS spec: + // - 1 to 10,000 parts are allowed + // - minimum size of parts is 5MB (expect for the last part) + // + // For our application, we will define minimum part size to be 10MB (10 * 2^20 Bytes) + // minimum file size will be 64 GB - 2 ** 36 + // + // Initially uploads will be done serially + // + // If file is > 20MB, then multipart will kick in + // + if(pagelist.Size() > MAX_MULTIPART_CNT * S3fsCurl::GetMultipartSize()){ + // close f ? + S3FS_PRN_ERR("Part count exceeds %d. Increase multipart size and try again.", MAX_MULTIPART_CNT); + return -ENOTSUP; + } + + // seek to head of file. 
+ if(0 != lseek(fd, 0, SEEK_SET)){ + S3FS_PRN_ERR("lseek error(%d)", errno); + return -errno; + } + // backup upload file size + struct stat st; + memset(&st, 0, sizeof(struct stat)); + if(-1 == fstat(fd, &st)){ + S3FS_PRN_ERR("fstat is failed by errno(%d), but continue...", errno); + } + + if(pagelist.Size() >= S3fsCurl::GetMultipartSize() && !nomultipart){ + if(FdEntity::mixmultipart){ + // multipart uploading can use copy api + + // This is to ensure that each part is 5MB or more. + // If the part is less than 5MB, download it. + fdpage_list_t dlpages; + fdpage_list_t mixuppages; + if(!pagelist.GetPageListsForMultipartUpload(dlpages, mixuppages, S3fsCurl::GetMultipartSize())){ + S3FS_PRN_ERR("something error occurred during getting download pagelist."); + return -1; + } + + // [TODO] should use parallel downloading + // + for(fdpage_list_t::const_iterator iter = dlpages.begin(); iter != dlpages.end(); ++iter){ + if(0 != (result = Load(iter->offset, iter->bytes, /*lock_already_held=*/ true, /*is_modified_flag=*/ true))){ // set loaded and modified flag + S3FS_PRN_ERR("failed to get parts(start=%lld, size=%lld) before uploading.", static_cast(iter->offset), static_cast(iter->bytes)); + return result; + } + } + + // multipart uploading with copy api + result = S3fsCurl::ParallelMixMultipartUploadRequest(tpath ? tpath : tmppath.c_str(), tmporgmeta, fd, mixuppages); + + }else{ + // multipart uploading not using copy api + result = S3fsCurl::ParallelMultipartUploadRequest(tpath ? tpath : tmppath.c_str(), tmporgmeta, fd); + } + }else{ + // If there are unloaded pages, they are loaded at here. + if(0 != (result = Load(/*start=*/ 0, /*size=*/ 0, /*lock_already_held=*/ true))){ + S3FS_PRN_ERR("failed to load parts before uploading object(%d)", result); + return result; + } + + S3fsCurl s3fscurl(true); + result = s3fscurl.PutRequest(tpath ? tpath : tmppath.c_str(), tmporgmeta, fd); + } + + // seek to head of file. 
+ if(0 == result && 0 != lseek(fd, 0, SEEK_SET)){ + S3FS_PRN_ERR("lseek error(%d)", errno); + return -errno; + } + + // reset uploaded file size + size_orgmeta = st.st_size; + + }else{ + // upload rest data + if(0 < mp_size){ + if(0 != (result = NoCacheMultipartPost(fd, mp_start, mp_size))){ + S3FS_PRN_ERR("failed to multipart post(start=%lld, size=%lld) for file(%d).", static_cast(mp_start), static_cast(mp_size), fd); + return result; + } + mp_start = 0; + mp_size = 0; + } + // complete multipart uploading. + if(0 != (result = NoCacheCompleteMultipartPost())){ + S3FS_PRN_ERR("failed to complete(finish) multipart post for file(%d).", fd); + return result; + } + // truncate file to zero + if(-1 == ftruncate(fd, 0)){ + // So the file has already been removed, skip error. + S3FS_PRN_ERR("failed to truncate file(%d) to zero, but continue...", fd); + } + + // put pading headers + if(0 != (result = UploadPendingMeta())){ + return result; + } + } + + if(0 == result){ + pagelist.ClearAllModified(); + } + return result; +} + +// [NOTICE] +// Need to lock before calling this method. +bool FdEntity::ReserveDiskSpace(off_t size) +{ + if(FdManager::ReserveDiskSpace(size)){ + return true; + } + + if(!pagelist.IsModified()){ + // try to clear all cache for this fd. 
+ pagelist.Init(pagelist.Size(), false, false); + if(-1 == ftruncate(fd, 0) || -1 == ftruncate(fd, pagelist.Size())){ + S3FS_PRN_ERR("failed to truncate temporary file(%d).", fd); + return false; + } + + if(FdManager::ReserveDiskSpace(size)){ + return true; + } + } + + FdManager::get()->CleanupCacheDir(); + + return FdManager::ReserveDiskSpace(size); +} + +ssize_t FdEntity::Read(char* bytes, off_t start, size_t size, bool force_load) +{ + S3FS_PRN_DBG("[path=%s][fd=%d][offset=%lld][size=%zu]", path.c_str(), fd, static_cast(start), size); + + if(-1 == fd){ + return -EBADF; + } + AutoLock auto_lock(&fdent_data_lock); + + if(force_load){ + pagelist.SetPageLoadedStatus(start, size, PageList::PAGE_NOT_LOAD_MODIFIED); + } + + ssize_t rsize; + + // check disk space + if(0 < pagelist.GetTotalUnloadedPageSize(start, size)){ + // load size(for prefetch) + size_t load_size = size; + if(start + static_cast(size) < pagelist.Size()){ + ssize_t prefetch_max_size = max(static_cast(size), S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount()); + + if(start + prefetch_max_size < pagelist.Size()){ + load_size = prefetch_max_size; + }else{ + load_size = pagelist.Size() - start; + } + } + + if(!ReserveDiskSpace(load_size)){ + S3FS_PRN_WARN("could not reserve disk space for pre-fetch download"); + load_size = size; + if(!ReserveDiskSpace(load_size)){ + S3FS_PRN_ERR("could not reserve disk space for pre-fetch download"); + return -ENOSPC; + } + } + + // Loading + int result = 0; + if(0 < size){ + result = Load(start, load_size, /*lock_already_held=*/ true); + } + + FdManager::FreeReservedDiskSpace(load_size); + + if(0 != result){ + S3FS_PRN_ERR("could not download. start(%lld), size(%zu), errno(%d)", static_cast(start), size, result); + return -EIO; + } + } + + // Reading + if(-1 == (rsize = pread(fd, bytes, size, start))){ + S3FS_PRN_ERR("pread failed. 
errno(%d)", errno); + return -errno; + } + return rsize; +} + +ssize_t FdEntity::Write(const char* bytes, off_t start, size_t size) +{ + S3FS_PRN_DBG("[path=%s][fd=%d][offset=%lld][size=%zu]", path.c_str(), fd, static_cast(start), size); + + if(-1 == fd){ + return -EBADF; + } + // check if not enough disk space left BEFORE locking fd + if(FdManager::IsCacheDir() && !FdManager::IsSafeDiskSpace(NULL, size)){ + FdManager::get()->CleanupCacheDir(); + } + AutoLock auto_lock(&fdent_data_lock); + + // check file size + if(pagelist.Size() < start){ + // grow file size + if(-1 == ftruncate(fd, start)){ + S3FS_PRN_ERR("failed to truncate temporary file(%d).", fd); + return -EIO; + } + // add new area + pagelist.SetPageLoadedStatus(pagelist.Size(), start - pagelist.Size(), PageList::PAGE_MODIFIED); + } + + int result = 0; + ssize_t wsize; + + if(0 == upload_id.length()){ + // check disk space + off_t restsize = pagelist.GetTotalUnloadedPageSize(0, start) + size; + if(ReserveDiskSpace(restsize)){ + // enough disk space + + // Load uninitialized area which starts from 0 to (start + size) before writing. 
+ if(!FdEntity::mixmultipart){ + if(0 < start){ + result = Load(0, start, /*lock_already_held=*/ true); + } + } + + FdManager::FreeReservedDiskSpace(restsize); + if(0 != result){ + S3FS_PRN_ERR("failed to load uninitialized area before writing(errno=%d)", result); + return static_cast(result); + } + }else{ + // no enough disk space + if(0 != (result = NoCachePreMultipartPost())){ + S3FS_PRN_ERR("failed to switch multipart uploading with no cache(errno=%d)", result); + return static_cast(result); + } + // start multipart uploading + if(0 != (result = NoCacheLoadAndPost(0, start))){ + S3FS_PRN_ERR("failed to load uninitialized area and multipart uploading it(errno=%d)", result); + return static_cast(result); + } + mp_start = start; + mp_size = 0; + } + }else{ + // already start multipart uploading + } + + // Writing + if(-1 == (wsize = pwrite(fd, bytes, size, start))){ + S3FS_PRN_ERR("pwrite failed. errno(%d)", errno); + return -errno; + } + if(0 < wsize){ + pagelist.SetPageLoadedStatus(start, wsize, PageList::PAGE_LOAD_MODIFIED); + } + + // Load uninitialized area which starts from (start + size) to EOF after writing. + if(!FdEntity::mixmultipart){ + if(pagelist.Size() > start + static_cast(size)){ + result = Load(start + size, pagelist.Size(), /*lock_already_held=*/ true); + if(0 != result){ + S3FS_PRN_ERR("failed to load uninitialized area after writing(errno=%d)", result); + return static_cast(result); + } + } + } + + // check multipart uploading + if(0 < upload_id.length()){ + mp_size += wsize; + if(S3fsCurl::GetMultipartSize() <= mp_size){ + // over one multipart size + if(0 != (result = NoCacheMultipartPost(fd, mp_start, mp_size))){ + S3FS_PRN_ERR("failed to multipart post(start=%lld, size=%lld) for file(%d).", static_cast(mp_start), static_cast(mp_size), fd); + return result; + } + // [NOTE] + // truncate file to zero and set length to part offset + size + // after this, file length is (offset + size), but file does not use any disk space. 
+ // + if(-1 == ftruncate(fd, 0) || -1 == ftruncate(fd, (mp_start + mp_size))){ + S3FS_PRN_ERR("failed to truncate file(%d).", fd); + return -EIO; + } + mp_start += mp_size; + mp_size = 0; + } + } + return wsize; +} + +// [NOTE] +// Returns true if merged to orgmeta. +// If true is returned, the caller can update the header. +// If it is false, do not update the header because multipart upload is in progress. +// In this case, the header is pending internally and is updated after the upload +// is complete(flush file). +// +bool FdEntity::MergeOrgMeta(headers_t& updatemeta) +{ + AutoLock auto_lock(&fdent_lock); + + bool is_pending; + if(upload_id.empty()){ + // merge update meta + headers_t mergedmeta = orgmeta; + + merge_headers(orgmeta, updatemeta, false); // overwrite existing keys only + merge_headers(mergedmeta, updatemeta, true); // overwrite all keys + updatemeta = mergedmeta; // swap + + is_pending = false; + }else{ + // could not update meta because uploading now, then put pending. + pending_headers.push_back(updatemeta); + is_pending = true; + } + return is_pending; +} + +// global function in s3fs.cpp +int put_headers(const char* path, headers_t& meta, bool is_copy); + +int FdEntity::UploadPendingMeta() +{ + AutoLock auto_lock(&fdent_lock); + + int result = 0; + for(headers_list_t::const_iterator iter = pending_headers.begin(); iter != pending_headers.end(); ++iter){ + // [NOTE] + // orgmeta will be updated sequentially. + headers_t putmeta = orgmeta; + merge_headers(putmeta, *iter, true); // overwrite all keys + merge_headers(orgmeta, *iter, false); // overwrite existing keys only + + // [NOTE] + // this is special cases, we remove the key which has empty values. 
+ for(headers_t::iterator hiter = putmeta.begin(); hiter != putmeta.end(); ){ + if(hiter->second.empty()){ + if(orgmeta.end() != orgmeta.find(hiter->first)){ + orgmeta.erase(hiter->first); + } + putmeta.erase(hiter++); + }else{ + ++hiter; + } + } + + // update ctime/mtime + time_t updatetime = get_mtime((*iter), false); // not overcheck + if(0 != updatetime){ + SetMtime(updatetime, true); + } + updatetime = get_ctime((*iter), false); // not overcheck + if(0 != updatetime){ + SetCtime(updatetime, true); + } + + // put headers + int one_result = put_headers(path.c_str(), putmeta, true); + if(0 != one_result){ + S3FS_PRN_ERR("failed to put header after flushing file(%s) by(%d).", path.c_str(), one_result); + result = one_result; // keep lastest result code + } + } + pending_headers.clear(); + return result; +} + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/fdcache_entity.h b/src/fdcache_entity.h new file mode 100644 index 0000000..fd3bc0b --- /dev/null +++ b/src/fdcache_entity.h @@ -0,0 +1,124 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Randy Rizun + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef S3FS_FDCACHE_ENTITY_H_ +#define S3FS_FDCACHE_ENTITY_H_ + +#include "fdcache_page.h" +#include "metaheader.h" + +//------------------------------------------------ +// class FdEntity +//------------------------------------------------ +class FdEntity +{ + private: + static bool mixmultipart; // whether multipart uploading can use copy api. + + pthread_mutex_t fdent_lock; + bool is_lock_init; + int refcnt; // reference count + std::string path; // object path + int fd; // file descriptor(tmp file or cache file) + FILE* pfile; // file pointer(tmp file or cache file) + ino_t inode; // inode number for cache file + headers_t orgmeta; // original headers at opening + off_t size_orgmeta; // original file size in original headers + + pthread_mutex_t fdent_data_lock;// protects the following members + PageList pagelist; + std::string upload_id; // for no cached multipart uploading when no disk space + etaglist_t etaglist; // for no cached multipart uploading when no disk space + off_t mp_start; // start position for no cached multipart(write method only) + off_t mp_size; // size for no cached multipart(write method only) + std::string cachepath; // local cache file path + // (if this is empty, does not load/save pagelist.) 
+ std::string mirrorpath; // mirror file path to local cache file path + headers_list_t pending_headers;// pending update headers + + private: + static int FillFile(int fd, unsigned char byte, off_t size, off_t start); + static ino_t GetInode(int fd); + + void Clear(void); + ino_t GetInode(void); + int OpenMirrorFile(void); + bool SetAllStatus(bool is_loaded); // [NOTE] not locking + bool SetAllStatusUnloaded(void) { return SetAllStatus(false); } + int UploadPendingMeta(void); + + public: + static bool SetNoMixMultipart(void); + + explicit FdEntity(const char* tpath = NULL, const char* cpath = NULL); + ~FdEntity(); + + void Close(void); + bool IsOpen(void) const { return (-1 != fd); } + int Open(headers_t* pmeta = NULL, off_t size = -1, time_t time = -1, bool no_fd_lock_wait = false); + bool OpenAndLoadAll(headers_t* pmeta = NULL, off_t* size = NULL, bool force_load = false); + int Dup(bool lock_already_held = false); + + const char* GetPath(void) const { return path.c_str(); } + bool RenamePath(const std::string& newpath, std::string& fentmapkey); + int GetFd(void) const { return fd; } + bool IsModified(void) const; + bool MergeOrgMeta(headers_t& updatemeta); + + bool GetStats(struct stat& st, bool lock_already_held = false); + int SetCtime(time_t time, bool lock_already_held = false); + int SetMtime(time_t time, bool lock_already_held = false); + bool UpdateCtime(void); + bool UpdateMtime(void); + bool GetSize(off_t& size); + bool GetXattr(std::string& xattr); + bool SetXattr(const std::string& xattr); + bool SetMode(mode_t mode); + bool SetUId(uid_t uid); + bool SetGId(gid_t gid); + bool SetContentType(const char* path); + + int Load(off_t start = 0, off_t size = 0, bool lock_already_held = false, bool is_modified_flag = false); // size=0 means loading to end + int NoCacheLoadAndPost(off_t start = 0, off_t size = 0); // size=0 means loading to end + int NoCachePreMultipartPost(void); + int NoCacheMultipartPost(int tgfd, off_t start, off_t size); + int 
NoCacheCompleteMultipartPost(void); + + int RowFlush(const char* tpath, bool force_sync = false); + int Flush(bool force_sync = false) { return RowFlush(NULL, force_sync); } + + ssize_t Read(char* bytes, off_t start, size_t size, bool force_load = false); + ssize_t Write(const char* bytes, off_t start, size_t size); + + bool ReserveDiskSpace(off_t size); +}; + +typedef std::map fdent_map_t; // key=path, value=FdEntity* + +#endif // S3FS_FDCACHE_ENTITY_H_ + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/fdcache_page.cpp b/src/fdcache_page.cpp new file mode 100644 index 0000000..6e56500 --- /dev/null +++ b/src/fdcache_page.cpp @@ -0,0 +1,925 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Takeshi Nakatani + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include +#include +#include +#include +#include + +#include "common.h" +#include "s3fs.h" +#include "fdcache_page.h" +#include "string_util.h" + +using namespace std; + +//------------------------------------------------ +// Symbols +//------------------------------------------------ +static const int CHECK_CACHEFILE_PART_SIZE = 1024 * 16; // Buffer size in PageList::CheckZeroAreaInFile() + +//------------------------------------------------ +// fdpage_list_t utility +//------------------------------------------------ +// Inline function for repeated processing +inline void raw_add_compress_fdpage_list(fdpage_list_t& pagelist, fdpage& page, bool ignore_load, bool ignore_modify, bool default_load, bool default_modify) +{ + if(0 < page.bytes){ + // [NOTE] + // The page variable is subject to change here. + // + if(ignore_load){ + page.loaded = default_load; + } + if(ignore_modify){ + page.modified = default_modify; + } + pagelist.push_back(page); + } +} + +// Compress the page list +// +// ignore_load: Ignore the flag of loaded member and compress +// ignore_modify: Ignore the flag of modified member and compress +// default_load: loaded flag value in the list after compression when ignore_load=true +// default_modify: modified flag value in the list after compression when default_modify=true +// +// NOTE: ignore_modify and ignore_load cannot both be true. 
+// +static fdpage_list_t raw_compress_fdpage_list(const fdpage_list_t& pages, bool ignore_load, bool ignore_modify, bool default_load, bool default_modify) +{ + fdpage_list_t compressed_pages; + fdpage tmppage; + bool is_first = true; + for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + if(!is_first){ + if( (!ignore_load && (tmppage.loaded != iter->loaded )) || + (!ignore_modify && (tmppage.modified != iter->modified)) ) + { + // Different from the previous area, add it to list + raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify); + + // keep current area + tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? default_modify : iter->modified)); + }else{ + // Same as the previous area + if(tmppage.next() != iter->offset){ + // These are not contiguous areas, add it to list + raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify); + + // keep current area + tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? default_modify : iter->modified)); + }else{ + // These are contiguous areas + + // add current area + tmppage.bytes += iter->bytes; + } + } + }else{ + // first erea + is_first = false; + + // keep current area + tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? 
default_modify : iter->modified)); + } + } + // add lastest area + if(!is_first){ + raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify); + } + return compressed_pages; +} + +static fdpage_list_t compress_fdpage_list_ignore_modify(const fdpage_list_t& pages, bool default_modify) +{ + return raw_compress_fdpage_list(pages, /* ignore_load= */ false, /* ignore_modify= */ true, /* default_load= */false, /* default_modify= */default_modify); +} + +static fdpage_list_t compress_fdpage_list_ignore_load(const fdpage_list_t& pages, bool default_load) +{ + return raw_compress_fdpage_list(pages, /* ignore_load= */ true, /* ignore_modify= */ false, /* default_load= */default_load, /* default_modify= */false); +} + +static fdpage_list_t compress_fdpage_list(const fdpage_list_t& pages) +{ + return raw_compress_fdpage_list(pages, /* ignore_load= */ false, /* ignore_modify= */ false, /* default_load= */false, /* default_modify= */false); +} + +static fdpage_list_t parse_partsize_fdpage_list(const fdpage_list_t& pages, off_t max_partsize) +{ + fdpage_list_t parsed_pages; + for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + if(iter->modified){ + // modified page + fdpage tmppage = *iter; + for(off_t start = iter->offset, rest_bytes = iter->bytes; 0 < rest_bytes; ){ + if((max_partsize * 2) < rest_bytes){ + // do parse + tmppage.offset = start; + tmppage.bytes = max_partsize; + parsed_pages.push_back(tmppage); + + start += max_partsize; + rest_bytes -= max_partsize; + }else{ + // Since the number of remaining bytes is less than twice max_partsize, + // one of the divided areas will be smaller than max_partsize. + // Therefore, this area at the end should not be divided. 
+ tmppage.offset = start; + tmppage.bytes = rest_bytes; + parsed_pages.push_back(tmppage); + + start += rest_bytes; + rest_bytes = 0; + } + } + }else{ + // not modified page is not parsed + parsed_pages.push_back(*iter); + } + } + return parsed_pages; +} + +//------------------------------------------------ +// PageList class methods +//------------------------------------------------ +// +// Examine and return the status of each block in the file. +// +// Assuming the file is a sparse file, check the HOLE and DATA areas +// and return it in fdpage_list_t. The loaded flag of each fdpage is +// set to false for HOLE blocks and true for DATA blocks. +// +bool PageList::GetSparseFilePages(int fd, size_t file_size, fdpage_list_t& sparse_list) +{ + // [NOTE] + // Express the status of the cache file using fdpage_list_t. + // There is a hole in the cache file(sparse file), and the + // state of this hole is expressed by the "loaded" member of + // struct fdpage. (the "modified" member is not used) + // + if(0 == file_size){ + // file is empty + return true; + } + + bool is_hole = false; + int hole_pos = lseek(fd, 0, SEEK_HOLE); + int data_pos = lseek(fd, 0, SEEK_DATA); + if(-1 == hole_pos && -1 == data_pos){ + S3FS_PRN_ERR("Could not find the first position both HOLE and DATA in the file(fd=%d).", fd); + return false; + }else if(-1 == hole_pos){ + is_hole = false; + }else if(-1 == data_pos){ + is_hole = true; + }else if(hole_pos < data_pos){ + is_hole = true; + }else{ + is_hole = false; + } + + for(int cur_pos = 0, next_pos = 0; 0 <= cur_pos; cur_pos = next_pos, is_hole = !is_hole){ + fdpage page; + page.offset = cur_pos; + page.loaded = !is_hole; + page.modified = false; + + next_pos = lseek(fd, cur_pos, (is_hole ? 
SEEK_DATA : SEEK_HOLE)); + if(-1 == next_pos){ + page.bytes = static_cast(file_size - cur_pos); + }else{ + page.bytes = next_pos - cur_pos; + } + sparse_list.push_back(page); + } + return true; +} + +// +// Confirm that the specified area is ZERO +// +bool PageList::CheckZeroAreaInFile(int fd, off_t start, size_t bytes) +{ + char* readbuff = new char[CHECK_CACHEFILE_PART_SIZE]; + + for(size_t comp_bytes = 0, check_bytes = 0; comp_bytes < bytes; comp_bytes += check_bytes){ + if(CHECK_CACHEFILE_PART_SIZE < (bytes - comp_bytes)){ + check_bytes = CHECK_CACHEFILE_PART_SIZE; + }else{ + check_bytes = bytes - comp_bytes; + } + bool found_bad_data = false; + ssize_t read_bytes; + if(-1 == (read_bytes = pread(fd, readbuff, check_bytes, (start + comp_bytes)))){ + S3FS_PRN_ERR("Something error is occurred in reading %zu bytes at %lld from file(%d).", check_bytes, static_cast(start + comp_bytes), fd); + found_bad_data = true; + }else{ + check_bytes = static_cast(read_bytes); + for(size_t tmppos = 0; tmppos < check_bytes; ++tmppos){ + if('\0' != readbuff[tmppos]){ + // found not ZERO data. + found_bad_data = true; + break; + } + } + } + if(found_bad_data){ + delete[] readbuff; + return false; + } + } + delete[] readbuff; + return true; +} + +// +// Checks that the specified area matches the state of the sparse file. +// +// [Parameters] +// checkpage: This is one state of the cache file, it is loaded from the stats file. +// sparse_list: This is a list of the results of directly checking the cache file status(HOLE/DATA). +// In the HOLE area, the "loaded" flag of fdpage is false. The DATA area has it set to true. +// fd: opened file discriptor to target cache file. +// +bool PageList::CheckAreaInSparseFile(const struct fdpage& checkpage, const fdpage_list_t& sparse_list, int fd, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list) +{ + // Check the block status of a part(Check Area: checkpage) of the target file. 
+ // The elements of sparse_list have 5 patterns that overlap this block area. + // + // File |<---...--------------------------------------...--->| + // Check Area (offset)<-------------------->(offset + bytes - 1) + // Area case(0) <-------> + // Area case(1) <-------> + // Area case(2) <--------> + // Area case(3) <----------> + // Area case(4) <-----------> + // Area case(5) <-----------------------------> + // + bool result = true; + + for(fdpage_list_t::const_iterator iter = sparse_list.begin(); iter != sparse_list.end(); ++iter){ + off_t check_start = 0; + off_t check_bytes = 0; + if((iter->offset + iter->bytes) <= checkpage.offset){ + // case 0 + continue; // next + + }else if((checkpage.offset + checkpage.bytes) <= iter->offset){ + // case 1 + break; // finish + + }else if(iter->offset < checkpage.offset && (iter->offset + iter->bytes) < (checkpage.offset + checkpage.bytes)){ + // case 2 + check_start = checkpage.offset; + check_bytes = iter->bytes - (checkpage.offset - iter->offset); + + }else if(iter->offset < (checkpage.offset + checkpage.bytes) && (checkpage.offset + checkpage.bytes) < (iter->offset + iter->bytes)){ + // case 3 + check_start = iter->offset; + check_bytes = checkpage.bytes - (iter->offset - checkpage.offset); + + }else if(checkpage.offset < iter->offset && (iter->offset + iter->bytes) < (checkpage.offset + checkpage.bytes)){ + // case 4 + check_start = iter->offset; + check_bytes = iter->bytes; + + }else{ // (iter->offset <= checkpage.offset && (checkpage.offset + checkpage.bytes) <= (iter->offset + iter->bytes)) + // case 5 + check_start = checkpage.offset; + check_bytes = checkpage.bytes; + } + + // check target area type + if(checkpage.loaded || checkpage.modified){ + // target area must be not HOLE(DATA) area. + if(!iter->loaded){ + // Found bad area, it is HOLE area. 
+ fdpage page(check_start, check_bytes, false, false); + err_area_list.push_back(page); + result = false; + } + }else{ + // target area should be HOLE area.(If it is not a block boundary, it may be a DATA area.) + if(iter->loaded){ + // need to check this area's each data, it should be ZERO. + if(!PageList::CheckZeroAreaInFile(fd, check_start, static_cast(check_bytes))){ + // Discovered an area that has un-initial status data but it probably does not effect bad. + fdpage page(check_start, check_bytes, true, false); + warn_area_list.push_back(page); + result = false; + } + } + } + } + return result; +} + +//------------------------------------------------ +// PageList methods +//------------------------------------------------ +void PageList::FreeList(fdpage_list_t& list) +{ + list.clear(); +} + +PageList::PageList(off_t size, bool is_loaded, bool is_modified) +{ + Init(size, is_loaded, is_modified); +} + +PageList::PageList(const PageList& other) +{ + for(fdpage_list_t::const_iterator iter = other.pages.begin(); iter != other.pages.end(); ++iter){ + pages.push_back(*iter); + } +} + +PageList::~PageList() +{ + Clear(); +} + +void PageList::Clear() +{ + PageList::FreeList(pages); +} + +bool PageList::Init(off_t size, bool is_loaded, bool is_modified) +{ + Clear(); + if(0 < size){ + fdpage page(0, size, is_loaded, is_modified); + pages.push_back(page); + } + return true; +} + +off_t PageList::Size() const +{ + if(pages.empty()){ + return 0; + } + fdpage_list_t::const_reverse_iterator riter = pages.rbegin(); + return riter->next(); +} + +bool PageList::Compress() +{ + pages = compress_fdpage_list(pages); + return true; +} + +bool PageList::Parse(off_t new_pos) +{ + for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){ + if(new_pos == iter->offset){ + // nothing to do + return true; + }else if(iter->offset < new_pos && new_pos < iter->next()){ + fdpage page(iter->offset, new_pos - iter->offset, iter->loaded, iter->modified); + iter->bytes -= 
(new_pos - iter->offset); + iter->offset = new_pos; + pages.insert(iter, page); + return true; + } + } + return false; +} + +bool PageList::Resize(off_t size, bool is_loaded, bool is_modified) +{ + off_t total = Size(); + + if(0 == total){ + Init(size, is_loaded, is_modified); + + }else if(total < size){ + // add new area + fdpage page(total, (size - total), is_loaded, is_modified); + pages.push_back(page); + + }else if(size < total){ + // cut area + for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ){ + if(iter->next() <= size){ + ++iter; + }else{ + if(size <= iter->offset){ + iter = pages.erase(iter); + }else{ + iter->bytes = size - iter->offset; + } + } + } + }else{ // total == size + // nothing to do + } + // compress area + return Compress(); +} + +bool PageList::IsPageLoaded(off_t start, off_t size) const +{ + for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + if(iter->end() < start){ + continue; + } + if(!iter->loaded){ + return false; + } + if(0 != size && start + size <= iter->next()){ + break; + } + } + return true; +} + +bool PageList::SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus, bool is_compress) +{ + off_t now_size = Size(); + bool is_loaded = (PAGE_LOAD_MODIFIED == pstatus || PAGE_LOADED == pstatus); + bool is_modified = (PAGE_LOAD_MODIFIED == pstatus || PAGE_MODIFIED == pstatus); + + if(now_size <= start){ + if(now_size < start){ + // add + Resize(start, false, is_modified); // set modified flag from now end pos to specified start pos. + } + Resize(start + size, is_loaded, is_modified); + + }else if(now_size <= start + size){ + // cut + Resize(start, false, false); // not changed loaded/modified flags in existing area. 
+ // add + Resize(start + size, is_loaded, is_modified); + + }else{ + // start-size are inner pages area + // parse "start", and "start + size" position + Parse(start); + Parse(start + size); + + // set loaded flag + for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){ + if(iter->end() < start){ + continue; + }else if(start + size <= iter->offset){ + break; + }else{ + iter->loaded = is_loaded; + iter->modified = is_modified; + } + } + } + // compress area + return (is_compress ? Compress() : true); +} + +bool PageList::FindUnloadedPage(off_t start, off_t& resstart, off_t& ressize) const +{ + for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + if(start <= iter->end()){ + if(!iter->loaded && !iter->modified){ // Do not load unloaded and modified areas + resstart = iter->offset; + ressize = iter->bytes; + return true; + } + } + } + return false; +} + +off_t PageList::GetTotalUnloadedPageSize(off_t start, off_t size) const +{ + off_t restsize = 0; + off_t next = start + size; + for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + if(iter->next() <= start){ + continue; + } + if(next <= iter->offset){ + break; + } + if(iter->loaded || iter->modified){ + continue; + } + off_t tmpsize; + if(iter->offset <= start){ + if(iter->next() <= next){ + tmpsize = (iter->next() - start); + }else{ + tmpsize = next - start; // = size + } + }else{ + if(iter->next() <= next){ + tmpsize = iter->next() - iter->offset; // = iter->bytes + }else{ + tmpsize = next - iter->offset; + } + } + restsize += tmpsize; + } + return restsize; +} + +int PageList::GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start, off_t size) const +{ + // If size is 0, it means loading to end. 
+ if(0 == size){ + if(start < Size()){ + size = Size() - start; + } + } + off_t next = start + size; + + for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + if(iter->next() <= start){ + continue; + } + if(next <= iter->offset){ + break; + } + if(iter->loaded || iter->modified){ + continue; // already loaded or modified + } + + // page area + off_t page_start = max(iter->offset, start); + off_t page_next = min(iter->next(), next); + off_t page_size = page_next - page_start; + + // add list + fdpage_list_t::reverse_iterator riter = unloaded_list.rbegin(); + if(riter != unloaded_list.rend() && riter->next() == page_start){ + // merge to before page + riter->bytes += page_size; + }else{ + fdpage page(page_start, page_size, false, false); + unloaded_list.push_back(page); + } + } + return unloaded_list.size(); +} + +// [NOTE] +// This method is called in advance when mixing POST and COPY in multi-part upload. +// The minimum size of each part must be 5 MB, and the data area below this must be +// downloaded from S3. +// This method checks the current PageList status and returns the area that needs +// to be downloaded so that each part is at least 5 MB. 
+// +bool PageList::GetPageListsForMultipartUpload(fdpage_list_t& dlpages, fdpage_list_t& mixuppages, off_t max_partsize) +{ + // compress before this processing + if(!Compress()){ + return false; + } + + // make a list by modified flag + fdpage_list_t modified_pages = compress_fdpage_list_ignore_load(pages, false); + fdpage_list_t download_pages; // A non-contiguous page list showing the areas that need to be downloaded + fdpage_list_t mixupload_pages; // A continuous page list showing only modified flags for mixupload + fdpage prev_page; + for(fdpage_list_t::const_iterator iter = modified_pages.begin(); iter != modified_pages.end(); ++iter){ + if(iter->modified){ + // current is modified area + if(!prev_page.modified){ + // previous is not modified area + if(prev_page.bytes < MIN_MULTIPART_SIZE){ + // previous(not modified) area is too small for one multipart size, + // then all of previous area is needed to download. + download_pages.push_back(prev_page); + + // previous(not modified) area is set upload area. + prev_page.modified = true; + mixupload_pages.push_back(prev_page); + }else{ + // previous(not modified) area is set copy area. + prev_page.modified = false; + mixupload_pages.push_back(prev_page); + } + // set current to previous + prev_page = *iter; + }else{ + // previous is modified area, too + prev_page.bytes += iter->bytes; + } + + }else{ + // current is not modified area + if(!prev_page.modified){ + // previous is not modified area, too + prev_page.bytes += iter->bytes; + + }else{ + // previous is modified area + if(prev_page.bytes < MIN_MULTIPART_SIZE){ + // previous(modified) area is too small for one multipart size, + // then part or all of current area is needed to download. + off_t missing_bytes = MIN_MULTIPART_SIZE - prev_page.bytes; + + if((missing_bytes + MIN_MULTIPART_SIZE) < iter-> bytes){ + // The current size is larger than the missing size, and the remainder + // after deducting the missing size is larger than the minimum size. 
+ + fdpage missing_page(iter->offset, missing_bytes, false, false); + download_pages.push_back(missing_page); + + // previous(not modified) area is set upload area. + prev_page.bytes = MIN_MULTIPART_SIZE; + mixupload_pages.push_back(prev_page); + + // set current to previous + prev_page = *iter; + prev_page.offset += missing_bytes; + prev_page.bytes -= missing_bytes; + + }else{ + // The current size is less than the missing size, or the remaining + // size less the missing size is less than the minimum size. + download_pages.push_back(*iter); + + // add current to previous + prev_page.bytes += iter->bytes; + } + + }else{ + // previous(modified) area is enough size for one multipart size. + mixupload_pages.push_back(prev_page); + + // set current to previous + prev_page = *iter; + } + } + } + } + // lastest area + if(0 < prev_page.bytes){ + mixupload_pages.push_back(prev_page); + } + + // compress + dlpages = compress_fdpage_list_ignore_modify(download_pages, false); + mixuppages = compress_fdpage_list_ignore_load(mixupload_pages, false); + + // parse by max pagesize + dlpages = parse_partsize_fdpage_list(dlpages, max_partsize); + mixuppages = parse_partsize_fdpage_list(mixuppages, max_partsize); + + return true; +} + +bool PageList::IsModified() const +{ + for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + if(iter->modified){ + return true; + } + } + return false; +} + +bool PageList::ClearAllModified() +{ + for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){ + if(iter->modified){ + iter->modified = false; + } + } + return Compress(); +} + +bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode) +{ + if(!file.Open()){ + return false; + } + if(is_output){ + // + // put to file + // + ostringstream ssall; + ssall << inode << ":" << Size(); + + for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){ + ssall << "\n" << iter->offset << ":" << iter->bytes << ":" << 
(iter->loaded ? "1" : "0") << ":" << (iter->modified ? "1" : "0"); + } + + if(-1 == ftruncate(file.GetFd(), 0)){ + S3FS_PRN_ERR("failed to truncate file(to 0) for stats(%d)", errno); + return false; + } + string strall = ssall.str(); + if(0 >= pwrite(file.GetFd(), strall.c_str(), strall.length(), 0)){ + S3FS_PRN_ERR("failed to write stats(%d)", errno); + return false; + } + + }else{ + // + // loading from file + // + struct stat st; + memset(&st, 0, sizeof(struct stat)); + if(-1 == fstat(file.GetFd(), &st)){ + S3FS_PRN_ERR("fstat is failed. errno(%d)", errno); + return false; + } + if(0 >= st.st_size){ + // nothing + Init(0, false, false); + return true; + } + char* ptmp = new char[st.st_size + 1]; + ptmp[st.st_size] = '\0'; + // read from file + if(0 >= pread(file.GetFd(), ptmp, st.st_size, 0)){ + S3FS_PRN_ERR("failed to read stats(%d)", errno); + delete[] ptmp; + return false; + } + string oneline; + istringstream ssall(ptmp); + + // loaded + Clear(); + + // load head line(for size and inode) + off_t total; + ino_t cache_inode; // if this value is 0, it means old format. + if(!getline(ssall, oneline, '\n')){ + S3FS_PRN_ERR("failed to parse stats."); + delete[] ptmp; + return false; + }else{ + istringstream sshead(oneline); + string strhead1; + string strhead2; + + // get first part in head line. + if(!getline(sshead, strhead1, ':')){ + S3FS_PRN_ERR("failed to parse stats."); + delete[] ptmp; + return false; + } + // get second part in head line. 
+ if(!getline(sshead, strhead2, ':')){ + // old head format is "\n" + total = cvt_strtoofft(strhead1.c_str(), /* base= */10); + cache_inode = 0; + }else{ + // current head format is ":\n" + total = cvt_strtoofft(strhead2.c_str(), /* base= */10); + cache_inode = static_cast(cvt_strtoofft(strhead1.c_str(), /* base= */10)); + if(0 == cache_inode){ + S3FS_PRN_ERR("wrong inode number in parsed cache stats."); + delete[] ptmp; + return false; + } + } + } + // check inode number + if(0 != cache_inode && cache_inode != inode){ + S3FS_PRN_ERR("differ inode and inode number in parsed cache stats."); + delete[] ptmp; + return false; + } + + // load each part + bool is_err = false; + while(getline(ssall, oneline, '\n')){ + string part; + istringstream ssparts(oneline); + // offset + if(!getline(ssparts, part, ':')){ + is_err = true; + break; + } + off_t offset = cvt_strtoofft(part.c_str(), /* base= */10); + // size + if(!getline(ssparts, part, ':')){ + is_err = true; + break; + } + off_t size = cvt_strtoofft(part.c_str(), /* base= */10); + // loaded + if(!getline(ssparts, part, ':')){ + is_err = true; + break; + } + bool is_loaded = (1 == cvt_strtoofft(part.c_str(), /* base= */10) ? true : false); + bool is_modified; + if(!getline(ssparts, part, ':')){ + is_modified = false; // old version does not have this part. + }else{ + is_modified = (1 == cvt_strtoofft(part.c_str(), /* base= */10) ? true : false); + } + // add new area + PageList::page_status pstatus = + ( is_loaded && is_modified ? PageList::PAGE_LOAD_MODIFIED : + !is_loaded && is_modified ? PageList::PAGE_MODIFIED : + is_loaded && !is_modified ? 
PageList::PAGE_LOADED : PageList::PAGE_NOT_LOAD_MODIFIED ); + + SetPageLoadedStatus(offset, size, pstatus); + } + delete[] ptmp; + if(is_err){ + S3FS_PRN_ERR("failed to parse stats."); + Clear(); + return false; + } + + // check size + if(total != Size()){ + S3FS_PRN_ERR("different size(%lld - %lld).", static_cast(total), static_cast(Size())); + Clear(); + return false; + } + } + return true; +} + +void PageList::Dump() const +{ + int cnt = 0; + + S3FS_PRN_DBG("pages = {"); + for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter, ++cnt){ + S3FS_PRN_DBG(" [%08d] -> {%014lld - %014lld : %s / %s}", cnt, static_cast(iter->offset), static_cast(iter->bytes), iter->loaded ? "loaded" : "unloaded", iter->modified ? "modified" : "not modified"); + } + S3FS_PRN_DBG("}"); +} + +// +// Compare the fdpage_list_t pages of the object with the state of the file. +// +// The loaded=true or modified=true area of pages must be a DATA block +// (not a HOLE block) in the file. +// The other area is a HOLE block in the file or is a DATA block(but the +// data of the target area in that block should be ZERO). +// If it is a bad area in the previous case, it will be reported as an error. +// If the latter case does not match, it will be reported as a warning. +// +bool PageList::CompareSparseFile(int fd, size_t file_size, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list) +{ + err_area_list.clear(); + warn_area_list.clear(); + + // First, list the block disk allocation area of the cache file. + // The cache file has holes(sparse file) and no disk block areas + // are assigned to any holes. 
+ fdpage_list_t sparse_list; + if(!PageList::GetSparseFilePages(fd, file_size, sparse_list)){ + S3FS_PRN_ERR("Something error is occurred in parsing hole/data of the cache file(%d).", fd); + + fdpage page(0, static_cast(file_size), false, false); + err_area_list.push_back(page); + + return false; + } + + if(sparse_list.empty() && pages.empty()){ + // both file and stats information are empty, it means cache file size is ZERO. + return true; + } + + // Compare each pages and sparse_list + bool result = true; + for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + if(!PageList::CheckAreaInSparseFile(*iter, sparse_list, fd, err_area_list, warn_area_list)){ + result = false; + } + } + return result; +} + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/fdcache_page.h b/src/fdcache_page.h new file mode 100644 index 0000000..d513247 --- /dev/null +++ b/src/fdcache_page.h @@ -0,0 +1,130 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Randy Rizun + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef S3FS_FDCACHE_PAGE_H_ +#define S3FS_FDCACHE_PAGE_H_ + +#include "fdcache_stat.h" + +//------------------------------------------------ +// Symbols +//------------------------------------------------ +// [NOTE] +// If the following symbols in lseek whence are undefined, define them. +// If it is not supported by lseek, s3fs judges by the processing result of lseek. +// +#ifndef SEEK_DATA +#define SEEK_DATA 3 +#endif +#ifndef SEEK_HOLE +#define SEEK_HOLE 4 +#endif + +//------------------------------------------------ +// Structure fdpage +//------------------------------------------------ +// page block information +struct fdpage +{ + off_t offset; + off_t bytes; + bool loaded; + bool modified; + + fdpage(off_t start = 0, off_t size = 0, bool is_loaded = false, bool is_modified = false) : + offset(start), bytes(size), loaded(is_loaded), modified(is_modified) {} + + off_t next(void) const + { + return (offset + bytes); + } + off_t end(void) const + { + return (0 < bytes ? offset + bytes - 1 : 0); + } +}; +typedef std::list fdpage_list_t; + +//------------------------------------------------ +// Class PageList +//------------------------------------------------ +class FdEntity; + +// cppcheck-suppress copyCtorAndEqOperator +class PageList +{ + friend class FdEntity; // only one method access directly pages. 
+ + private: + fdpage_list_t pages; + + public: + enum page_status{ + PAGE_NOT_LOAD_MODIFIED = 0, + PAGE_LOADED, + PAGE_MODIFIED, + PAGE_LOAD_MODIFIED + }; + + private: + static bool GetSparseFilePages(int fd, size_t file_size, fdpage_list_t& sparse_list); + static bool CheckZeroAreaInFile(int fd, off_t start, size_t bytes); + static bool CheckAreaInSparseFile(const struct fdpage& checkpage, const fdpage_list_t& sparse_list, int fd, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list); + + void Clear(void); + bool Compress(); + bool Parse(off_t new_pos); + + public: + static void FreeList(fdpage_list_t& list); + + explicit PageList(off_t size = 0, bool is_loaded = false, bool is_modified = false); + explicit PageList(const PageList& other); + ~PageList(); + + bool Init(off_t size, bool is_loaded, bool is_modified); + off_t Size(void) const; + bool Resize(off_t size, bool is_loaded, bool is_modified); + + bool IsPageLoaded(off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list + bool SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus = PAGE_LOADED, bool is_compress = true); + bool FindUnloadedPage(off_t start, off_t& resstart, off_t& ressize) const; + off_t GetTotalUnloadedPageSize(off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list + int GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list + bool GetPageListsForMultipartUpload(fdpage_list_t& dlpages, fdpage_list_t& mixuppages, off_t max_partsize); + + bool IsModified(void) const; + bool ClearAllModified(void); + + bool Serialize(CacheFileStat& file, bool is_output, ino_t inode); + void Dump(void) const; + bool CompareSparseFile(int fd, size_t file_size, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list); +}; + +#endif // S3FS_FDCACHE_PAGE_H_ + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* 
vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/fdcache_stat.cpp b/src/fdcache_stat.cpp new file mode 100644 index 0000000..e27e94a --- /dev/null +++ b/src/fdcache_stat.cpp @@ -0,0 +1,282 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Takeshi Nakatani + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include +#include +#include +#include +#include + +#include "common.h" +#include "s3fs.h" +#include "fdcache_stat.h" +#include "fdcache.h" +#include "s3fs_util.h" +#include "string_util.h" + +using namespace std; + +//------------------------------------------------ +// CacheFileStat class methods +//------------------------------------------------ +string CacheFileStat::GetCacheFileStatTopDir() +{ + string top_path(""); + if(!FdManager::IsCacheDir() || bucket.empty()){ + return top_path; + } + + // stat top dir( "//..stat" ) + top_path += FdManager::GetCacheDir(); + top_path += "/."; + top_path += bucket; + top_path += ".stat"; + return top_path; +} + +bool CacheFileStat::MakeCacheFileStatPath(const char* path, string& sfile_path, bool is_create_dir) +{ + string top_path = CacheFileStat::GetCacheFileStatTopDir(); + if(top_path.empty()){ + S3FS_PRN_ERR("The path to cache top dir is empty."); + return false; + } + + if(is_create_dir){ + int result; + if(0 != (result = mkdirp(top_path + mydirname(path), 0777))){ + S3FS_PRN_ERR("failed to create dir(%s) by errno(%d).", path, result); + return false; + } + } + if(!path || '\0' == path[0]){ + sfile_path = top_path; + }else{ + sfile_path = top_path + SAFESTRPTR(path); + } + return true; +} + +bool CacheFileStat::CheckCacheFileStatTopDir() +{ + string top_path = CacheFileStat::GetCacheFileStatTopDir(); + if(top_path.empty()){ + S3FS_PRN_INFO("The path to cache top dir is empty, thus not need to check permission."); + return true; + } + + return check_exist_dir_permission(top_path.c_str()); +} + +bool CacheFileStat::DeleteCacheFileStat(const char* path) +{ + if(!path || '\0' == path[0]){ + return false; + } + // stat path + string sfile_path; + if(!CacheFileStat::MakeCacheFileStatPath(path, sfile_path, false)){ + S3FS_PRN_ERR("failed to create cache stat file path(%s)", path); + return false; + } + if(0 != unlink(sfile_path.c_str())){ + if(ENOENT == errno){ + S3FS_PRN_DBG("failed to delete file(%s): errno=%d", 
path, errno); + }else{ + S3FS_PRN_ERR("failed to delete file(%s): errno=%d", path, errno); + } + return false; + } + return true; +} + +// [NOTE] +// If remove stat file directory, it should do before removing +// file cache directory. +// +bool CacheFileStat::DeleteCacheFileStatDirectory() +{ + string top_path = CacheFileStat::GetCacheFileStatTopDir(); + if(top_path.empty()){ + S3FS_PRN_INFO("The path to cache top dir is empty, thus not need to remove it."); + return true; + } + return delete_files_in_dir(top_path.c_str(), true); +} + +bool CacheFileStat::RenameCacheFileStat(const char* oldpath, const char* newpath) +{ + if(!oldpath || '\0' == oldpath[0] || !newpath || '\0' == newpath[0]){ + return false; + } + + // stat path + string old_filestat; + string new_filestat; + if(!CacheFileStat::MakeCacheFileStatPath(oldpath, old_filestat, false) || !CacheFileStat::MakeCacheFileStatPath(newpath, new_filestat, false)){ + return false; + } + + // check new stat path + struct stat st; + if(0 == stat(new_filestat.c_str(), &st)){ + // new stat path is existed, then unlink it. + if(-1 == unlink(new_filestat.c_str())){ + S3FS_PRN_ERR("failed to unlink new cache file stat path(%s) by errno(%d).", new_filestat.c_str(), errno); + return false; + } + } + + // check old stat path + if(0 != stat(old_filestat.c_str(), &st)){ + // old stat path is not existed, then nothing to do any more. 
+ return true; + } + + // link and unlink + if(-1 == link(old_filestat.c_str(), new_filestat.c_str())){ + S3FS_PRN_ERR("failed to link old cache file stat path(%s) to new cache file stat path(%s) by errno(%d).", old_filestat.c_str(), new_filestat.c_str(), errno); + return false; + } + if(-1 == unlink(old_filestat.c_str())){ + S3FS_PRN_ERR("failed to unlink old cache file stat path(%s) by errno(%d).", old_filestat.c_str(), errno); + return false; + } + return true; +} + +//------------------------------------------------ +// CacheFileStat methods +//------------------------------------------------ +CacheFileStat::CacheFileStat(const char* tpath) : path(""), fd(-1) +{ + if(tpath && '\0' != tpath[0]){ + SetPath(tpath, true); + } +} + +CacheFileStat::~CacheFileStat() +{ + Release(); +} + +bool CacheFileStat::SetPath(const char* tpath, bool is_open) +{ + if(!tpath || '\0' == tpath[0]){ + return false; + } + if(!Release()){ + // could not close old stat file. + return false; + } + path = tpath; + if(!is_open){ + return true; + } + return Open(); +} + +bool CacheFileStat::RawOpen(bool readonly) +{ + if(path.empty()){ + return false; + } + if(-1 != fd){ + // already opened + return true; + } + // stat path + string sfile_path; + if(!CacheFileStat::MakeCacheFileStatPath(path.c_str(), sfile_path, true)){ + S3FS_PRN_ERR("failed to create cache stat file path(%s)", path.c_str()); + return false; + } + // open + if(readonly){ + if(-1 == (fd = open(sfile_path.c_str(), O_RDONLY))){ + S3FS_PRN_ERR("failed to read only open cache stat file path(%s) - errno(%d)", path.c_str(), errno); + return false; + } + }else{ + if(-1 == (fd = open(sfile_path.c_str(), O_CREAT|O_RDWR, 0600))){ + S3FS_PRN_ERR("failed to open cache stat file path(%s) - errno(%d)", path.c_str(), errno); + return false; + } + } + // lock + if(-1 == flock(fd, LOCK_EX)){ + S3FS_PRN_ERR("failed to lock cache stat file(%s) - errno(%d)", path.c_str(), errno); + close(fd); + fd = -1; + return false; + } + // seek top + if(0 
!= lseek(fd, 0, SEEK_SET)){ + S3FS_PRN_ERR("failed to lseek cache stat file(%s) - errno(%d)", path.c_str(), errno); + flock(fd, LOCK_UN); + close(fd); + fd = -1; + return false; + } + S3FS_PRN_DBG("file locked(%s - %s)", path.c_str(), sfile_path.c_str()); + + return true; +} + +bool CacheFileStat::Open() +{ + return RawOpen(false); +} + +bool CacheFileStat::ReadOnlyOpen() +{ + return RawOpen(true); +} + +bool CacheFileStat::Release() +{ + if(-1 == fd){ + // already release + return true; + } + // unlock + if(-1 == flock(fd, LOCK_UN)){ + S3FS_PRN_ERR("failed to unlock cache stat file(%s) - errno(%d)", path.c_str(), errno); + return false; + } + S3FS_PRN_DBG("file unlocked(%s)", path.c_str()); + + if(-1 == close(fd)){ + S3FS_PRN_ERR("failed to close cache stat file(%s) - errno(%d)", path.c_str(), errno); + return false; + } + fd = -1; + + return true; +} + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/fdcache_stat.h b/src/fdcache_stat.h new file mode 100644 index 0000000..ee2f098 --- /dev/null +++ b/src/fdcache_stat.h @@ -0,0 +1,64 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Randy Rizun + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef S3FS_FDCACHE_STAT_H_ +#define S3FS_FDCACHE_STAT_H_ + +//------------------------------------------------ +// CacheFileStat +//------------------------------------------------ +class CacheFileStat +{ + private: + std::string path; + int fd; + + private: + static bool MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir = true); + + bool RawOpen(bool readonly); + + public: + static std::string GetCacheFileStatTopDir(void); + static bool DeleteCacheFileStat(const char* path); + static bool CheckCacheFileStatTopDir(void); + static bool DeleteCacheFileStatDirectory(void); + static bool RenameCacheFileStat(const char* oldpath, const char* newpath); + + explicit CacheFileStat(const char* tpath = NULL); + ~CacheFileStat(); + + bool Open(void); + bool ReadOnlyOpen(void); + bool Release(void); + bool SetPath(const char* tpath, bool is_open = true); + int GetFd(void) const { return fd; } +}; + +#endif // S3FS_FDCACHE_STAT_H_ + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/gnutls_auth.cpp b/src/gnutls_auth.cpp index 6d2c9c8..53f5e36 100644 --- a/src/gnutls_auth.cpp +++ b/src/gnutls_auth.cpp @@ -30,7 +30,7 @@ #include #include #include -#ifdef USE_GNUTLS_NETTLE +#ifdef USE_GNUTLS_NETTLE #include #include #include @@ -39,6 +39,7 @@ #include #include "common.h" +#include "s3fs.h" #include "s3fs_auth.h" using namespace std; @@ -46,46 +47,46 @@ using namespace std; //------------------------------------------------------------------- // Utility Function for version //------------------------------------------------------------------- -#ifdef USE_GNUTLS_NETTLE +#ifdef USE_GNUTLS_NETTLE const char* s3fs_crypt_lib_name(void) { - static const char version[] = "GnuTLS(nettle)"; + static const char version[] = "GnuTLS(nettle)"; - return version; + return version; } -#else // USE_GNUTLS_NETTLE +#else // USE_GNUTLS_NETTLE const 
char* s3fs_crypt_lib_name() { - static const char version[] = "GnuTLS(gcrypt)"; + static const char version[] = "GnuTLS(gcrypt)"; - return version; + return version; } -#endif // USE_GNUTLS_NETTLE +#endif // USE_GNUTLS_NETTLE //------------------------------------------------------------------- // Utility Function for global init //------------------------------------------------------------------- bool s3fs_init_global_ssl() { - if(GNUTLS_E_SUCCESS != gnutls_global_init()){ - return false; - } + if(GNUTLS_E_SUCCESS != gnutls_global_init()){ + return false; + } #ifndef USE_GNUTLS_NETTLE - if(NULL == gcry_check_version(NULL)){ - return false; - } -#endif // USE_GNUTLS_NETTLE - return true; + if(NULL == gcry_check_version(NULL)){ + return false; + } +#endif // USE_GNUTLS_NETTLE + return true; } bool s3fs_destroy_global_ssl() { - gnutls_global_deinit(); - return true; + gnutls_global_deinit(); + return true; } //------------------------------------------------------------------- @@ -93,304 +94,304 @@ bool s3fs_destroy_global_ssl() //------------------------------------------------------------------- bool s3fs_init_crypt_mutex() { - return true; + return true; } bool s3fs_destroy_crypt_mutex() { - return true; + return true; } //------------------------------------------------------------------- // Utility Function for HMAC //------------------------------------------------------------------- -#ifdef USE_GNUTLS_NETTLE +#ifdef USE_GNUTLS_NETTLE bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen) { - if(!key || !data || !digest || !digestlen){ - return false; - } + if(!key || !data || !digest || !digestlen){ + return false; + } - *digest = new unsigned char[SHA1_DIGEST_SIZE]; + *digest = new unsigned char[SHA1_DIGEST_SIZE]; - struct hmac_sha1_ctx ctx_hmac; - hmac_sha1_set_key(&ctx_hmac, keylen, reinterpret_cast(key)); - hmac_sha1_update(&ctx_hmac, datalen, reinterpret_cast(data)); - 
hmac_sha1_digest(&ctx_hmac, SHA1_DIGEST_SIZE, reinterpret_cast(*digest)); - *digestlen = SHA1_DIGEST_SIZE; + struct hmac_sha1_ctx ctx_hmac; + hmac_sha1_set_key(&ctx_hmac, keylen, reinterpret_cast(key)); + hmac_sha1_update(&ctx_hmac, datalen, reinterpret_cast(data)); + hmac_sha1_digest(&ctx_hmac, SHA1_DIGEST_SIZE, reinterpret_cast(*digest)); + *digestlen = SHA1_DIGEST_SIZE; - return true; + return true; } bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen) { - if(!key || !data || !digest || !digestlen){ - return false; - } + if(!key || !data || !digest || !digestlen){ + return false; + } - *digest = new unsigned char[SHA256_DIGEST_SIZE]; + *digest = new unsigned char[SHA256_DIGEST_SIZE]; - struct hmac_sha256_ctx ctx_hmac; - hmac_sha256_set_key(&ctx_hmac, keylen, reinterpret_cast(key)); - hmac_sha256_update(&ctx_hmac, datalen, reinterpret_cast(data)); - hmac_sha256_digest(&ctx_hmac, SHA256_DIGEST_SIZE, reinterpret_cast(*digest)); - *digestlen = SHA256_DIGEST_SIZE; + struct hmac_sha256_ctx ctx_hmac; + hmac_sha256_set_key(&ctx_hmac, keylen, reinterpret_cast(key)); + hmac_sha256_update(&ctx_hmac, datalen, reinterpret_cast(data)); + hmac_sha256_digest(&ctx_hmac, SHA256_DIGEST_SIZE, reinterpret_cast(*digest)); + *digestlen = SHA256_DIGEST_SIZE; - return true; + return true; } -#else // USE_GNUTLS_NETTLE +#else // USE_GNUTLS_NETTLE bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen) { - if(!key || !data || !digest || !digestlen){ - return false; - } + if(!key || !data || !digest || !digestlen){ + return false; + } - if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA1))){ - return false; - } - *digest = new unsigned char[*digestlen + 1]; - if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA1, key, keylen, data, datalen, *digest)){ - delete[] *digest; - *digest = NULL; - return false; - } - return true; + 
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA1))){ + return false; + } + *digest = new unsigned char[*digestlen + 1]; + if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA1, key, keylen, data, datalen, *digest)){ + delete[] *digest; + *digest = NULL; + return false; + } + return true; } bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen) { - if(!key || !data || !digest || !digestlen){ - return false; - } + if(!key || !data || !digest || !digestlen){ + return false; + } - if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA256))){ - return false; - } - *digest = new unsigned char[*digestlen + 1]; - if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA256, key, keylen, data, datalen, *digest)){ - delete[] *digest; - *digest = NULL; - return false; - } - return true; + if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA256))){ + return false; + } + *digest = new unsigned char[*digestlen + 1]; + if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA256, key, keylen, data, datalen, *digest)){ + delete[] *digest; + *digest = NULL; + return false; + } + return true; } -#endif // USE_GNUTLS_NETTLE +#endif // USE_GNUTLS_NETTLE //------------------------------------------------------------------- // Utility Function for MD5 //------------------------------------------------------------------- size_t get_md5_digest_length() { - return 16; + return 16; } -#ifdef USE_GNUTLS_NETTLE +#ifdef USE_GNUTLS_NETTLE unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size) { - struct md5_ctx ctx_md5; - unsigned char buf[512]; - ssize_t bytes; - unsigned char* result; + struct md5_ctx ctx_md5; + unsigned char buf[512]; + ssize_t bytes; + unsigned char* result; - memset(buf, 0, 512); - md5_init(&ctx_md5); - - for(ssize_t total = 0; total < size; total += bytes){ - bytes = 512 < (size - total) ? 
512 : (size - total); - bytes = pread(fd, buf, bytes, start + total); - if(0 == bytes){ - // end of file - break; - }else if(-1 == bytes){ - // error - S3FS_PRN_ERR("file read error(%d)", errno); - return NULL; - } - md5_update(&ctx_md5, bytes, buf); memset(buf, 0, 512); - } - result = new unsigned char[get_md5_digest_length()]; - md5_digest(&ctx_md5, get_md5_digest_length(), result); + md5_init(&ctx_md5); - return result; + for(ssize_t total = 0; total < size; total += bytes){ + bytes = 512 < (size - total) ? 512 : (size - total); + bytes = pread(fd, buf, bytes, start + total); + if(0 == bytes){ + // end of file + break; + }else if(-1 == bytes){ + // error + S3FS_PRN_ERR("file read error(%d)", errno); + return NULL; + } + md5_update(&ctx_md5, bytes, buf); + memset(buf, 0, 512); + } + result = new unsigned char[get_md5_digest_length()]; + md5_digest(&ctx_md5, get_md5_digest_length(), result); + + return result; } -#else // USE_GNUTLS_NETTLE +#else // USE_GNUTLS_NETTLE unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size) { - gcry_md_hd_t ctx_md5; - gcry_error_t err; - char buf[512]; - ssize_t bytes; - unsigned char* result; + gcry_md_hd_t ctx_md5; + gcry_error_t err; + char buf[512]; + ssize_t bytes; + unsigned char* result; - if(-1 == size){ - struct stat st; - if(-1 == fstat(fd, &st)){ - return NULL; + if(-1 == size){ + struct stat st; + if(-1 == fstat(fd, &st)){ + return NULL; + } + size = static_cast(st.st_size); } - size = static_cast(st.st_size); - } - memset(buf, 0, 512); - if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_md5, GCRY_MD_MD5, 0))){ - S3FS_PRN_ERR("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err)); - return NULL; - } - - for(ssize_t total = 0; total < size; total += bytes){ - bytes = 512 < (size - total) ? 
512 : (size - total); - bytes = pread(fd, buf, bytes, start + total); - if(0 == bytes){ - // end of file - break; - }else if(-1 == bytes){ - // error - S3FS_PRN_ERR("file read error(%d)", errno); - gcry_md_close(ctx_md5); - return NULL; - } - gcry_md_write(ctx_md5, buf, bytes); memset(buf, 0, 512); - } - result = new unsigned char[get_md5_digest_length()]; - memcpy(result, gcry_md_read(ctx_md5, 0), get_md5_digest_length()); - gcry_md_close(ctx_md5); + if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_md5, GCRY_MD_MD5, 0))){ + S3FS_PRN_ERR("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err)); + return NULL; + } - return result; + for(ssize_t total = 0; total < size; total += bytes){ + bytes = 512 < (size - total) ? 512 : (size - total); + bytes = pread(fd, buf, bytes, start + total); + if(0 == bytes){ + // end of file + break; + }else if(-1 == bytes){ + // error + S3FS_PRN_ERR("file read error(%d)", errno); + gcry_md_close(ctx_md5); + return NULL; + } + gcry_md_write(ctx_md5, buf, bytes); + memset(buf, 0, 512); + } + result = new unsigned char[get_md5_digest_length()]; + memcpy(result, gcry_md_read(ctx_md5, 0), get_md5_digest_length()); + gcry_md_close(ctx_md5); + + return result; } -#endif // USE_GNUTLS_NETTLE +#endif // USE_GNUTLS_NETTLE //------------------------------------------------------------------- // Utility Function for SHA256 //------------------------------------------------------------------- size_t get_sha256_digest_length() { - return 32; + return 32; } -#ifdef USE_GNUTLS_NETTLE +#ifdef USE_GNUTLS_NETTLE bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen) { - (*digestlen) = static_cast(get_sha256_digest_length()); - *digest = new unsigned char[*digestlen]; + (*digestlen) = static_cast(get_sha256_digest_length()); + *digest = new unsigned char[*digestlen]; - struct sha256_ctx ctx_sha256; - sha256_init(&ctx_sha256); - sha256_update(&ctx_sha256, datalen, data); - 
sha256_digest(&ctx_sha256, *digestlen, *digest); + struct sha256_ctx ctx_sha256; + sha256_init(&ctx_sha256); + sha256_update(&ctx_sha256, datalen, data); + sha256_digest(&ctx_sha256, *digestlen, *digest); - return true; + return true; } unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size) { - struct sha256_ctx ctx_sha256; - unsigned char buf[512]; - ssize_t bytes; - unsigned char* result; + struct sha256_ctx ctx_sha256; + unsigned char buf[512]; + ssize_t bytes; + unsigned char* result; - memset(buf, 0, 512); - sha256_init(&ctx_sha256); - - for(ssize_t total = 0; total < size; total += bytes){ - bytes = 512 < (size - total) ? 512 : (size - total); - bytes = pread(fd, buf, bytes, start + total); - if(0 == bytes){ - // end of file - break; - }else if(-1 == bytes){ - // error - S3FS_PRN_ERR("file read error(%d)", errno); - return NULL; - } - sha256_update(&ctx_sha256, bytes, buf); memset(buf, 0, 512); - } - result = new unsigned char[get_sha256_digest_length()]; - sha256_digest(&ctx_sha256, get_sha256_digest_length(), result); + sha256_init(&ctx_sha256); - return result; + for(ssize_t total = 0; total < size; total += bytes){ + bytes = 512 < (size - total) ? 
512 : (size - total); + bytes = pread(fd, buf, bytes, start + total); + if(0 == bytes){ + // end of file + break; + }else if(-1 == bytes){ + // error + S3FS_PRN_ERR("file read error(%d)", errno); + return NULL; + } + sha256_update(&ctx_sha256, bytes, buf); + memset(buf, 0, 512); + } + result = new unsigned char[get_sha256_digest_length()]; + sha256_digest(&ctx_sha256, get_sha256_digest_length(), result); + + return result; } -#else // USE_GNUTLS_NETTLE +#else // USE_GNUTLS_NETTLE bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen) { - size_t len = (*digestlen) = static_cast(get_sha256_digest_length()); - *digest = new unsigned char[len]; + size_t len = (*digestlen) = static_cast(get_sha256_digest_length()); + *digest = new unsigned char[len]; - gcry_md_hd_t ctx_sha256; - gcry_error_t err; - if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){ - S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err)); - delete[] *digest; - return false; - } - gcry_md_write(ctx_sha256, data, datalen); - memcpy(*digest, gcry_md_read(ctx_sha256, 0), *digestlen); - gcry_md_close(ctx_sha256); + gcry_md_hd_t ctx_sha256; + gcry_error_t err; + if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){ + S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err)); + delete[] *digest; + return false; + } + gcry_md_write(ctx_sha256, data, datalen); + memcpy(*digest, gcry_md_read(ctx_sha256, 0), *digestlen); + gcry_md_close(ctx_sha256); - return true; + return true; } unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size) { - gcry_md_hd_t ctx_sha256; - gcry_error_t err; - char buf[512]; - ssize_t bytes; - unsigned char* result; + gcry_md_hd_t ctx_sha256; + gcry_error_t err; + char buf[512]; + ssize_t bytes; + unsigned char* result; - if(-1 == size){ - struct stat st; - if(-1 == fstat(fd, &st)){ - 
return NULL; + if(-1 == size){ + struct stat st; + if(-1 == fstat(fd, &st)){ + return NULL; + } + size = static_cast(st.st_size); } - size = static_cast(st.st_size); - } - memset(buf, 0, 512); - if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){ - S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err)); - return NULL; - } - - for(ssize_t total = 0; total < size; total += bytes){ - bytes = 512 < (size - total) ? 512 : (size - total); - bytes = pread(fd, buf, bytes, start + total); - if(0 == bytes){ - // end of file - break; - }else if(-1 == bytes){ - // error - S3FS_PRN_ERR("file read error(%d)", errno); - gcry_md_close(ctx_sha256); - return NULL; - } - gcry_md_write(ctx_sha256, buf, bytes); memset(buf, 0, 512); - } - result = new unsigned char[get_sha256_digest_length()]; - memcpy(result, gcry_md_read(ctx_sha256, 0), get_sha256_digest_length()); - gcry_md_close(ctx_sha256); + if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){ + S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err)); + return NULL; + } - return result; + for(ssize_t total = 0; total < size; total += bytes){ + bytes = 512 < (size - total) ? 
512 : (size - total); + bytes = pread(fd, buf, bytes, start + total); + if(0 == bytes){ + // end of file + break; + }else if(-1 == bytes){ + // error + S3FS_PRN_ERR("file read error(%d)", errno); + gcry_md_close(ctx_sha256); + return NULL; + } + gcry_md_write(ctx_sha256, buf, bytes); + memset(buf, 0, 512); + } + result = new unsigned char[get_sha256_digest_length()]; + memcpy(result, gcry_md_read(ctx_sha256, 0), get_sha256_digest_length()); + gcry_md_close(ctx_sha256); + + return result; } -#endif // USE_GNUTLS_NETTLE +#endif // USE_GNUTLS_NETTLE /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/metaheader.cpp b/src/metaheader.cpp new file mode 100644 index 0000000..b835950 --- /dev/null +++ b/src/metaheader.cpp @@ -0,0 +1,322 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Takeshi Nakatani + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include +#include +#include +#include + +#include "common.h" +#include "s3fs.h" +#include "metaheader.h" +#include "string_util.h" + +using namespace std; + +//------------------------------------------------------------------- +// Utility functions for convert +//------------------------------------------------------------------- +time_t get_mtime(const char *str) +{ + // [NOTE] + // In rclone, there are cases where ns is set to x-amz-meta-mtime + // with floating point number. s3fs uses x-amz-meta-mtime by + // truncating the floating point or less (in seconds or less) to + // correspond to this. + // + string strmtime; + if(str && '\0' != *str){ + strmtime = str; + string::size_type pos = strmtime.find('.', 0); + if(string::npos != pos){ + strmtime = strmtime.substr(0, pos); + } + } + return static_cast(cvt_strtoofft(strmtime.c_str())); +} + +static time_t get_time(const headers_t& meta, const char *header) +{ + headers_t::const_iterator iter; + if(meta.end() == (iter = meta.find(header))){ + return 0; + } + return get_mtime((*iter).second.c_str()); +} + +time_t get_mtime(const headers_t& meta, bool overcheck) +{ + time_t t = get_time(meta, "x-amz-meta-mtime"); + if(t != 0){ + return t; + } + t = get_time(meta, "x-amz-meta-goog-reserved-file-mtime"); + if(t != 0){ + return t; + } + if(overcheck){ + return get_lastmodified(meta); + } + return 0; +} + +time_t get_ctime(const headers_t& meta, bool overcheck) +{ + time_t t = get_time(meta, "x-amz-meta-ctime"); + if(t != 0){ + return t; + } + if(overcheck){ + return get_lastmodified(meta); + } + return 0; +} + +off_t get_size(const char *s) +{ + return cvt_strtoofft(s); +} + +off_t get_size(const headers_t& meta) +{ + headers_t::const_iterator iter = meta.find("Content-Length"); + if(meta.end() == iter){ + return 0; + } + return get_size((*iter).second.c_str()); +} + +mode_t get_mode(const char *s, int base) +{ + return static_cast(cvt_strtoofft(s, base)); +} + +mode_t get_mode(const headers_t& meta, const 
char* path, bool checkdir, bool forcedir) +{ + mode_t mode = 0; + bool isS3sync = false; + headers_t::const_iterator iter; + + if(meta.end() != (iter = meta.find("x-amz-meta-mode"))){ + mode = get_mode((*iter).second.c_str()); + }else if(meta.end() != (iter = meta.find("x-amz-meta-permissions"))){ // for s3sync + mode = get_mode((*iter).second.c_str()); + isS3sync = true; + }else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-mode"))){ // for GCS + mode = get_mode((*iter).second.c_str(), 8); + }else{ + // If another tool creates an object without permissions, default to owner + // read-write and group readable. + mode = path[strlen(path) - 1] == '/' ? 0750 : 0640; + } + + // Checking the bitmask, if the last 3 bits are all zero then process as a regular + // file type (S_IFDIR or S_IFREG), otherwise return mode unmodified so that S_IFIFO, + // S_IFSOCK, S_IFCHR, S_IFLNK and S_IFBLK devices can be processed properly by fuse. + if(!(mode & S_IFMT)){ + if(!isS3sync){ + if(checkdir){ + if(forcedir){ + mode |= S_IFDIR; + }else{ + if(meta.end() != (iter = meta.find("Content-Type"))){ + string strConType = (*iter).second; + // Leave just the mime type, remove any optional parameters (eg charset) + string::size_type pos = strConType.find(';'); + if(string::npos != pos){ + strConType = strConType.substr(0, pos); + } + if(strConType == "application/x-directory" || strConType == "httpd/unix-directory"){ + // Nextcloud uses this MIME type for directory objects when mounting bucket as external Storage + mode |= S_IFDIR; + }else if(path && 0 < strlen(path) && '/' == path[strlen(path) - 1]){ + if(strConType == "binary/octet-stream" || strConType == "application/octet-stream"){ + mode |= S_IFDIR; + }else{ + if(complement_stat){ + // If complement lack stat mode, when the object has '/' character at end of name + // and content type is text/plain and the object's size is 0 or 1, it should be + // directory. 
+ off_t size = get_size(meta); + if(strConType == "text/plain" && (0 == size || 1 == size)){ + mode |= S_IFDIR; + }else{ + mode |= S_IFREG; + } + }else{ + mode |= S_IFREG; + } + } + }else{ + mode |= S_IFREG; + } + }else{ + mode |= S_IFREG; + } + } + } + // If complement lack stat mode, when it's mode is not set any permission, + // the object is added minimal mode only for read permission. + if(complement_stat && 0 == (mode & (S_IRWXU | S_IRWXG | S_IRWXO))){ + mode |= (S_IRUSR | (0 == (mode & S_IFDIR) ? 0 : S_IXUSR)); + } + }else{ + if(!checkdir){ + // cut dir/reg flag. + mode &= ~S_IFDIR; + mode &= ~S_IFREG; + } + } + } + return mode; +} + +uid_t get_uid(const char *s) +{ + return static_cast(cvt_strtoofft(s)); +} + +uid_t get_uid(const headers_t& meta) +{ + headers_t::const_iterator iter; + if(meta.end() != (iter = meta.find("x-amz-meta-uid"))){ + return get_uid((*iter).second.c_str()); + }else if(meta.end() != (iter = meta.find("x-amz-meta-owner"))){ // for s3sync + return get_uid((*iter).second.c_str()); + }else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-uid"))){ // for GCS + return get_uid((*iter).second.c_str()); + }else{ + return geteuid(); + } +} + +gid_t get_gid(const char *s) +{ + return static_cast(cvt_strtoofft(s)); +} + +gid_t get_gid(const headers_t& meta) +{ + headers_t::const_iterator iter; + if(meta.end() != (iter = meta.find("x-amz-meta-gid"))){ + return get_gid((*iter).second.c_str()); + }else if(meta.end() != (iter = meta.find("x-amz-meta-group"))){ // for s3sync + return get_gid((*iter).second.c_str()); + }else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-gid"))){ // for GCS + return get_gid((*iter).second.c_str()); + }else{ + return getegid(); + } +} + +blkcnt_t get_blocks(off_t size) +{ + return size / 512 + 1; +} + +time_t cvtIAMExpireStringToTime(const char* s) +{ + struct tm tm; + if(!s){ + return 0L; + } + memset(&tm, 0, sizeof(struct tm)); + strptime(s, "%Y-%m-%dT%H:%M:%S", &tm); + return 
timegm(&tm); // GMT +} + +time_t get_lastmodified(const char* s) +{ + struct tm tm; + if(!s){ + return 0L; + } + memset(&tm, 0, sizeof(struct tm)); + strptime(s, "%a, %d %b %Y %H:%M:%S %Z", &tm); + return timegm(&tm); // GMT +} + +time_t get_lastmodified(const headers_t& meta) +{ + headers_t::const_iterator iter = meta.find("Last-Modified"); + if(meta.end() == iter){ + return 0; + } + return get_lastmodified((*iter).second.c_str()); +} + +// +// Returns it whether it is an object with need checking in detail. +// If this function returns true, the object is possible to be directory +// and is needed checking detail(searching sub object). +// +bool is_need_check_obj_detail(const headers_t& meta) +{ + headers_t::const_iterator iter; + + // directory object is Content-Length as 0. + if(0 != get_size(meta)){ + return false; + } + // if the object has x-amz-meta information, checking is no more. + if(meta.end() != meta.find("x-amz-meta-mode") || + meta.end() != meta.find("x-amz-meta-mtime") || + meta.end() != meta.find("x-amz-meta-uid") || + meta.end() != meta.find("x-amz-meta-gid") || + meta.end() != meta.find("x-amz-meta-owner") || + meta.end() != meta.find("x-amz-meta-group") || + meta.end() != meta.find("x-amz-meta-permissions") ) + { + return false; + } + // if there is not Content-Type, or Content-Type is "x-directory", + // checking is no more. + if(meta.end() == (iter = meta.find("Content-Type"))){ + return false; + } + if("application/x-directory" == (*iter).second){ + return false; + } + return true; +} + +// [NOTE] +// If add_noexist is false and the key does not exist, it will not be added. 
+// +bool merge_headers(headers_t& base, const headers_t& additional, bool add_noexist) +{ + bool added = false; + for(headers_t::const_iterator iter = additional.begin(); iter != additional.end(); ++iter){ + if(add_noexist || base.find(iter->first) != base.end()){ + base[iter->first] = iter->second; + added = true; + } + } + return added; +} + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/metaheader.h b/src/metaheader.h new file mode 100644 index 0000000..0ae19e3 --- /dev/null +++ b/src/metaheader.h @@ -0,0 +1,72 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Randy Rizun + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef S3FS_METAHEADER_H_ +#define S3FS_METAHEADER_H_ + +#include +#include +#include + +//------------------------------------------------------------------- +// headers_t +//------------------------------------------------------------------- +struct header_nocase_cmp : public std::binary_function +{ + bool operator()(const std::string &strleft, const std::string &strright) const + { + return (strcasecmp(strleft.c_str(), strright.c_str()) < 0); + } +}; +typedef std::map headers_t; +typedef std::list headers_list_t; + +//------------------------------------------------------------------- +// Functions +//------------------------------------------------------------------- +time_t get_mtime(const char *s); +time_t get_mtime(const headers_t& meta, bool overcheck = true); +time_t get_ctime(const headers_t& meta, bool overcheck = true); +off_t get_size(const char *s); +off_t get_size(const headers_t& meta); +mode_t get_mode(const char *s, int base = 0); +mode_t get_mode(const headers_t& meta, const char* path = NULL, bool checkdir = false, bool forcedir = false); +uid_t get_uid(const char *s); +uid_t get_uid(const headers_t& meta); +gid_t get_gid(const char *s); +gid_t get_gid(const headers_t& meta); +blkcnt_t get_blocks(off_t size); +time_t cvtIAMExpireStringToTime(const char* s); +time_t get_lastmodified(const char* s); +time_t get_lastmodified(const headers_t& meta); +bool is_need_check_obj_detail(const headers_t& meta); +bool merge_headers(headers_t& base, const headers_t& additional, bool add_noexist); +bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value); + +#endif // S3FS_METAHEADER_H_ + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/mpu_util.cpp b/src/mpu_util.cpp new file mode 100644 index 0000000..d3ae9b4 --- /dev/null +++ b/src/mpu_util.cpp @@ -0,0 +1,161 @@ +/* + * s3fs - FUSE-based file system 
backed by Amazon S3 + * + * Copyright(C) 2007 Takeshi Nakatani + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include +#include + +#include "common.h" +#include "s3fs.h" +#include "mpu_util.h" +#include "curl.h" +#include "s3fs_xml.h" +#include "s3fs_auth.h" +#include "string_util.h" + +using namespace std; + +//------------------------------------------------------------------- +// Global variables +//------------------------------------------------------------------- +utility_incomp_type utility_mode = NO_UTILITY_MODE; + +//------------------------------------------------------------------- +// Functions +//------------------------------------------------------------------- +static void print_incomp_mpu_list(incomp_mpu_list_t& list) +{ + printf("\n"); + printf("Lists the parts that have been uploaded for a specific multipart upload.\n"); + printf("\n"); + + if(!list.empty()){ + printf("---------------------------------------------------------------\n"); + + int cnt = 0; + for(incomp_mpu_list_t::iterator iter = list.begin(); iter != list.end(); ++iter, ++cnt){ + printf(" Path : %s\n", (*iter).key.c_str()); + printf(" UploadId : %s\n", (*iter).id.c_str()); + printf(" Date : %s\n", (*iter).date.c_str()); + printf("\n"); + } + 
printf("---------------------------------------------------------------\n"); + + }else{ + printf("There is no list.\n"); + } +} + +static bool abort_incomp_mpu_list(incomp_mpu_list_t& list, time_t abort_time) +{ + if(list.empty()){ + return true; + } + time_t now_time = time(NULL); + + // do removing. + S3fsCurl s3fscurl; + bool result = true; + for(incomp_mpu_list_t::iterator iter = list.begin(); iter != list.end(); ++iter){ + const char* tpath = (*iter).key.c_str(); + string upload_id = (*iter).id; + + if(0 != abort_time){ // abort_time is 0, it means all. + time_t date = 0; + if(!get_unixtime_from_iso8601((*iter).date.c_str(), date)){ + S3FS_PRN_DBG("date format is not ISO 8601 for %s multipart uploading object, skip this.", tpath); + continue; + } + if(now_time <= (date + abort_time)){ + continue; + } + } + + if(0 != s3fscurl.AbortMultipartUpload(tpath, upload_id)){ + S3FS_PRN_EXIT("Failed to remove %s multipart uploading object.", tpath); + result = false; + }else{ + printf("Succeed to remove %s multipart uploading object.\n", tpath); + } + + // reset(initialize) curl object + s3fscurl.DestroyCurlHandle(); + } + return result; +} + +int s3fs_utility_processing(time_t abort_time) +{ + if(NO_UTILITY_MODE == utility_mode){ + return EXIT_FAILURE; + } + printf("\n*** s3fs run as utility mode.\n\n"); + + S3fsCurl s3fscurl; + string body; + int result = EXIT_SUCCESS; + if(0 != s3fscurl.MultipartListRequest(body)){ + S3FS_PRN_EXIT("Could not get list multipart upload.\nThere is no incomplete multipart uploaded object in bucket.\n"); + result = EXIT_FAILURE; + }else{ + // parse result(incomplete multipart upload information) + S3FS_PRN_DBG("response body = {\n%s\n}", body.c_str()); + + xmlDocPtr doc; + if(NULL == (doc = xmlReadMemory(body.c_str(), static_cast(body.size()), "", NULL, 0))){ + S3FS_PRN_DBG("xmlReadMemory exited with error."); + result = EXIT_FAILURE; + + }else{ + // make incomplete uploads list + incomp_mpu_list_t list; + if(!get_incomp_mpu_list(doc, 
list)){ + S3FS_PRN_DBG("get_incomp_mpu_list exited with error."); + result = EXIT_FAILURE; + + }else{ + if(INCOMP_TYPE_LIST == utility_mode){ + // print list + print_incomp_mpu_list(list); + }else if(INCOMP_TYPE_ABORT == utility_mode){ + // remove + if(!abort_incomp_mpu_list(list, abort_time)){ + S3FS_PRN_DBG("an error occurred during removal process."); + result = EXIT_FAILURE; + } + } + } + S3FS_XMLFREEDOC(doc); + } + } + + // ssl + s3fs_destroy_global_ssl(); + + return result; +} + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/mpu_util.h b/src/mpu_util.h new file mode 100644 index 0000000..3a6d199 --- /dev/null +++ b/src/mpu_util.h @@ -0,0 +1,64 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Randy Rizun + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef S3FS_MPU_UTIL_H_ +#define S3FS_MPU_UTIL_H_ + +#include +#include + +//------------------------------------------------------------------- +// Structure / Typedef +//------------------------------------------------------------------- +typedef struct incomplete_multipart_upload_info +{ + std::string key; + std::string id; + std::string date; +}INCOMP_MPU_INFO; + +typedef std::list incomp_mpu_list_t; + +//------------------------------------------------------------------- +// enum for utility process mode +//------------------------------------------------------------------- +enum utility_incomp_type{ + NO_UTILITY_MODE = 0, // not utility mode + INCOMP_TYPE_LIST, // list of incomplete mpu + INCOMP_TYPE_ABORT // delete incomplete mpu +}; + +extern utility_incomp_type utility_mode; + +//------------------------------------------------------------------- +// Functions +//------------------------------------------------------------------- +int s3fs_utility_processing(time_t abort_time); + +#endif // S3FS_MPU_UTIL_H_ + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/mvnode.cpp b/src/mvnode.cpp new file mode 100644 index 0000000..f47a7c8 --- /dev/null +++ b/src/mvnode.cpp @@ -0,0 +1,142 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Takeshi Nakatani + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include +#include + +#include "common.h" +#include "s3fs.h" +#include "mvnode.h" + +//------------------------------------------------------------------- +// Utility functions for moving objects +//------------------------------------------------------------------- +MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, bool normdir) +{ + MVNODE *p; + char *p_old_path; + char *p_new_path; + + p = new MVNODE(); + + if(NULL == (p_old_path = strdup(old_path))){ + delete p; + printf("create_mvnode: could not allocation memory for p_old_path\n"); + S3FS_FUSE_EXIT(); + return NULL; + } + + if(NULL == (p_new_path = strdup(new_path))){ + delete p; + free(p_old_path); + printf("create_mvnode: could not allocation memory for p_new_path\n"); + S3FS_FUSE_EXIT(); + return NULL; + } + + p->old_path = p_old_path; + p->new_path = p_new_path; + p->is_dir = is_dir; + p->is_normdir = normdir; + p->prev = NULL; + p->next = NULL; + return p; +} + +// +// Add sorted MVNODE data(Ascending order) +// +MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const char *new_path, bool is_dir, bool normdir) +{ + if(!head || !tail){ + return NULL; + } + + MVNODE* cur; + MVNODE* mvnew; + for(cur = *head; cur; cur = cur->next){ + if(cur->is_dir == is_dir){ + int nResult = strcmp(cur->old_path, old_path); + if(0 == nResult){ + // Found same old_path. + return cur; + + }else if(0 > nResult){ + // next check. + // ex: cur("abc"), mvnew("abcd") + // ex: cur("abc"), mvnew("abd") + continue; + + }else{ + // Add into before cur-pos. 
+ // ex: cur("abc"), mvnew("ab") + // ex: cur("abc"), mvnew("abb") + if(NULL == (mvnew = create_mvnode(old_path, new_path, is_dir, normdir))){ + return NULL; + } + if(cur->prev){ + (cur->prev)->next = mvnew; + }else{ + *head = mvnew; + } + mvnew->prev = cur->prev; + mvnew->next = cur; + cur->prev = mvnew; + + return mvnew; + } + } + } + // Add into tail. + if(NULL == (mvnew = create_mvnode(old_path, new_path, is_dir, normdir))){ + return NULL; + } + mvnew->prev = (*tail); + if(*tail){ + (*tail)->next = mvnew; + } + (*tail) = mvnew; + if(!(*head)){ + (*head) = mvnew; + } + return mvnew; +} + +void free_mvnodes(MVNODE *head) +{ + MVNODE *my_head; + MVNODE *next; + + for(my_head = head, next = NULL; my_head; my_head = next){ + next = my_head->next; + free(my_head->old_path); + free(my_head->new_path); + delete my_head; + } +} + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/mvnode.h b/src/mvnode.h new file mode 100644 index 0000000..59f583b --- /dev/null +++ b/src/mvnode.h @@ -0,0 +1,53 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Randy Rizun + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef S3FS_MVNODE_H_ +#define S3FS_MVNODE_H_ + +//------------------------------------------------------------------- +// Structure +//------------------------------------------------------------------- +typedef struct mvnode +{ + char* old_path; + char* new_path; + bool is_dir; + bool is_normdir; + struct mvnode* prev; + struct mvnode* next; +} MVNODE; + +//------------------------------------------------------------------- +// Utility functions for moving objects +//------------------------------------------------------------------- +MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, bool normdir = false); +MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const char *new_path, bool is_dir, bool normdir = false); +void free_mvnodes(MVNODE *head); + +#endif // S3FS_MVNODE_H_ + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/nss_auth.cpp b/src/nss_auth.cpp index e0c02ea..78c7549 100644 --- a/src/nss_auth.cpp +++ b/src/nss_auth.cpp @@ -35,6 +35,7 @@ #include #include "common.h" +#include "s3fs.h" #include "s3fs_auth.h" using namespace std; @@ -44,9 +45,9 @@ using namespace std; //------------------------------------------------------------------- const char* s3fs_crypt_lib_name() { - static const char version[] = "NSS"; + static const char version[] = "NSS"; - return version; + return version; } //------------------------------------------------------------------- @@ -54,21 +55,21 @@ const char* s3fs_crypt_lib_name() //------------------------------------------------------------------- bool s3fs_init_global_ssl() { - PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0); + PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0); - if(SECSuccess != NSS_NoDB_Init(NULL)){ - S3FS_PRN_ERR("Failed NSS_NoDB_Init call."); - return false; - } - return true; + if(SECSuccess != NSS_NoDB_Init(NULL)){ + 
S3FS_PRN_ERR("Failed NSS_NoDB_Init call."); + return false; + } + return true; } bool s3fs_destroy_global_ssl() { - NSS_Shutdown(); - PL_ArenaFinish(); - PR_Cleanup(); - return true; + NSS_Shutdown(); + PL_ArenaFinish(); + PR_Cleanup(); + return true; } //------------------------------------------------------------------- @@ -76,12 +77,12 @@ bool s3fs_destroy_global_ssl() //------------------------------------------------------------------- bool s3fs_init_crypt_mutex() { - return true; + return true; } bool s3fs_destroy_crypt_mutex() { - return true; + return true; } //------------------------------------------------------------------- @@ -89,58 +90,58 @@ bool s3fs_destroy_crypt_mutex() //------------------------------------------------------------------- static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool is_sha256) { - if(!key || !data || !digest || !digestlen){ - return false; - } + if(!key || !data || !digest || !digestlen){ + return false; + } - PK11SlotInfo* Slot; - PK11SymKey* pKey; - PK11Context* Context; - unsigned char tmpdigest[64]; - SECItem KeySecItem = {siBuffer, reinterpret_cast(const_cast(key)), static_cast(keylen)}; - SECItem NullSecItem = {siBuffer, NULL, 0}; + PK11SlotInfo* Slot; + PK11SymKey* pKey; + PK11Context* Context; + unsigned char tmpdigest[64]; + SECItem KeySecItem = {siBuffer, reinterpret_cast(const_cast(key)), static_cast(keylen)}; + SECItem NullSecItem = {siBuffer, NULL, 0}; - if(NULL == (Slot = PK11_GetInternalKeySlot())){ - return false; - } - if(NULL == (pKey = PK11_ImportSymKey(Slot, (is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), PK11_OriginUnwrap, CKA_SIGN, &KeySecItem, NULL))){ - PK11_FreeSlot(Slot); - return false; - } - if(NULL == (Context = PK11_CreateContextBySymKey((is_sha256 ? 
CKM_SHA256_HMAC : CKM_SHA_1_HMAC), CKA_SIGN, pKey, &NullSecItem))){ - PK11_FreeSymKey(pKey); - PK11_FreeSlot(Slot); - return false; - } + if(NULL == (Slot = PK11_GetInternalKeySlot())){ + return false; + } + if(NULL == (pKey = PK11_ImportSymKey(Slot, (is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), PK11_OriginUnwrap, CKA_SIGN, &KeySecItem, NULL))){ + PK11_FreeSlot(Slot); + return false; + } + if(NULL == (Context = PK11_CreateContextBySymKey((is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), CKA_SIGN, pKey, &NullSecItem))){ + PK11_FreeSymKey(pKey); + PK11_FreeSlot(Slot); + return false; + } - *digestlen = 0; - if(SECSuccess != PK11_DigestBegin(Context) || - SECSuccess != PK11_DigestOp(Context, data, datalen) || - SECSuccess != PK11_DigestFinal(Context, tmpdigest, digestlen, sizeof(tmpdigest)) ) - { + *digestlen = 0; + if(SECSuccess != PK11_DigestBegin(Context) || + SECSuccess != PK11_DigestOp(Context, data, datalen) || + SECSuccess != PK11_DigestFinal(Context, tmpdigest, digestlen, sizeof(tmpdigest)) ) + { + PK11_DestroyContext(Context, PR_TRUE); + PK11_FreeSymKey(pKey); + PK11_FreeSlot(Slot); + return false; + } PK11_DestroyContext(Context, PR_TRUE); PK11_FreeSymKey(pKey); PK11_FreeSlot(Slot); - return false; - } - PK11_DestroyContext(Context, PR_TRUE); - PK11_FreeSymKey(pKey); - PK11_FreeSlot(Slot); - *digest = new unsigned char[*digestlen]; - memcpy(*digest, tmpdigest, *digestlen); + *digest = new unsigned char[*digestlen]; + memcpy(*digest, tmpdigest, *digestlen); - return true; + return true; } bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen) { - return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false); + return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false); } bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen) { - return s3fs_HMAC_RAW(key, keylen, 
data, datalen, digest, digestlen, true); + return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true); } //------------------------------------------------------------------- @@ -148,48 +149,48 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz //------------------------------------------------------------------- size_t get_md5_digest_length() { - return MD5_LENGTH; + return MD5_LENGTH; } unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size) { - PK11Context* md5ctx; - unsigned char buf[512]; - ssize_t bytes; - unsigned char* result; - unsigned int md5outlen; + PK11Context* md5ctx; + unsigned char buf[512]; + ssize_t bytes; + unsigned char* result; + unsigned int md5outlen; - if(-1 == size){ - struct stat st; - if(-1 == fstat(fd, &st)){ - return NULL; + if(-1 == size){ + struct stat st; + if(-1 == fstat(fd, &st)){ + return NULL; + } + size = static_cast(st.st_size); } - size = static_cast(st.st_size); - } - memset(buf, 0, 512); - md5ctx = PK11_CreateDigestContext(SEC_OID_MD5); - - for(ssize_t total = 0; total < size; total += bytes){ - bytes = 512 < (size - total) ? 512 : (size - total); - bytes = pread(fd, buf, bytes, start + total); - if(0 == bytes){ - // end of file - break; - }else if(-1 == bytes){ - // error - S3FS_PRN_ERR("file read error(%d)", errno); - PK11_DestroyContext(md5ctx, PR_TRUE); - return NULL; - } - PK11_DigestOp(md5ctx, buf, bytes); memset(buf, 0, 512); - } - result = new unsigned char[get_md5_digest_length()]; - PK11_DigestFinal(md5ctx, result, &md5outlen, get_md5_digest_length()); - PK11_DestroyContext(md5ctx, PR_TRUE); + md5ctx = PK11_CreateDigestContext(SEC_OID_MD5); - return result; + for(ssize_t total = 0; total < size; total += bytes){ + bytes = 512 < (size - total) ? 
512 : (size - total); + bytes = pread(fd, buf, bytes, start + total); + if(0 == bytes){ + // end of file + break; + }else if(-1 == bytes){ + // error + S3FS_PRN_ERR("file read error(%d)", errno); + PK11_DestroyContext(md5ctx, PR_TRUE); + return NULL; + } + PK11_DigestOp(md5ctx, buf, bytes); + memset(buf, 0, 512); + } + result = new unsigned char[get_md5_digest_length()]; + PK11_DigestFinal(md5ctx, result, &md5outlen, get_md5_digest_length()); + PK11_DestroyContext(md5ctx, PR_TRUE); + + return result; } //------------------------------------------------------------------- @@ -197,72 +198,72 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size) //------------------------------------------------------------------- size_t get_sha256_digest_length() { - return SHA256_LENGTH; + return SHA256_LENGTH; } bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen) { - (*digestlen) = static_cast(get_sha256_digest_length()); - *digest = new unsigned char[*digestlen]; + (*digestlen) = static_cast(get_sha256_digest_length()); + *digest = new unsigned char[*digestlen]; - PK11Context* sha256ctx; - unsigned int sha256outlen; - sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256); + PK11Context* sha256ctx; + unsigned int sha256outlen; + sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256); - PK11_DigestOp(sha256ctx, data, datalen); - PK11_DigestFinal(sha256ctx, *digest, &sha256outlen, *digestlen); - PK11_DestroyContext(sha256ctx, PR_TRUE); - *digestlen = sha256outlen; + PK11_DigestOp(sha256ctx, data, datalen); + PK11_DigestFinal(sha256ctx, *digest, &sha256outlen, *digestlen); + PK11_DestroyContext(sha256ctx, PR_TRUE); + *digestlen = sha256outlen; - return true; + return true; } unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size) { - PK11Context* sha256ctx; - unsigned char buf[512]; - ssize_t bytes; - unsigned char* result; - unsigned int sha256outlen; + PK11Context* sha256ctx; + unsigned char 
buf[512]; + ssize_t bytes; + unsigned char* result; + unsigned int sha256outlen; - if(-1 == size){ - struct stat st; - if(-1 == fstat(fd, &st)){ - return NULL; + if(-1 == size){ + struct stat st; + if(-1 == fstat(fd, &st)){ + return NULL; + } + size = static_cast(st.st_size); } - size = static_cast(st.st_size); - } - memset(buf, 0, 512); - sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256); - - for(ssize_t total = 0; total < size; total += bytes){ - bytes = 512 < (size - total) ? 512 : (size - total); - bytes = pread(fd, buf, bytes, start + total); - if(0 == bytes){ - // end of file - break; - }else if(-1 == bytes){ - // error - S3FS_PRN_ERR("file read error(%d)", errno); - PK11_DestroyContext(sha256ctx, PR_TRUE); - return NULL; - } - PK11_DigestOp(sha256ctx, buf, bytes); memset(buf, 0, 512); - } - result = new unsigned char[get_sha256_digest_length()]; - PK11_DigestFinal(sha256ctx, result, &sha256outlen, get_sha256_digest_length()); - PK11_DestroyContext(sha256ctx, PR_TRUE); + sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256); - return result; + for(ssize_t total = 0; total < size; total += bytes){ + bytes = 512 < (size - total) ? 
512 : (size - total); + bytes = pread(fd, buf, bytes, start + total); + if(0 == bytes){ + // end of file + break; + }else if(-1 == bytes){ + // error + S3FS_PRN_ERR("file read error(%d)", errno); + PK11_DestroyContext(sha256ctx, PR_TRUE); + return NULL; + } + PK11_DigestOp(sha256ctx, buf, bytes); + memset(buf, 0, 512); + } + result = new unsigned char[get_sha256_digest_length()]; + PK11_DigestFinal(sha256ctx, result, &sha256outlen, get_sha256_digest_length()); + PK11_DestroyContext(sha256ctx, PR_TRUE); + + return result; } /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/openssl_auth.cpp b/src/openssl_auth.cpp index 35cb867..b1164de 100644 --- a/src/openssl_auth.cpp +++ b/src/openssl_auth.cpp @@ -39,6 +39,7 @@ #include #include "common.h" +#include "s3fs.h" #include "s3fs_auth.h" using namespace std; @@ -48,9 +49,9 @@ using namespace std; //------------------------------------------------------------------- const char* s3fs_crypt_lib_name() { - static const char version[] = "OpenSSL"; + static const char version[] = "OpenSSL"; - return version; + return version; } //------------------------------------------------------------------- @@ -58,17 +59,17 @@ const char* s3fs_crypt_lib_name() //------------------------------------------------------------------- bool s3fs_init_global_ssl() { - ERR_load_crypto_strings(); - ERR_load_BIO_strings(); - OpenSSL_add_all_algorithms(); - return true; + ERR_load_crypto_strings(); + ERR_load_BIO_strings(); + OpenSSL_add_all_algorithms(); + return true; } bool s3fs_destroy_global_ssl() { - EVP_cleanup(); - ERR_free_strings(); - return true; + EVP_cleanup(); + ERR_free_strings(); + return true; } //------------------------------------------------------------------- @@ -77,7 +78,7 @@ bool s3fs_destroy_global_ssl() // 
internal use struct for openssl struct CRYPTO_dynlock_value { - pthread_mutex_t dyn_mutex; + pthread_mutex_t dyn_mutex; }; static pthread_mutex_t* s3fs_crypt_mutex = NULL; @@ -85,136 +86,136 @@ static pthread_mutex_t* s3fs_crypt_mutex = NULL; static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line) __attribute__ ((unused)); static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line) { - if(s3fs_crypt_mutex){ - int res; - if(mode & CRYPTO_LOCK){ - if(0 != (res = pthread_mutex_lock(&s3fs_crypt_mutex[pos]))){ - S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res); - abort(); - } - }else{ - if(0 != (res = pthread_mutex_unlock(&s3fs_crypt_mutex[pos]))){ - S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", res); - abort(); - } + if(s3fs_crypt_mutex){ + int res; + if(mode & CRYPTO_LOCK){ + if(0 != (res = pthread_mutex_lock(&s3fs_crypt_mutex[pos]))){ + S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res); + abort(); + } + }else{ + if(0 != (res = pthread_mutex_unlock(&s3fs_crypt_mutex[pos]))){ + S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", res); + abort(); + } + } } - } } static unsigned long s3fs_crypt_get_threadid() __attribute__ ((unused)); static unsigned long s3fs_crypt_get_threadid() { - // For FreeBSD etc, some system's pthread_t is structure pointer. - // Then we use cast like C style(not C++) instead of ifdef. - return (unsigned long)(pthread_self()); + // For FreeBSD etc, some system's pthread_t is structure pointer. + // Then we use cast like C style(not C++) instead of ifdef. 
+ return (unsigned long)(pthread_self()); } static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line) __attribute__ ((unused)); static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line) { - struct CRYPTO_dynlock_value* dyndata = new CRYPTO_dynlock_value(); - pthread_mutexattr_t attr; - pthread_mutexattr_init(&attr); + struct CRYPTO_dynlock_value* dyndata = new CRYPTO_dynlock_value(); + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); #if S3FS_PTHREAD_ERRORCHECK - pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); #endif - int res; - if(0 != (res = pthread_mutex_init(&(dyndata->dyn_mutex), &attr))){ - S3FS_PRN_CRIT("pthread_mutex_init returned: %d", res); - return NULL; - } - return dyndata; + int res; + if(0 != (res = pthread_mutex_init(&(dyndata->dyn_mutex), &attr))){ + S3FS_PRN_CRIT("pthread_mutex_init returned: %d", res); + return NULL; + } + return dyndata; } static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line) __attribute__ ((unused)); static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line) { - if(dyndata){ - int res; - if(mode & CRYPTO_LOCK){ - if(0 != (res = pthread_mutex_lock(&(dyndata->dyn_mutex)))){ - S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res); - abort(); - } - }else{ - if(0 != (res = pthread_mutex_unlock(&(dyndata->dyn_mutex)))){ - S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", res); - abort(); - } + if(dyndata){ + int res; + if(mode & CRYPTO_LOCK){ + if(0 != (res = pthread_mutex_lock(&(dyndata->dyn_mutex)))){ + S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res); + abort(); + } + }else{ + if(0 != (res = pthread_mutex_unlock(&(dyndata->dyn_mutex)))){ + S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", res); + abort(); + } + } } - } } static void s3fs_destroy_dyn_crypt_mutex(struct 
CRYPTO_dynlock_value* dyndata, const char* file, int line) __attribute__ ((unused)); static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, const char* file, int line) { - if(dyndata){ - int res = pthread_mutex_destroy(&(dyndata->dyn_mutex)); - if(res != 0){ - S3FS_PRN_CRIT("failed to destroy dyn_mutex"); - abort(); + if(dyndata){ + int res = pthread_mutex_destroy(&(dyndata->dyn_mutex)); + if(res != 0){ + S3FS_PRN_CRIT("failed to destroy dyn_mutex"); + abort(); + } + delete dyndata; } - delete dyndata; - } } bool s3fs_init_crypt_mutex() { - if(s3fs_crypt_mutex){ - S3FS_PRN_DBG("s3fs_crypt_mutex is not NULL, destroy it."); - if(!s3fs_destroy_crypt_mutex()){ - S3FS_PRN_ERR("Failed to s3fs_crypt_mutex"); - return false; + if(s3fs_crypt_mutex){ + S3FS_PRN_DBG("s3fs_crypt_mutex is not NULL, destroy it."); + if(!s3fs_destroy_crypt_mutex()){ + S3FS_PRN_ERR("Failed to s3fs_crypt_mutex"); + return false; + } } - } - s3fs_crypt_mutex = new pthread_mutex_t[CRYPTO_num_locks()]; - pthread_mutexattr_t attr; - pthread_mutexattr_init(&attr); + s3fs_crypt_mutex = new pthread_mutex_t[CRYPTO_num_locks()]; + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); #if S3FS_PTHREAD_ERRORCHECK - pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); #endif - for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){ - int res = pthread_mutex_init(&s3fs_crypt_mutex[cnt], &attr); - if(res != 0){ - S3FS_PRN_CRIT("pthread_mutex_init returned: %d", res); - return false; + for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){ + int res = pthread_mutex_init(&s3fs_crypt_mutex[cnt], &attr); + if(res != 0){ + S3FS_PRN_CRIT("pthread_mutex_init returned: %d", res); + return false; + } } - } - // static lock - CRYPTO_set_locking_callback(s3fs_crypt_mutex_lock); - CRYPTO_set_id_callback(s3fs_crypt_get_threadid); - // dynamic lock - CRYPTO_set_dynlock_create_callback(s3fs_dyn_crypt_mutex); - 
CRYPTO_set_dynlock_lock_callback(s3fs_dyn_crypt_mutex_lock); - CRYPTO_set_dynlock_destroy_callback(s3fs_destroy_dyn_crypt_mutex); + // static lock + CRYPTO_set_locking_callback(s3fs_crypt_mutex_lock); + CRYPTO_set_id_callback(s3fs_crypt_get_threadid); + // dynamic lock + CRYPTO_set_dynlock_create_callback(s3fs_dyn_crypt_mutex); + CRYPTO_set_dynlock_lock_callback(s3fs_dyn_crypt_mutex_lock); + CRYPTO_set_dynlock_destroy_callback(s3fs_destroy_dyn_crypt_mutex); - return true; + return true; } bool s3fs_destroy_crypt_mutex() { - if(!s3fs_crypt_mutex){ - return true; - } - - CRYPTO_set_dynlock_destroy_callback(NULL); - CRYPTO_set_dynlock_lock_callback(NULL); - CRYPTO_set_dynlock_create_callback(NULL); - CRYPTO_set_id_callback(NULL); - CRYPTO_set_locking_callback(NULL); - - for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){ - int res = pthread_mutex_destroy(&s3fs_crypt_mutex[cnt]); - if(res != 0){ - S3FS_PRN_CRIT("failed to destroy s3fs_crypt_mutex[%d]", cnt); - abort(); + if(!s3fs_crypt_mutex){ + return true; } - } - CRYPTO_cleanup_all_ex_data(); - delete[] s3fs_crypt_mutex; - s3fs_crypt_mutex = NULL; - return true; + CRYPTO_set_dynlock_destroy_callback(NULL); + CRYPTO_set_dynlock_lock_callback(NULL); + CRYPTO_set_dynlock_create_callback(NULL); + CRYPTO_set_id_callback(NULL); + CRYPTO_set_locking_callback(NULL); + + for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){ + int res = pthread_mutex_destroy(&s3fs_crypt_mutex[cnt]); + if(res != 0){ + S3FS_PRN_CRIT("failed to destroy s3fs_crypt_mutex[%d]", cnt); + abort(); + } + } + CRYPTO_cleanup_all_ex_data(); + delete[] s3fs_crypt_mutex; + s3fs_crypt_mutex = NULL; + + return true; } //------------------------------------------------------------------- @@ -222,28 +223,28 @@ bool s3fs_destroy_crypt_mutex() //------------------------------------------------------------------- static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool 
is_sha256) { - if(!key || !data || !digest || !digestlen){ - return false; - } - (*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char); - *digest = new unsigned char[*digestlen]; - if(is_sha256){ - HMAC(EVP_sha256(), key, keylen, data, datalen, *digest, digestlen); - }else{ - HMAC(EVP_sha1(), key, keylen, data, datalen, *digest, digestlen); - } + if(!key || !data || !digest || !digestlen){ + return false; + } + (*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char); + *digest = new unsigned char[*digestlen]; + if(is_sha256){ + HMAC(EVP_sha256(), key, keylen, data, datalen, *digest, digestlen); + }else{ + HMAC(EVP_sha1(), key, keylen, data, datalen, *digest, digestlen); + } - return true; + return true; } bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen) { - return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false); + return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false); } bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen) { - return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true); + return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true); } //------------------------------------------------------------------- @@ -251,46 +252,46 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz //------------------------------------------------------------------- size_t get_md5_digest_length() { - return MD5_DIGEST_LENGTH; + return MD5_DIGEST_LENGTH; } unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size) { - MD5_CTX md5ctx; - char buf[512]; - ssize_t bytes; - unsigned char* result; + MD5_CTX md5ctx; + char buf[512]; + ssize_t bytes; + unsigned char* result; - if(-1 == size){ - struct stat st; - if(-1 == fstat(fd, &st)){ - return NULL; + if(-1 == size){ + struct stat st; + if(-1 == 
fstat(fd, &st)){ + return NULL; + } + size = static_cast(st.st_size); } - size = static_cast(st.st_size); - } - memset(buf, 0, 512); - MD5_Init(&md5ctx); - - for(ssize_t total = 0; total < size; total += bytes){ - bytes = 512 < (size - total) ? 512 : (size - total); - bytes = pread(fd, buf, bytes, start + total); - if(0 == bytes){ - // end of file - break; - }else if(-1 == bytes){ - // error - S3FS_PRN_ERR("file read error(%d)", errno); - return NULL; - } - MD5_Update(&md5ctx, buf, bytes); memset(buf, 0, 512); - } + MD5_Init(&md5ctx); - result = new unsigned char[get_md5_digest_length()]; - MD5_Final(result, &md5ctx); + for(ssize_t total = 0; total < size; total += bytes){ + bytes = 512 < (size - total) ? 512 : (size - total); + bytes = pread(fd, buf, bytes, start + total); + if(0 == bytes){ + // end of file + break; + }else if(-1 == bytes){ + // error + S3FS_PRN_ERR("file read error(%d)", errno); + return NULL; + } + MD5_Update(&md5ctx, buf, bytes); + memset(buf, 0, 512); + } - return result; + result = new unsigned char[get_md5_digest_length()]; + MD5_Final(result, &md5ctx); + + return result; } //------------------------------------------------------------------- @@ -298,71 +299,71 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size) //------------------------------------------------------------------- size_t get_sha256_digest_length() { - return SHA256_DIGEST_LENGTH; + return SHA256_DIGEST_LENGTH; } bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen) { - (*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char); - *digest = new unsigned char[*digestlen]; + (*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char); + *digest = new unsigned char[*digestlen]; - const EVP_MD* md = EVP_get_digestbyname("sha256"); - EVP_MD_CTX* mdctx = EVP_MD_CTX_create(); - EVP_DigestInit_ex(mdctx, md, NULL); - EVP_DigestUpdate(mdctx, data, datalen); - EVP_DigestFinal_ex(mdctx, *digest, digestlen); - 
EVP_MD_CTX_destroy(mdctx); + const EVP_MD* md = EVP_get_digestbyname("sha256"); + EVP_MD_CTX* mdctx = EVP_MD_CTX_create(); + EVP_DigestInit_ex(mdctx, md, NULL); + EVP_DigestUpdate(mdctx, data, datalen); + EVP_DigestFinal_ex(mdctx, *digest, digestlen); + EVP_MD_CTX_destroy(mdctx); - return true; + return true; } unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size) { - const EVP_MD* md = EVP_get_digestbyname("sha256"); - EVP_MD_CTX* sha256ctx; - char buf[512]; - ssize_t bytes; - unsigned char* result; + const EVP_MD* md = EVP_get_digestbyname("sha256"); + EVP_MD_CTX* sha256ctx; + char buf[512]; + ssize_t bytes; + unsigned char* result; - if(-1 == size){ - struct stat st; - if(-1 == fstat(fd, &st)){ - return NULL; + if(-1 == size){ + struct stat st; + if(-1 == fstat(fd, &st)){ + return NULL; + } + size = static_cast(st.st_size); } - size = static_cast(st.st_size); - } - sha256ctx = EVP_MD_CTX_create(); - EVP_DigestInit_ex(sha256ctx, md, NULL); + sha256ctx = EVP_MD_CTX_create(); + EVP_DigestInit_ex(sha256ctx, md, NULL); - memset(buf, 0, 512); - for(ssize_t total = 0; total < size; total += bytes){ - bytes = 512 < (size - total) ? 512 : (size - total); - bytes = pread(fd, buf, bytes, start + total); - if(0 == bytes){ - // end of file - break; - }else if(-1 == bytes){ - // error - S3FS_PRN_ERR("file read error(%d)", errno); - EVP_MD_CTX_destroy(sha256ctx); - return NULL; - } - EVP_DigestUpdate(sha256ctx, buf, bytes); memset(buf, 0, 512); - } - result = new unsigned char[get_sha256_digest_length()]; - EVP_DigestFinal_ex(sha256ctx, result, NULL); - EVP_MD_CTX_destroy(sha256ctx); + for(ssize_t total = 0; total < size; total += bytes){ + bytes = 512 < (size - total) ? 
512 : (size - total); + bytes = pread(fd, buf, bytes, start + total); + if(0 == bytes){ + // end of file + break; + }else if(-1 == bytes){ + // error + S3FS_PRN_ERR("file read error(%d)", errno); + EVP_MD_CTX_destroy(sha256ctx); + return NULL; + } + EVP_DigestUpdate(sha256ctx, buf, bytes); + memset(buf, 0, 512); + } + result = new unsigned char[get_sha256_digest_length()]; + EVP_DigestFinal_ex(sha256ctx, result, NULL); + EVP_MD_CTX_destroy(sha256ctx); - return result; + return result; } /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/psemaphore.h b/src/psemaphore.h index bbc8af8..402571a 100644 --- a/src/psemaphore.h +++ b/src/psemaphore.h @@ -21,29 +21,33 @@ #ifndef S3FS_SEMAPHORE_H_ #define S3FS_SEMAPHORE_H_ +//------------------------------------------------------------------- +// Class Semaphore +//------------------------------------------------------------------- // portability wrapper for sem_t since macOS does not implement it - #ifdef __APPLE__ #include class Semaphore { - public: - explicit Semaphore(int value) : value(value), sem(dispatch_semaphore_create(value)) {} - ~Semaphore() { - // macOS cannot destroy a semaphore with posts less than the initializer - for(int i = 0; i < get_value(); ++i){ - post(); - } - dispatch_release(sem); - } - void wait() { dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER); } - void post() { dispatch_semaphore_signal(sem); } - int get_value() const { return value; } - private: - const int value; - dispatch_semaphore_t sem; + public: + explicit Semaphore(int value) : value(value), sem(dispatch_semaphore_create(value)) {} + ~Semaphore() + { + // macOS cannot destroy a semaphore with posts less than the initializer + for(int i = 0; i < get_value(); ++i){ + post(); + } + dispatch_release(sem); + } + void 
wait() { dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER); } + void post() { dispatch_semaphore_signal(sem); } + int get_value() const { return value; } + + private: + const int value; + dispatch_semaphore_t sem; }; #else @@ -53,31 +57,33 @@ class Semaphore class Semaphore { - public: - explicit Semaphore(int value) : value(value) { sem_init(&mutex, 0, value); } - ~Semaphore() { sem_destroy(&mutex); } - void wait() - { - int r; - do { - r = sem_wait(&mutex); - } while (r == -1 && errno == EINTR); - } - void post() { sem_post(&mutex); } - int get_value() const { return value; } - private: - const int value; - sem_t mutex; + public: + explicit Semaphore(int value) : value(value) { sem_init(&mutex, 0, value); } + ~Semaphore() { sem_destroy(&mutex); } + void wait() + { + int r; + do { + r = sem_wait(&mutex); + } while (r == -1 && errno == EINTR); + } + void post() { sem_post(&mutex); } + int get_value() const { return value; } + + private: + const int value; + sem_t mutex; }; #endif #endif // S3FS_SEMAPHORE_H_ + /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/s3fs.cpp b/src/s3fs.cpp index d892f66..60d8bec 100644 --- a/src/s3fs.cpp +++ b/src/s3fs.cpp @@ -20,99 +20,49 @@ #include #include -#include #include -#include #include -#include -#include -#include -#include -#include -#include -#include #include -#include +#include #include -#include #include -#include -#include -#include -#include -#include #include "common.h" #include "s3fs.h" -#include "curl.h" -#include "cache.h" -#include "string_util.h" -#include "s3fs_util.h" +#include "metaheader.h" #include "fdcache.h" -#include "s3fs_auth.h" +#include "curl.h" +#include "curl_multi.h" +#include "s3objlist.h" +#include "cache.h" +#include "mvnode.h" #include "addhead.h" #include "sighandlers.h" 
+#include "s3fs_xml.h" +#include "s3fs_util.h" +#include "string_util.h" +#include "s3fs_auth.h" +#include "s3fs_help.h" +#include "mpu_util.h" using namespace std; //------------------------------------------------------------------- -// Define +// Symbols //------------------------------------------------------------------- -enum dirtype { - DIRTYPE_UNKNOWN = -1, - DIRTYPE_NEW = 0, - DIRTYPE_OLD = 1, - DIRTYPE_FOLDER = 2, - DIRTYPE_NOOBJ = 3, -}; - -static bool IS_REPLACEDIR(dirtype type) { return DIRTYPE_OLD == type || DIRTYPE_FOLDER == type || DIRTYPE_NOOBJ == type; } -static bool IS_RMTYPEDIR(dirtype type) { return DIRTYPE_OLD == type || DIRTYPE_FOLDER == type; } - #if !defined(ENOATTR) -#define ENOATTR ENODATA +#define ENOATTR ENODATA #endif -// -// Type of utility process mode -// -enum utility_incomp_type{ - NO_UTILITY_MODE = 0, // not utility mode - INCOMP_TYPE_LIST, // list of incomplete mpu - INCOMP_TYPE_ABORT // delete incomplete mpu +enum dirtype { + DIRTYPE_UNKNOWN = -1, + DIRTYPE_NEW = 0, + DIRTYPE_OLD = 1, + DIRTYPE_FOLDER = 2, + DIRTYPE_NOOBJ = 3, }; -//------------------------------------------------------------------- -// Structs -//------------------------------------------------------------------- -typedef struct incomplete_multipart_upload_info{ - string key; - string id; - string date; -}INCOMP_MPU_INFO; - -typedef std::list incomp_mpu_list_t; -typedef std::list readline_t; -typedef std::map kvmap_t; -typedef std::map bucketkvmap_t; - -//------------------------------------------------------------------- -// Global variables -//------------------------------------------------------------------- -bool foreground = false; -bool nomultipart = false; -bool pathrequeststyle = false; -bool complement_stat = false; -std::string program_name; -std::string service_path = "/"; -std::string host = "https://s3.amazonaws.com"; -std::string bucket; -std::string endpoint = "us-east-1"; -std::string cipher_suites; -std::string instance_name; -std::string 
aws_profile = "default"; - //------------------------------------------------------------------- // Static variables //------------------------------------------------------------------- @@ -124,8 +74,6 @@ static bool is_mp_umask = false;// default does not set. static std::string mountpoint; static std::string passwd_file; static std::string mimetype_file; -static utility_incomp_type utility_mode = NO_UTILITY_MODE; -static bool noxmlns = false; static bool nocopyapi = false; static bool norenameapi = false; static bool nonempty = false; @@ -155,6 +103,11 @@ static const std::string keyval_fields_type = "\t"; // special key for static const std::string aws_accesskeyid = "AWSAccessKeyId"; static const std::string aws_secretkey = "AWSSecretKey"; +//------------------------------------------------------------------- +// Global functions : prototype +//------------------------------------------------------------------- +int put_headers(const char* path, headers_t& meta, bool is_copy); // [NOTE] global function because this is called from FdEntity class + //------------------------------------------------------------------- // Static functions : prototype //------------------------------------------------------------------- @@ -171,16 +124,6 @@ static S3fsCurl* multi_head_retry_callback(S3fsCurl* s3fscurl); static int readdir_multi_head(const char* path, const S3ObjList& head, void* buf, fuse_fill_dir_t filler); static int list_bucket(const char* path, S3ObjList& head, const char* delimiter, bool check_content_only = false); static int directory_empty(const char* path); -static bool is_truncated(xmlDocPtr doc); -static int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, - const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head); -static int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head); -static bool GetXmlNsUrl(xmlDocPtr doc, string& nsurl); -static xmlChar* 
get_base_exp(xmlDocPtr doc, const char* exp); -static xmlChar* get_prefix(xmlDocPtr doc); -static xmlChar* get_next_marker(xmlDocPtr doc); -static char* get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path); -int put_headers(const char* path, headers_t& meta, bool is_copy); // [NOTE] global function because this is called from FdEntity class static int rename_large_object(const char* from, const char* to); static int create_file_object(const char* path, mode_t mode, uid_t uid, gid_t gid); static int create_directory_object(const char* path, mode_t mode, time_t time, uid_t uid, gid_t gid); @@ -189,15 +132,10 @@ static int rename_object_nocopy(const char* from, const char* to); static int clone_directory_object(const char* from, const char* to); static int rename_directory(const char* from, const char* to); static int remote_mountpath_exists(const char* path); -static xmlChar* get_exp_value_xml(xmlDocPtr doc, xmlXPathContextPtr ctx, const char* exp_key); -static void print_incomp_mpu_list(incomp_mpu_list_t& list); -static bool abort_incomp_mpu_list(incomp_mpu_list_t& list, time_t abort_time); -static bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list); static void free_xattrs(xattrs_t& xattrs); static bool parse_xattr_keyval(const std::string& xattrpair, string& key, PXATTRVAL& pval); static size_t parse_xattrs(const std::string& strxattrs, xattrs_t& xattrs); static std::string build_xattrs(const xattrs_t& xattrs); -static int s3fs_utility_processing(time_t abort_time); static int s3fs_check_service(); static int parse_passwd_file(bucketkvmap_t& resmap); static int check_for_aws_format(const kvmap_t& kvmap); @@ -209,7 +147,9 @@ static bool set_mountpoint_attribute(struct stat& mpst); static int set_bucket(const char* arg); static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_args* outargs); +//------------------------------------------------------------------- // fuse interface functions 
+//------------------------------------------------------------------- static int s3fs_getattr(const char* path, struct stat* stbuf); static int s3fs_readlink(const char* path, char* buf, size_t size); static int s3fs_mknod(const char* path, mode_t mode, dev_t rdev); @@ -249,48 +189,47 @@ static int s3fs_getxattr(const char* path, const char* name, char* value, size_t static int s3fs_listxattr(const char* path, char* list, size_t size); static int s3fs_removexattr(const char* path, const char* name); -//------------------------------------------------------------------- -// WTF8 macros -//------------------------------------------------------------------- -#define WTF8_ENCODE(ARG) \ - std::string ARG##_buf; \ - const char * ARG = _##ARG; \ - if (use_wtf8 && s3fs_wtf8_encode( _##ARG, 0 )) { \ - s3fs_wtf8_encode( _##ARG, &ARG##_buf); \ - ARG = ARG##_buf.c_str(); \ - } - //------------------------------------------------------------------- // Functions //------------------------------------------------------------------- +static bool IS_REPLACEDIR(dirtype type) +{ + return DIRTYPE_OLD == type || DIRTYPE_FOLDER == type || DIRTYPE_NOOBJ == type; +} + +static bool IS_RMTYPEDIR(dirtype type) +{ + return DIRTYPE_OLD == type || DIRTYPE_FOLDER == type; +} + static bool is_special_name_folder_object(const char* path) { - if(!support_compat_dir){ - // s3fs does not support compatibility directory type("_$folder$" etc) now, - // thus always returns false. - return false; - } - - if(!path || '\0' == path[0]){ - return false; - } - - string strpath = path; - headers_t header; - - if(string::npos == strpath.find("_$folder$", 0)){ - if('/' == strpath[strpath.length() - 1]){ - strpath = strpath.substr(0, strpath.length() - 1); + if(!support_compat_dir){ + // s3fs does not support compatibility directory type("_$folder$" etc) now, + // thus always returns false. 
+ return false; } - strpath += "_$folder$"; - } - S3fsCurl s3fscurl; - if(0 != s3fscurl.HeadRequest(strpath.c_str(), header)){ - return false; - } - header.clear(); - S3FS_MALLOCTRIM(0); - return true; + + if(!path || '\0' == path[0]){ + return false; + } + + string strpath = path; + headers_t header; + + if(string::npos == strpath.find("_$folder$", 0)){ + if('/' == strpath[strpath.length() - 1]){ + strpath = strpath.substr(0, strpath.length() - 1); + } + strpath += "_$folder$"; + } + S3fsCurl s3fscurl; + if(0 != s3fscurl.HeadRequest(strpath.c_str(), header)){ + return false; + } + header.clear(); + S3FS_MALLOCTRIM(0); + return true; } // [Detail] @@ -307,95 +246,95 @@ static bool is_special_name_folder_object(const char* path) // static int chk_dir_object_type(const char* path, string& newpath, string& nowpath, string& nowcache, headers_t* pmeta, dirtype* pDirType) { - dirtype TypeTmp; - int result = -1; - bool isforce = false; - dirtype* pType = pDirType ? pDirType : &TypeTmp; + dirtype TypeTmp; + int result = -1; + bool isforce = false; + dirtype* pType = pDirType ? pDirType : &TypeTmp; - // Normalize new path. - newpath = path; - if('/' != newpath[newpath.length() - 1]){ - string::size_type Pos; - if(string::npos != (Pos = newpath.find("_$folder$", 0))){ - newpath = newpath.substr(0, Pos); + // Normalize new path. + newpath = path; + if('/' != newpath[newpath.length() - 1]){ + string::size_type Pos; + if(string::npos != (Pos = newpath.find("_$folder$", 0))){ + newpath = newpath.substr(0, Pos); + } + newpath += "/"; } - newpath += "/"; - } - // Always check "dir/" at first. - if(0 == (result = get_object_attribute(newpath.c_str(), NULL, pmeta, false, &isforce))){ - // Found "dir/" cache --> Check for "_$folder$", "no dir object" - nowcache = newpath; - if(is_special_name_folder_object(newpath.c_str())){ // check support_compat_dir in this function - // "_$folder$" type. 
- (*pType) = DIRTYPE_FOLDER; - nowpath = newpath.substr(0, newpath.length() - 1) + "_$folder$"; // cut and add - }else if(isforce){ - // "no dir object" type. - (*pType) = DIRTYPE_NOOBJ; - nowpath = ""; - }else{ - nowpath = newpath; - if(0 < nowpath.length() && '/' == nowpath[nowpath.length() - 1]){ - // "dir/" type - (*pType) = DIRTYPE_NEW; - }else{ - // "dir" type - (*pType) = DIRTYPE_OLD; - } + // Always check "dir/" at first. + if(0 == (result = get_object_attribute(newpath.c_str(), NULL, pmeta, false, &isforce))){ + // Found "dir/" cache --> Check for "_$folder$", "no dir object" + nowcache = newpath; + if(is_special_name_folder_object(newpath.c_str())){ // check support_compat_dir in this function + // "_$folder$" type. + (*pType) = DIRTYPE_FOLDER; + nowpath = newpath.substr(0, newpath.length() - 1) + "_$folder$"; // cut and add + }else if(isforce){ + // "no dir object" type. + (*pType) = DIRTYPE_NOOBJ; + nowpath = ""; + }else{ + nowpath = newpath; + if(0 < nowpath.length() && '/' == nowpath[nowpath.length() - 1]){ + // "dir/" type + (*pType) = DIRTYPE_NEW; + }else{ + // "dir" type + (*pType) = DIRTYPE_OLD; + } + } + }else if(support_compat_dir){ + // Check "dir" when support_compat_dir is enabled + nowpath = newpath.substr(0, newpath.length() - 1); + if(0 == (result = get_object_attribute(nowpath.c_str(), NULL, pmeta, false, &isforce))){ + // Found "dir" cache --> this case is only "dir" type. + // Because, if object is "_$folder$" or "no dir object", the cache is "dir/" type. + // (But "no dir object" is checked here.) + nowcache = nowpath; + if(isforce){ + (*pType) = DIRTYPE_NOOBJ; + nowpath = ""; + }else{ + (*pType) = DIRTYPE_OLD; + } + }else{ + // Not found cache --> check for "_$folder$" and "no dir object". + // (come here is that support_compat_dir is enabled) + nowcache = ""; // This case is no cache. + nowpath += "_$folder$"; + if(is_special_name_folder_object(nowpath.c_str())){ + // "_$folder$" type. 
+ (*pType) = DIRTYPE_FOLDER; + result = 0; // result is OK. + }else if(-ENOTEMPTY == directory_empty(newpath.c_str())){ + // "no dir object" type. + (*pType) = DIRTYPE_NOOBJ; + nowpath = ""; // now path. + result = 0; // result is OK. + }else{ + // Error: Unknown type. + (*pType) = DIRTYPE_UNKNOWN; + newpath = ""; + nowpath = ""; + } + } } - }else if(support_compat_dir){ - // Check "dir" when support_compat_dir is enabled - nowpath = newpath.substr(0, newpath.length() - 1); - if(0 == (result = get_object_attribute(nowpath.c_str(), NULL, pmeta, false, &isforce))){ - // Found "dir" cache --> this case is only "dir" type. - // Because, if object is "_$folder$" or "no dir object", the cache is "dir/" type. - // (But "no dir object" is checked here.) - nowcache = nowpath; - if(isforce){ - (*pType) = DIRTYPE_NOOBJ; - nowpath = ""; - }else{ - (*pType) = DIRTYPE_OLD; - } - }else{ - // Not found cache --> check for "_$folder$" and "no dir object". - // (come here is that support_compat_dir is enabled) - nowcache = ""; // This case is no cache. - nowpath += "_$folder$"; - if(is_special_name_folder_object(nowpath.c_str())){ - // "_$folder$" type. - (*pType) = DIRTYPE_FOLDER; - result = 0; // result is OK. - }else if(-ENOTEMPTY == directory_empty(newpath.c_str())){ - // "no dir object" type. - (*pType) = DIRTYPE_NOOBJ; - nowpath = ""; // now path. - result = 0; // result is OK. - }else{ - // Error: Unknown type. 
- (*pType) = DIRTYPE_UNKNOWN; - newpath = ""; - nowpath = ""; - } - } - } - return result; + return result; } static int remove_old_type_dir(const string& path, dirtype type) { - if(IS_RMTYPEDIR(type)){ - S3fsCurl s3fscurl; - int result = s3fscurl.DeleteRequest(path.c_str()); - if(0 != result && -ENOENT != result){ - return result; + if(IS_RMTYPEDIR(type)){ + S3fsCurl s3fscurl; + int result = s3fscurl.DeleteRequest(path.c_str()); + if(0 != result && -ENOENT != result){ + return result; + } + // succeed removing or not found the directory + }else{ + // nothing to do } - // succeed removing or not found the directory - }else{ - // nothing to do - } - return 0; + return 0; } // @@ -410,144 +349,144 @@ static int remove_old_type_dir(const string& path, dirtype type) // static int get_object_attribute(const char* path, struct stat* pstbuf, headers_t* pmeta, bool overcheck, bool* pisforce, bool add_no_truncate_cache) { - int result = -1; - struct stat tmpstbuf; - struct stat* pstat = pstbuf ? pstbuf : &tmpstbuf; - headers_t tmpHead; - headers_t* pheader = pmeta ? pmeta : &tmpHead; - string strpath; - S3fsCurl s3fscurl; - bool forcedir = false; - string::size_type Pos; + int result = -1; + struct stat tmpstbuf; + struct stat* pstat = pstbuf ? pstbuf : &tmpstbuf; + headers_t tmpHead; + headers_t* pheader = pmeta ? pmeta : &tmpHead; + string strpath; + S3fsCurl s3fscurl; + bool forcedir = false; + string::size_type Pos; - S3FS_PRN_DBG("[path=%s]", path); + S3FS_PRN_DBG("[path=%s]", path); - if(!path || '\0' == path[0]){ - return -ENOENT; - } - - memset(pstat, 0, sizeof(struct stat)); - if(0 == strcmp(path, "/") || 0 == strcmp(path, ".")){ - pstat->st_nlink = 1; // see fuse faq - pstat->st_mode = mp_mode; - pstat->st_uid = is_s3fs_uid ? s3fs_uid : mp_uid; - pstat->st_gid = is_s3fs_gid ? s3fs_gid : mp_gid; - return 0; - } - - // Check cache. - pisforce = (NULL != pisforce ? 
pisforce : &forcedir); - (*pisforce) = false; - strpath = path; - if(support_compat_dir && overcheck && string::npos != (Pos = strpath.find("_$folder$", 0))){ - strpath = strpath.substr(0, Pos); - strpath += "/"; - } - if(StatCache::getStatCacheData()->GetStat(strpath, pstat, pheader, overcheck, pisforce)){ - StatCache::getStatCacheData()->ChangeNoTruncateFlag(strpath, add_no_truncate_cache); - return 0; - } - if(StatCache::getStatCacheData()->IsNoObjectCache(strpath)){ - // there is the path in the cache for no object, it is no object. - return -ENOENT; - } - - // At first, check path - strpath = path; - result = s3fscurl.HeadRequest(strpath.c_str(), (*pheader)); - s3fscurl.DestroyCurlHandle(); - - // if not found target path object, do over checking - if(0 != result){ - if(overcheck){ - // when support_compat_dir is disabled, strpath maybe have "_$folder$". - if('/' != strpath[strpath.length() - 1] && string::npos == strpath.find("_$folder$", 0)){ - // now path is "object", do check "object/" for over checking - strpath += "/"; - result = s3fscurl.HeadRequest(strpath.c_str(), (*pheader)); - s3fscurl.DestroyCurlHandle(); - } - if(support_compat_dir && 0 != result){ - // now path is "object/", do check "object_$folder$" for over checking - strpath = strpath.substr(0, strpath.length() - 1); - strpath += "_$folder$"; - result = s3fscurl.HeadRequest(strpath.c_str(), (*pheader)); - s3fscurl.DestroyCurlHandle(); - - if(0 != result){ - // cut "_$folder$" for over checking "no dir object" after here - if(string::npos != (Pos = strpath.find("_$folder$", 0))){ - strpath = strpath.substr(0, Pos); - } - } - } - } - if(support_compat_dir && 0 != result && string::npos == strpath.find("_$folder$", 0)){ - // now path is "object" or "object/", do check "no dir object" which is not object but has only children. 
- if('/' == strpath[strpath.length() - 1]){ - strpath = strpath.substr(0, strpath.length() - 1); - } - if(-ENOTEMPTY == directory_empty(strpath.c_str())){ - // found "no dir object". - strpath += "/"; - *pisforce = true; - result = 0; - } - } - }else{ - if(support_compat_dir && '/' != strpath[strpath.length() - 1] && string::npos == strpath.find("_$folder$", 0) && is_need_check_obj_detail(*pheader)){ - // check a case of that "object" does not have attribute and "object" is possible to be directory. - if(-ENOTEMPTY == directory_empty(strpath.c_str())){ - // found "no dir object". - strpath += "/"; - *pisforce = true; - result = 0; - } - } - } - - if(0 != result){ - // finally, "path" object did not find. Add no object cache. - strpath = path; // reset original - StatCache::getStatCacheData()->AddNoObjectCache(strpath); - return result; - } - - // if path has "_$folder$", need to cut it. - if(string::npos != (Pos = strpath.find("_$folder$", 0))){ - strpath = strpath.substr(0, Pos); - strpath += "/"; - } - - // Set into cache - // - // [NOTE] - // When add_no_truncate_cache is true, the stats is always cached. - // This cached stats is only removed by DelStat(). - // This is necessary for the case to access the attribute of opened file. - // (ex. getxattr() is called while writing to the opened file.) - // - if(add_no_truncate_cache || 0 != StatCache::getStatCacheData()->GetCacheSize()){ - // add into stat cache - if(!StatCache::getStatCacheData()->AddStat(strpath, (*pheader), forcedir, add_no_truncate_cache)){ - S3FS_PRN_ERR("failed adding stat cache [path=%s]", strpath.c_str()); - return -ENOENT; - } - if(!StatCache::getStatCacheData()->GetStat(strpath, pstat, pheader, overcheck, pisforce)){ - // There is not in cache.(why?) -> retry to convert. 
- if(!convert_header_to_stat(strpath.c_str(), (*pheader), pstat, forcedir)){ - S3FS_PRN_ERR("failed convert headers to stat[path=%s]", strpath.c_str()); + if(!path || '\0' == path[0]){ return -ENOENT; - } } - }else{ - // cache size is Zero -> only convert. - if(!convert_header_to_stat(strpath.c_str(), (*pheader), pstat, forcedir)){ - S3FS_PRN_ERR("failed convert headers to stat[path=%s]", strpath.c_str()); - return -ENOENT; + + memset(pstat, 0, sizeof(struct stat)); + if(0 == strcmp(path, "/") || 0 == strcmp(path, ".")){ + pstat->st_nlink = 1; // see fuse faq + pstat->st_mode = mp_mode; + pstat->st_uid = is_s3fs_uid ? s3fs_uid : mp_uid; + pstat->st_gid = is_s3fs_gid ? s3fs_gid : mp_gid; + return 0; } - } - return 0; + + // Check cache. + pisforce = (NULL != pisforce ? pisforce : &forcedir); + (*pisforce) = false; + strpath = path; + if(support_compat_dir && overcheck && string::npos != (Pos = strpath.find("_$folder$", 0))){ + strpath = strpath.substr(0, Pos); + strpath += "/"; + } + if(StatCache::getStatCacheData()->GetStat(strpath, pstat, pheader, overcheck, pisforce)){ + StatCache::getStatCacheData()->ChangeNoTruncateFlag(strpath, add_no_truncate_cache); + return 0; + } + if(StatCache::getStatCacheData()->IsNoObjectCache(strpath)){ + // there is the path in the cache for no object, it is no object. + return -ENOENT; + } + + // At first, check path + strpath = path; + result = s3fscurl.HeadRequest(strpath.c_str(), (*pheader)); + s3fscurl.DestroyCurlHandle(); + + // if not found target path object, do over checking + if(0 != result){ + if(overcheck){ + // when support_compat_dir is disabled, strpath maybe have "_$folder$". 
+ if('/' != strpath[strpath.length() - 1] && string::npos == strpath.find("_$folder$", 0)){ + // now path is "object", do check "object/" for over checking + strpath += "/"; + result = s3fscurl.HeadRequest(strpath.c_str(), (*pheader)); + s3fscurl.DestroyCurlHandle(); + } + if(support_compat_dir && 0 != result){ + // now path is "object/", do check "object_$folder$" for over checking + strpath = strpath.substr(0, strpath.length() - 1); + strpath += "_$folder$"; + result = s3fscurl.HeadRequest(strpath.c_str(), (*pheader)); + s3fscurl.DestroyCurlHandle(); + + if(0 != result){ + // cut "_$folder$" for over checking "no dir object" after here + if(string::npos != (Pos = strpath.find("_$folder$", 0))){ + strpath = strpath.substr(0, Pos); + } + } + } + } + if(support_compat_dir && 0 != result && string::npos == strpath.find("_$folder$", 0)){ + // now path is "object" or "object/", do check "no dir object" which is not object but has only children. + if('/' == strpath[strpath.length() - 1]){ + strpath = strpath.substr(0, strpath.length() - 1); + } + if(-ENOTEMPTY == directory_empty(strpath.c_str())){ + // found "no dir object". + strpath += "/"; + *pisforce = true; + result = 0; + } + } + }else{ + if(support_compat_dir && '/' != strpath[strpath.length() - 1] && string::npos == strpath.find("_$folder$", 0) && is_need_check_obj_detail(*pheader)){ + // check a case of that "object" does not have attribute and "object" is possible to be directory. + if(-ENOTEMPTY == directory_empty(strpath.c_str())){ + // found "no dir object". + strpath += "/"; + *pisforce = true; + result = 0; + } + } + } + + if(0 != result){ + // finally, "path" object did not find. Add no object cache. + strpath = path; // reset original + StatCache::getStatCacheData()->AddNoObjectCache(strpath); + return result; + } + + // if path has "_$folder$", need to cut it. 
+ if(string::npos != (Pos = strpath.find("_$folder$", 0))){ + strpath = strpath.substr(0, Pos); + strpath += "/"; + } + + // Set into cache + // + // [NOTE] + // When add_no_truncate_cache is true, the stats is always cached. + // This cached stats is only removed by DelStat(). + // This is necessary for the case to access the attribute of opened file. + // (ex. getxattr() is called while writing to the opened file.) + // + if(add_no_truncate_cache || 0 != StatCache::getStatCacheData()->GetCacheSize()){ + // add into stat cache + if(!StatCache::getStatCacheData()->AddStat(strpath, (*pheader), forcedir, add_no_truncate_cache)){ + S3FS_PRN_ERR("failed adding stat cache [path=%s]", strpath.c_str()); + return -ENOENT; + } + if(!StatCache::getStatCacheData()->GetStat(strpath, pstat, pheader, overcheck, pisforce)){ + // There is not in cache.(why?) -> retry to convert. + if(!convert_header_to_stat(strpath.c_str(), (*pheader), pstat, forcedir)){ + S3FS_PRN_ERR("failed convert headers to stat[path=%s]", strpath.c_str()); + return -ENOENT; + } + } + }else{ + // cache size is Zero -> only convert. + if(!convert_header_to_stat(strpath.c_str(), (*pheader), pstat, forcedir)){ + S3FS_PRN_ERR("failed convert headers to stat[path=%s]", strpath.c_str()); + return -ENOENT; + } + } + return 0; } // @@ -562,107 +501,107 @@ static int get_object_attribute(const char* path, struct stat* pstbuf, headers_t // static int check_object_access(const char* path, int mask, struct stat* pstbuf) { - int result; - struct stat st; - struct stat* pst = (pstbuf ? pstbuf : &st); - struct fuse_context* pcxt; + int result; + struct stat st; + struct stat* pst = (pstbuf ? pstbuf : &st); + struct fuse_context* pcxt; - S3FS_PRN_DBG("[path=%s]", path); + S3FS_PRN_DBG("[path=%s]", path); - if(NULL == (pcxt = fuse_get_context())){ - return -EIO; - } - if(0 != (result = get_object_attribute(path, pst))){ - // If there is not the target file(object), result is -ENOENT. 
- return result; - } - if(0 == pcxt->uid){ - // root is allowed all accessing. - return 0; - } - if(is_s3fs_uid && s3fs_uid == pcxt->uid){ - // "uid" user is allowed all accessing. - return 0; - } - if(F_OK == mask){ - // if there is a file, always return allowed. - return 0; - } - - // for "uid", "gid" option - uid_t obj_uid = (is_s3fs_uid ? s3fs_uid : pst->st_uid); - gid_t obj_gid = (is_s3fs_gid ? s3fs_gid : pst->st_gid); - - // compare file mode and uid/gid + mask. - mode_t mode; - mode_t base_mask = S_IRWXO; - if(is_s3fs_umask){ - // If umask is set, all object attributes set ~umask. - mode = ((S_IRWXU | S_IRWXG | S_IRWXO) & ~s3fs_umask); - }else{ - mode = pst->st_mode; - } - if(pcxt->uid == obj_uid){ - base_mask |= S_IRWXU; - } - if(pcxt->gid == obj_gid){ - base_mask |= S_IRWXG; - } - if(1 == is_uid_include_group(pcxt->uid, obj_gid)){ - base_mask |= S_IRWXG; - } - mode &= base_mask; - - if(X_OK == (mask & X_OK)){ - if(0 == (mode & (S_IXUSR | S_IXGRP | S_IXOTH))){ - return -EPERM; + if(NULL == (pcxt = fuse_get_context())){ + return -EIO; } - } - if(W_OK == (mask & W_OK)){ - if(0 == (mode & (S_IWUSR | S_IWGRP | S_IWOTH))){ - return -EACCES; + if(0 != (result = get_object_attribute(path, pst))){ + // If there is not the target file(object), result is -ENOENT. + return result; } - } - if(R_OK == (mask & R_OK)){ - if(0 == (mode & (S_IRUSR | S_IRGRP | S_IROTH))){ - return -EACCES; + if(0 == pcxt->uid){ + // root is allowed all accessing. + return 0; } - } - if(0 == mode){ - return -EACCES; - } - return 0; + if(is_s3fs_uid && s3fs_uid == pcxt->uid){ + // "uid" user is allowed all accessing. + return 0; + } + if(F_OK == mask){ + // if there is a file, always return allowed. + return 0; + } + + // for "uid", "gid" option + uid_t obj_uid = (is_s3fs_uid ? s3fs_uid : pst->st_uid); + gid_t obj_gid = (is_s3fs_gid ? s3fs_gid : pst->st_gid); + + // compare file mode and uid/gid + mask. 
+ mode_t mode; + mode_t base_mask = S_IRWXO; + if(is_s3fs_umask){ + // If umask is set, all object attributes set ~umask. + mode = ((S_IRWXU | S_IRWXG | S_IRWXO) & ~s3fs_umask); + }else{ + mode = pst->st_mode; + } + if(pcxt->uid == obj_uid){ + base_mask |= S_IRWXU; + } + if(pcxt->gid == obj_gid){ + base_mask |= S_IRWXG; + } + if(1 == is_uid_include_group(pcxt->uid, obj_gid)){ + base_mask |= S_IRWXG; + } + mode &= base_mask; + + if(X_OK == (mask & X_OK)){ + if(0 == (mode & (S_IXUSR | S_IXGRP | S_IXOTH))){ + return -EPERM; + } + } + if(W_OK == (mask & W_OK)){ + if(0 == (mode & (S_IWUSR | S_IWGRP | S_IWOTH))){ + return -EACCES; + } + } + if(R_OK == (mask & R_OK)){ + if(0 == (mode & (S_IRUSR | S_IRGRP | S_IROTH))){ + return -EACCES; + } + } + if(0 == mode){ + return -EACCES; + } + return 0; } static int check_object_owner(const char* path, struct stat* pstbuf) { - int result; - struct stat st; - struct stat* pst = (pstbuf ? pstbuf : &st); - struct fuse_context* pcxt; + int result; + struct stat st; + struct stat* pst = (pstbuf ? pstbuf : &st); + struct fuse_context* pcxt; - S3FS_PRN_DBG("[path=%s]", path); + S3FS_PRN_DBG("[path=%s]", path); - if(NULL == (pcxt = fuse_get_context())){ - return -EIO; - } - if(0 != (result = get_object_attribute(path, pst))){ - // If there is not the target file(object), result is -ENOENT. - return result; - } - // check owner - if(0 == pcxt->uid){ - // root is allowed all accessing. - return 0; - } - if(is_s3fs_uid && s3fs_uid == pcxt->uid){ - // "uid" user is allowed all accessing. - return 0; - } - if(pcxt->uid == pst->st_uid){ - return 0; - } - return -EPERM; + if(NULL == (pcxt = fuse_get_context())){ + return -EIO; + } + if(0 != (result = get_object_attribute(path, pst))){ + // If there is not the target file(object), result is -ENOENT. + return result; + } + // check owner + if(0 == pcxt->uid){ + // root is allowed all accessing. + return 0; + } + if(is_s3fs_uid && s3fs_uid == pcxt->uid){ + // "uid" user is allowed all accessing. 
+ return 0; + } + if(pcxt->uid == pst->st_uid){ + return 0; + } + return -EPERM; } // @@ -670,39 +609,39 @@ static int check_object_owner(const char* path, struct stat* pstbuf) // static int check_parent_object_access(const char* path, int mask) { - string parent; - int result; + string parent; + int result; - S3FS_PRN_DBG("[path=%s]", path); + S3FS_PRN_DBG("[path=%s]", path); - if(0 == strcmp(path, "/") || 0 == strcmp(path, ".")){ - // path is mount point. + if(0 == strcmp(path, "/") || 0 == strcmp(path, ".")){ + // path is mount point. + return 0; + } + if(X_OK == (mask & X_OK)){ + for(parent = mydirname(path); !parent.empty(); parent = mydirname(parent)){ + if(parent == "."){ + parent = "/"; + } + if(0 != (result = check_object_access(parent.c_str(), X_OK, NULL))){ + return result; + } + if(parent == "/" || parent == "."){ + break; + } + } + } + mask = (mask & ~X_OK); + if(0 != mask){ + parent = mydirname(path); + if(parent == "."){ + parent = "/"; + } + if(0 != (result = check_object_access(parent.c_str(), mask, NULL))){ + return result; + } + } return 0; - } - if(X_OK == (mask & X_OK)){ - for(parent = mydirname(path); !parent.empty(); parent = mydirname(parent)){ - if(parent == "."){ - parent = "/"; - } - if(0 != (result = check_object_access(parent.c_str(), X_OK, NULL))){ - return result; - } - if(parent == "/" || parent == "."){ - break; - } - } - } - mask = (mask & ~X_OK); - if(0 != mask){ - parent = mydirname(path); - if(parent == "."){ - parent = "/"; - } - if(0 != (result = check_object_access(parent.c_str(), mask, NULL))){ - return result; - } - } - return 0; } // @@ -710,1650 +649,1646 @@ static int check_parent_object_access(const char* path, int mask) // bool get_object_sse_type(const char* path, sse_type_t& ssetype, string& ssevalue) { - if(!path){ - return false; - } - - headers_t meta; - if(0 != get_object_attribute(path, NULL, &meta)){ - S3FS_PRN_ERR("Failed to get object(%s) headers", path); - return false; - } - - ssetype = 
sse_type_t::SSE_DISABLE; - ssevalue.erase(); - for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ - string key = (*iter).first; - if(0 == strcasecmp(key.c_str(), "x-amz-server-side-encryption") && 0 == strcasecmp((*iter).second.c_str(), "AES256")){ - ssetype = sse_type_t::SSE_S3; - }else if(0 == strcasecmp(key.c_str(), "x-amz-server-side-encryption-aws-kms-key-id")){ - ssetype = sse_type_t::SSE_KMS; - ssevalue = (*iter).second; - }else if(0 == strcasecmp(key.c_str(), "x-amz-server-side-encryption-customer-key-md5")){ - ssetype = sse_type_t::SSE_C; - ssevalue = (*iter).second; + if(!path){ + return false; } - } - return true; + + headers_t meta; + if(0 != get_object_attribute(path, NULL, &meta)){ + S3FS_PRN_ERR("Failed to get object(%s) headers", path); + return false; + } + + ssetype = sse_type_t::SSE_DISABLE; + ssevalue.erase(); + for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ + string key = (*iter).first; + if(0 == strcasecmp(key.c_str(), "x-amz-server-side-encryption") && 0 == strcasecmp((*iter).second.c_str(), "AES256")){ + ssetype = sse_type_t::SSE_S3; + }else if(0 == strcasecmp(key.c_str(), "x-amz-server-side-encryption-aws-kms-key-id")){ + ssetype = sse_type_t::SSE_KMS; + ssevalue = (*iter).second; + }else if(0 == strcasecmp(key.c_str(), "x-amz-server-side-encryption-customer-key-md5")){ + ssetype = sse_type_t::SSE_C; + ssevalue = (*iter).second; + } + } + return true; } static FdEntity* get_local_fent(const char* path, bool is_load) { - struct stat stobj; - FdEntity* ent; - headers_t meta; + struct stat stobj; + FdEntity* ent; + headers_t meta; - S3FS_PRN_INFO2("[path=%s]", path); + S3FS_PRN_INFO2("[path=%s]", path); - if(0 != get_object_attribute(path, &stobj, &meta)){ - return NULL; - } + if(0 != get_object_attribute(path, &stobj, &meta)){ + return NULL; + } - // open - time_t mtime = (!S_ISREG(stobj.st_mode) || S_ISLNK(stobj.st_mode)) ? -1 : stobj.st_mtime; - bool force_tmpfile = S_ISREG(stobj.st_mode) ? 
false : true; + // open + time_t mtime = (!S_ISREG(stobj.st_mode) || S_ISLNK(stobj.st_mode)) ? -1 : stobj.st_mtime; + bool force_tmpfile = S_ISREG(stobj.st_mode) ? false : true; - if(NULL == (ent = FdManager::get()->Open(path, &meta, stobj.st_size, mtime, force_tmpfile, true))){ - S3FS_PRN_ERR("Could not open file. errno(%d)", errno); - return NULL; - } - // load - if(is_load && !ent->OpenAndLoadAll(&meta)){ - S3FS_PRN_ERR("Could not load file. errno(%d)", errno); - FdManager::get()->Close(ent); - return NULL; - } - return ent; + if(NULL == (ent = FdManager::get()->Open(path, &meta, stobj.st_size, mtime, force_tmpfile, true))){ + S3FS_PRN_ERR("Could not open file. errno(%d)", errno); + return NULL; + } + // load + if(is_load && !ent->OpenAndLoadAll(&meta)){ + S3FS_PRN_ERR("Could not load file. errno(%d)", errno); + FdManager::get()->Close(ent); + return NULL; + } + return ent; } -/** - * create or update s3 meta - * ow_sse_flg is for over writing sse header by use_sse option. - * @return fuse return code - */ +// +// create or update s3 meta +// ow_sse_flg is for over writing sse header by use_sse option. +// @return fuse return code +// int put_headers(const char* path, headers_t& meta, bool is_copy) { - int result; - S3fsCurl s3fscurl(true); - struct stat buf; + int result; + S3fsCurl s3fscurl(true); + struct stat buf; - S3FS_PRN_INFO2("[path=%s]", path); + S3FS_PRN_INFO2("[path=%s]", path); - // files larger than 5GB must be modified via the multipart interface - // *** If there is not target object(a case of move command), - // get_object_attribute() returns error with initializing buf. - (void)get_object_attribute(path, &buf); + // files larger than 5GB must be modified via the multipart interface + // *** If there is not target object(a case of move command), + // get_object_attribute() returns error with initializing buf. 
+ (void)get_object_attribute(path, &buf); - if(buf.st_size >= FIVE_GB){ - // multipart - if(nocopyapi || nomultipart){ - return -EFBIG; // File too large + if(buf.st_size >= FIVE_GB){ + // multipart + if(nocopyapi || nomultipart){ + return -EFBIG; // File too large + } + if(0 != (result = s3fscurl.MultipartHeadRequest(path, buf.st_size, meta, is_copy))){ + return result; + } + }else{ + if(0 != (result = s3fscurl.PutHeadRequest(path, meta, is_copy))){ + return result; + } } - if(0 != (result = s3fscurl.MultipartHeadRequest(path, buf.st_size, meta, is_copy))){ - return result; - } - }else{ - if(0 != (result = s3fscurl.PutHeadRequest(path, meta, is_copy))){ - return result; - } - } - // [NOTE] - // if path is 'dir/', it does not have cache(could not open file for directory stat) - // - if('/' != path[strlen(path) - 1]){ - FdEntity* ent = NULL; - if(NULL == (ent = FdManager::get()->ExistOpen(path, -1, !FdManager::IsCacheDir()))){ - // no opened fd - if(FdManager::IsCacheDir()){ - // create cache file if be needed - ent = FdManager::get()->Open(path, &meta, buf.st_size, -1, false, true); - } + // [NOTE] + // if path is 'dir/', it does not have cache(could not open file for directory stat) + // + if('/' != path[strlen(path) - 1]){ + FdEntity* ent = NULL; + if(NULL == (ent = FdManager::get()->ExistOpen(path, -1, !FdManager::IsCacheDir()))){ + // no opened fd + if(FdManager::IsCacheDir()){ + // create cache file if be needed + ent = FdManager::get()->Open(path, &meta, buf.st_size, -1, false, true); + } + } + if(ent){ + time_t mtime = get_mtime(meta); + ent->SetMtime(mtime); + FdManager::get()->Close(ent); + } } - if(ent){ - time_t mtime = get_mtime(meta); - ent->SetMtime(mtime); - FdManager::get()->Close(ent); - } - } - - return 0; + return 0; } static int s3fs_getattr(const char* _path, struct stat* stbuf) { - WTF8_ENCODE(path) - int result; + WTF8_ENCODE(path) + int result; - S3FS_PRN_INFO("[path=%s]", path); + S3FS_PRN_INFO("[path=%s]", path); - // check parent 
directory attribute. - if(0 != (result = check_parent_object_access(path, X_OK))){ - return result; - } - if(0 != (result = check_object_access(path, F_OK, stbuf))){ - return result; - } - // If has already opened fd, the st_size should be instead. - // (See: Issue 241) - if(stbuf){ - FdEntity* ent; - - if(NULL != (ent = FdManager::get()->ExistOpen(path))){ - struct stat tmpstbuf; - if(ent->GetStats(tmpstbuf)){ - stbuf->st_size = tmpstbuf.st_size; - } - FdManager::get()->Close(ent); + // check parent directory attribute. + if(0 != (result = check_parent_object_access(path, X_OK))){ + return result; } - stbuf->st_blksize = 4096; - stbuf->st_blocks = get_blocks(stbuf->st_size); + if(0 != (result = check_object_access(path, F_OK, stbuf))){ + return result; + } + // If has already opened fd, the st_size should be instead. + // (See: Issue 241) + if(stbuf){ + FdEntity* ent; - S3FS_PRN_DBG("[path=%s] uid=%u, gid=%u, mode=%04o", path, (unsigned int)(stbuf->st_uid), (unsigned int)(stbuf->st_gid), stbuf->st_mode); - } - S3FS_MALLOCTRIM(0); + if(NULL != (ent = FdManager::get()->ExistOpen(path))){ + struct stat tmpstbuf; + if(ent->GetStats(tmpstbuf)){ + stbuf->st_size = tmpstbuf.st_size; + } + FdManager::get()->Close(ent); + } + stbuf->st_blksize = 4096; + stbuf->st_blocks = get_blocks(stbuf->st_size); - return result; + S3FS_PRN_DBG("[path=%s] uid=%u, gid=%u, mode=%04o", path, (unsigned int)(stbuf->st_uid), (unsigned int)(stbuf->st_gid), stbuf->st_mode); + } + S3FS_MALLOCTRIM(0); + + return result; } static int s3fs_readlink(const char* _path, char* buf, size_t size) { - if(!_path || !buf || 0 == size){ + if(!_path || !buf || 0 == size){ + return 0; + } + WTF8_ENCODE(path) + string strValue; + + // check symblic link cache + if(!StatCache::getStatCacheData()->GetSymlink(string(path), strValue)){ + // not found in cache, then open the path + FdEntity* ent; + if(NULL == (ent = get_local_fent(path))){ + S3FS_PRN_ERR("could not get fent(file=%s)", path); + return -EIO; + } + // 
Get size + off_t readsize; + if(!ent->GetSize(readsize)){ + S3FS_PRN_ERR("could not get file size(file=%s)", path); + FdManager::get()->Close(ent); + return -EIO; + } + if(static_cast(size) <= readsize){ + readsize = size - 1; + } + // Read + ssize_t ressize; + if(0 > (ressize = ent->Read(buf, 0, readsize))){ + S3FS_PRN_ERR("could not read file(file=%s, ressize=%zd)", path, ressize); + FdManager::get()->Close(ent); + return static_cast(ressize); + } + buf[ressize] = '\0'; + + // close + FdManager::get()->Close(ent); + + // check buf if it has space words. + strValue = trim(string(buf)); + + // decode wtf8. This will always be shorter + if(use_wtf8){ + strValue = s3fs_wtf8_decode(strValue); + } + + // add symblic link cache + if(!StatCache::getStatCacheData()->AddSymlink(string(path), strValue)){ + S3FS_PRN_ERR("failed to add symbolic link cache for %s", path); + } + } + // copy result + strncpy(buf, strValue.c_str(), size); + + S3FS_MALLOCTRIM(0); + return 0; - } - WTF8_ENCODE(path) - string strValue; - - // check symblic link cache - if(!StatCache::getStatCacheData()->GetSymlink(string(path), strValue)){ - // not found in cache, then open the path - FdEntity* ent; - if(NULL == (ent = get_local_fent(path))){ - S3FS_PRN_ERR("could not get fent(file=%s)", path); - return -EIO; - } - // Get size - off_t readsize; - if(!ent->GetSize(readsize)){ - S3FS_PRN_ERR("could not get file size(file=%s)", path); - FdManager::get()->Close(ent); - return -EIO; - } - if(static_cast(size) <= readsize){ - readsize = size - 1; - } - // Read - ssize_t ressize; - if(0 > (ressize = ent->Read(buf, 0, readsize))){ - S3FS_PRN_ERR("could not read file(file=%s, ressize=%zd)", path, ressize); - FdManager::get()->Close(ent); - return static_cast(ressize); - } - buf[ressize] = '\0'; - - // close - FdManager::get()->Close(ent); - - // check buf if it has space words. - strValue = trim(string(buf)); - - // decode wtf8. 
This will always be shorter - if(use_wtf8){ - strValue = s3fs_wtf8_decode(strValue); - } - - // add symblic link cache - if(!StatCache::getStatCacheData()->AddSymlink(string(path), strValue)){ - S3FS_PRN_ERR("failed to add symbolic link cache for %s", path); - } - } - // copy result - strncpy(buf, strValue.c_str(), size); - - S3FS_MALLOCTRIM(0); - - return 0; } static int do_create_bucket() { - S3FS_PRN_INFO2("/"); + S3FS_PRN_INFO2("/"); - FILE* ptmpfp; - int tmpfd; - if(endpoint == "us-east-1"){ - ptmpfp = NULL; - tmpfd = -1; - }else{ - if(NULL == (ptmpfp = tmpfile()) || - -1 == (tmpfd = fileno(ptmpfp)) || - 0 >= fprintf(ptmpfp, "\n" - " %s\n" - "", endpoint.c_str()) || - 0 != fflush(ptmpfp) || - -1 == fseek(ptmpfp, 0L, SEEK_SET)){ - S3FS_PRN_ERR("failed to create temporary file. err(%d)", errno); - if(ptmpfp){ + FILE* ptmpfp; + int tmpfd; + if(endpoint == "us-east-1"){ + ptmpfp = NULL; + tmpfd = -1; + }else{ + if(NULL == (ptmpfp = tmpfile()) || + -1 == (tmpfd = fileno(ptmpfp)) || + 0 >= fprintf(ptmpfp, "\n" + " %s\n" + "", endpoint.c_str()) || + 0 != fflush(ptmpfp) || + -1 == fseek(ptmpfp, 0L, SEEK_SET)) + { + S3FS_PRN_ERR("failed to create temporary file. err(%d)", errno); + if(ptmpfp){ + fclose(ptmpfp); + } + return (0 == errno ? -EIO : -errno); + } + } + + headers_t meta; + + S3fsCurl s3fscurl(true); + int res = s3fscurl.PutRequest("/", meta, tmpfd); + if(res < 0){ + long responseCode = s3fscurl.GetLastResponseCode(); + if((responseCode == 400 || responseCode == 403) && S3fsCurl::IsSignatureV4()){ + S3FS_PRN_ERR("Could not connect, so retry to connect by signature version 2."); + S3fsCurl::SetSignatureV4(false); + + // retry to check + s3fscurl.DestroyCurlHandle(); + res = s3fscurl.PutRequest("/", meta, tmpfd); + }else if(responseCode == 409){ + // bucket already exists + res = 0; + } + } + if(ptmpfp != NULL){ fclose(ptmpfp); - } - return (0 == errno ? 
-EIO : -errno); } - } - - headers_t meta; - - S3fsCurl s3fscurl(true); - int res = s3fscurl.PutRequest("/", meta, tmpfd); - if(res < 0){ - long responseCode = s3fscurl.GetLastResponseCode(); - if((responseCode == 400 || responseCode == 403) && S3fsCurl::IsSignatureV4()){ - S3FS_PRN_ERR("Could not connect, so retry to connect by signature version 2."); - S3fsCurl::SetSignatureV4(false); - - // retry to check - s3fscurl.DestroyCurlHandle(); - res = s3fscurl.PutRequest("/", meta, tmpfd); - }else if(responseCode == 409){ - // bucket already exists - res = 0; - } - } - if(ptmpfp != NULL){ - fclose(ptmpfp); - } - return res; + return res; } // common function for creation of a plain object static int create_file_object(const char* path, mode_t mode, uid_t uid, gid_t gid) { - S3FS_PRN_INFO2("[path=%s][mode=%04o]", path, mode); + S3FS_PRN_INFO2("[path=%s][mode=%04o]", path, mode); - time_t now = time(NULL); - headers_t meta; - meta["Content-Type"] = S3fsCurl::LookupMimeType(string(path)); - meta["x-amz-meta-uid"] = str(uid); - meta["x-amz-meta-gid"] = str(gid); - meta["x-amz-meta-mode"] = str(mode); - meta["x-amz-meta-ctime"] = str(now); - meta["x-amz-meta-mtime"] = str(now); + time_t now = time(NULL); + headers_t meta; + meta["Content-Type"] = S3fsCurl::LookupMimeType(string(path)); + meta["x-amz-meta-uid"] = str(uid); + meta["x-amz-meta-gid"] = str(gid); + meta["x-amz-meta-mode"] = str(mode); + meta["x-amz-meta-ctime"] = str(now); + meta["x-amz-meta-mtime"] = str(now); - S3fsCurl s3fscurl(true); - return s3fscurl.PutRequest(path, meta, -1); // fd=-1 means for creating zero byte object. + S3fsCurl s3fscurl(true); + return s3fscurl.PutRequest(path, meta, -1); // fd=-1 means for creating zero byte object. 
} static int s3fs_mknod(const char *_path, mode_t mode, dev_t rdev) { - WTF8_ENCODE(path) - int result; - struct fuse_context* pcxt; + WTF8_ENCODE(path) + int result; + struct fuse_context* pcxt; - S3FS_PRN_INFO("[path=%s][mode=%04o][dev=%llu]", path, mode, (unsigned long long)rdev); + S3FS_PRN_INFO("[path=%s][mode=%04o][dev=%llu]", path, mode, (unsigned long long)rdev); - if(NULL == (pcxt = fuse_get_context())){ - return -EIO; - } + if(NULL == (pcxt = fuse_get_context())){ + return -EIO; + } + + if(0 != (result = create_file_object(path, mode, pcxt->uid, pcxt->gid))){ + S3FS_PRN_ERR("could not create object for special file(result=%d)", result); + return result; + } + StatCache::getStatCacheData()->DelStat(path); + S3FS_MALLOCTRIM(0); - if(0 != (result = create_file_object(path, mode, pcxt->uid, pcxt->gid))){ - S3FS_PRN_ERR("could not create object for special file(result=%d)", result); return result; - } - StatCache::getStatCacheData()->DelStat(path); - S3FS_MALLOCTRIM(0); - - return result; } static int s3fs_create(const char* _path, mode_t mode, struct fuse_file_info* fi) { - WTF8_ENCODE(path) - int result; - struct fuse_context* pcxt; + WTF8_ENCODE(path) + int result; + struct fuse_context* pcxt; - S3FS_PRN_INFO("[path=%s][mode=%04o][flags=0x%x]", path, mode, fi->flags); + S3FS_PRN_INFO("[path=%s][mode=%04o][flags=0x%x]", path, mode, fi->flags); - if(NULL == (pcxt = fuse_get_context())){ - return -EIO; - } - - // check parent directory attribute. 
- if(0 != (result = check_parent_object_access(path, X_OK))){ - return result; - } - result = check_object_access(path, W_OK, NULL); - if(-ENOENT == result){ - if(0 != (result = check_parent_object_access(path, W_OK))){ - return result; + if(NULL == (pcxt = fuse_get_context())){ + return -EIO; } - }else if(0 != result){ - return result; - } - result = create_file_object(path, mode, pcxt->uid, pcxt->gid); - StatCache::getStatCacheData()->DelStat(path); - if(result != 0){ - return result; - } - - FdEntity* ent; - headers_t meta; - get_object_attribute(path, NULL, &meta, true, NULL, true); // no truncate cache - if(NULL == (ent = FdManager::get()->Open(path, &meta, 0, -1, false, true))){ + // check parent directory attribute. + if(0 != (result = check_parent_object_access(path, X_OK))){ + return result; + } + result = check_object_access(path, W_OK, NULL); + if(-ENOENT == result){ + if(0 != (result = check_parent_object_access(path, W_OK))){ + return result; + } + }else if(0 != result){ + return result; + } + result = create_file_object(path, mode, pcxt->uid, pcxt->gid); StatCache::getStatCacheData()->DelStat(path); - return -EIO; - } - fi->fh = ent->GetFd(); - S3FS_MALLOCTRIM(0); + if(result != 0){ + return result; + } - return 0; + FdEntity* ent; + headers_t meta; + get_object_attribute(path, NULL, &meta, true, NULL, true); // no truncate cache + if(NULL == (ent = FdManager::get()->Open(path, &meta, 0, -1, false, true))){ + StatCache::getStatCacheData()->DelStat(path); + return -EIO; + } + fi->fh = ent->GetFd(); + S3FS_MALLOCTRIM(0); + + return 0; } static int create_directory_object(const char* path, mode_t mode, time_t time, uid_t uid, gid_t gid) { - S3FS_PRN_INFO1("[path=%s][mode=%04o][time=%lld][uid=%u][gid=%u]", path, mode, static_cast(time), (unsigned int)uid, (unsigned int)gid); + S3FS_PRN_INFO1("[path=%s][mode=%04o][time=%lld][uid=%u][gid=%u]", path, mode, static_cast(time), (unsigned int)uid, (unsigned int)gid); - if(!path || '\0' == path[0]){ - return -1; 
- } - string tpath = path; - if('/' != tpath[tpath.length() - 1]){ - tpath += "/"; - } + if(!path || '\0' == path[0]){ + return -1; + } + string tpath = path; + if('/' != tpath[tpath.length() - 1]){ + tpath += "/"; + } - headers_t meta; - meta["x-amz-meta-uid"] = str(uid); - meta["x-amz-meta-gid"] = str(gid); - meta["x-amz-meta-mode"] = str(mode); - meta["x-amz-meta-ctime"] = str(time); - meta["x-amz-meta-mtime"] = str(time); + headers_t meta; + meta["x-amz-meta-uid"] = str(uid); + meta["x-amz-meta-gid"] = str(gid); + meta["x-amz-meta-mode"] = str(mode); + meta["x-amz-meta-ctime"] = str(time); + meta["x-amz-meta-mtime"] = str(time); - S3fsCurl s3fscurl; - return s3fscurl.PutRequest(tpath.c_str(), meta, -1); // fd=-1 means for creating zero byte object. + S3fsCurl s3fscurl; + return s3fscurl.PutRequest(tpath.c_str(), meta, -1); // fd=-1 means for creating zero byte object. } static int s3fs_mkdir(const char* _path, mode_t mode) { - WTF8_ENCODE(path) - int result; - struct fuse_context* pcxt; + WTF8_ENCODE(path) + int result; + struct fuse_context* pcxt; - S3FS_PRN_INFO("[path=%s][mode=%04o]", path, mode); + S3FS_PRN_INFO("[path=%s][mode=%04o]", path, mode); - if(NULL == (pcxt = fuse_get_context())){ - return -EIO; - } - - // check parent directory attribute. - if(0 != (result = check_parent_object_access(path, W_OK | X_OK))){ - return result; - } - if(-ENOENT != (result = check_object_access(path, F_OK, NULL))){ - if(0 == result){ - result = -EEXIST; + if(NULL == (pcxt = fuse_get_context())){ + return -EIO; } + + // check parent directory attribute. 
+ if(0 != (result = check_parent_object_access(path, W_OK | X_OK))){ + return result; + } + if(-ENOENT != (result = check_object_access(path, F_OK, NULL))){ + if(0 == result){ + result = -EEXIST; + } + return result; + } + + result = create_directory_object(path, mode, time(NULL), pcxt->uid, pcxt->gid); + StatCache::getStatCacheData()->DelStat(path); + S3FS_MALLOCTRIM(0); + return result; - } - - result = create_directory_object(path, mode, time(NULL), pcxt->uid, pcxt->gid); - StatCache::getStatCacheData()->DelStat(path); - S3FS_MALLOCTRIM(0); - - return result; } static int s3fs_unlink(const char* _path) { - WTF8_ENCODE(path) - int result; + WTF8_ENCODE(path) + int result; - S3FS_PRN_INFO("[path=%s]", path); + S3FS_PRN_INFO("[path=%s]", path); + + if(0 != (result = check_parent_object_access(path, W_OK | X_OK))){ + return result; + } + S3fsCurl s3fscurl; + result = s3fscurl.DeleteRequest(path); + FdManager::DeleteCacheFile(path); + StatCache::getStatCacheData()->DelStat(path); + StatCache::getStatCacheData()->DelSymlink(path); + S3FS_MALLOCTRIM(0); - if(0 != (result = check_parent_object_access(path, W_OK | X_OK))){ return result; - } - S3fsCurl s3fscurl; - result = s3fscurl.DeleteRequest(path); - FdManager::DeleteCacheFile(path); - StatCache::getStatCacheData()->DelStat(path); - StatCache::getStatCacheData()->DelSymlink(path); - S3FS_MALLOCTRIM(0); - - return result; } static int directory_empty(const char* path) { - int result; - S3ObjList head; + int result; + S3ObjList head; - if((result = list_bucket(path, head, "/", true)) != 0){ - S3FS_PRN_ERR("list_bucket returns error."); - return result; - } - if(!head.IsEmpty()){ - return -ENOTEMPTY; - } - return 0; + if((result = list_bucket(path, head, "/", true)) != 0){ + S3FS_PRN_ERR("list_bucket returns error."); + return result; + } + if(!head.IsEmpty()){ + return -ENOTEMPTY; + } + return 0; } static int s3fs_rmdir(const char* _path) { - WTF8_ENCODE(path) - int result; - string strpath; - struct stat stbuf; + 
WTF8_ENCODE(path) + int result; + string strpath; + struct stat stbuf; - S3FS_PRN_INFO("[path=%s]", path); + S3FS_PRN_INFO("[path=%s]", path); - if(0 != (result = check_parent_object_access(path, W_OK | X_OK))){ - return result; - } - - // directory must be empty - if(directory_empty(path) != 0){ - return -ENOTEMPTY; - } - - strpath = path; - if('/' != strpath[strpath.length() - 1]){ - strpath += "/"; - } - S3fsCurl s3fscurl; - result = s3fscurl.DeleteRequest(strpath.c_str()); - s3fscurl.DestroyCurlHandle(); - StatCache::getStatCacheData()->DelStat(strpath.c_str()); - - // double check for old version(before 1.63) - // The old version makes "dir" object, newer version makes "dir/". - // A case, there is only "dir", the first removing object is "dir/". - // Then "dir/" is not exists, but curl_delete returns 0. - // So need to check "dir" and should be removed it. - if('/' == strpath[strpath.length() - 1]){ - strpath = strpath.substr(0, strpath.length() - 1); - } - if(0 == get_object_attribute(strpath.c_str(), &stbuf, NULL, false)){ - if(S_ISDIR(stbuf.st_mode)){ - // Found "dir" object. - result = s3fscurl.DeleteRequest(strpath.c_str()); - s3fscurl.DestroyCurlHandle(); - StatCache::getStatCacheData()->DelStat(strpath.c_str()); + if(0 != (result = check_parent_object_access(path, W_OK | X_OK))){ + return result; } - } - // If there is no "dir" and "dir/" object(this case is made by s3cmd/s3sync), - // the cache key is "dir/". So we get error only once(delete "dir/"). - // check for "_$folder$" object. - // This processing is necessary for other S3 clients compatibility. 
- if(is_special_name_folder_object(strpath.c_str())){ - strpath += "_$folder$"; - result = s3fscurl.DeleteRequest(strpath.c_str()); - } - S3FS_MALLOCTRIM(0); + // directory must be empty + if(directory_empty(path) != 0){ + return -ENOTEMPTY; + } - return result; + strpath = path; + if('/' != strpath[strpath.length() - 1]){ + strpath += "/"; + } + S3fsCurl s3fscurl; + result = s3fscurl.DeleteRequest(strpath.c_str()); + s3fscurl.DestroyCurlHandle(); + StatCache::getStatCacheData()->DelStat(strpath.c_str()); + + // double check for old version(before 1.63) + // The old version makes "dir" object, newer version makes "dir/". + // A case, there is only "dir", the first removing object is "dir/". + // Then "dir/" is not exists, but curl_delete returns 0. + // So need to check "dir" and should be removed it. + if('/' == strpath[strpath.length() - 1]){ + strpath = strpath.substr(0, strpath.length() - 1); + } + if(0 == get_object_attribute(strpath.c_str(), &stbuf, NULL, false)){ + if(S_ISDIR(stbuf.st_mode)){ + // Found "dir" object. + result = s3fscurl.DeleteRequest(strpath.c_str()); + s3fscurl.DestroyCurlHandle(); + StatCache::getStatCacheData()->DelStat(strpath.c_str()); + } + } + // If there is no "dir" and "dir/" object(this case is made by s3cmd/s3sync), + // the cache key is "dir/". So we get error only once(delete "dir/"). + + // check for "_$folder$" object. + // This processing is necessary for other S3 clients compatibility. 
+ if(is_special_name_folder_object(strpath.c_str())){ + strpath += "_$folder$"; + result = s3fscurl.DeleteRequest(strpath.c_str()); + } + S3FS_MALLOCTRIM(0); + + return result; } static int s3fs_symlink(const char* _from, const char* _to) { - WTF8_ENCODE(from) - WTF8_ENCODE(to) - int result; - struct fuse_context* pcxt; + WTF8_ENCODE(from) + WTF8_ENCODE(to) + int result; + struct fuse_context* pcxt; - S3FS_PRN_INFO("[from=%s][to=%s]", from, to); + S3FS_PRN_INFO("[from=%s][to=%s]", from, to); - if(NULL == (pcxt = fuse_get_context())){ - return -EIO; - } - if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){ - return result; - } - if(-ENOENT != (result = check_object_access(to, F_OK, NULL))){ - if(0 == result){ - result = -EEXIST; + if(NULL == (pcxt = fuse_get_context())){ + return -EIO; + } + if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){ + return result; + } + if(-ENOENT != (result = check_object_access(to, F_OK, NULL))){ + if(0 == result){ + result = -EEXIST; + } + return result; } - return result; - } - time_t now = time(NULL); - headers_t headers; - headers["Content-Type"] = string("application/octet-stream"); // Static - headers["x-amz-meta-mode"] = str(S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO); - headers["x-amz-meta-ctime"] = str(now); - headers["x-amz-meta-mtime"] = str(now); - headers["x-amz-meta-uid"] = str(pcxt->uid); - headers["x-amz-meta-gid"] = str(pcxt->gid); + time_t now = time(NULL); + headers_t headers; + headers["Content-Type"] = string("application/octet-stream"); // Static + headers["x-amz-meta-mode"] = str(S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO); + headers["x-amz-meta-ctime"] = str(now); + headers["x-amz-meta-mtime"] = str(now); + headers["x-amz-meta-uid"] = str(pcxt->uid); + headers["x-amz-meta-gid"] = str(pcxt->gid); - // open tmpfile - FdEntity* ent; - if(NULL == (ent = FdManager::get()->Open(to, &headers, 0, -1, true, true))){ - S3FS_PRN_ERR("could not open tmpfile(errno=%d)", errno); - return -errno; - } - // 
write(without space words) - string strFrom = trim(string(from)); - ssize_t from_size = static_cast(strFrom.length()); - if(from_size != ent->Write(strFrom.c_str(), 0, from_size)){ - S3FS_PRN_ERR("could not write tmpfile(errno=%d)", errno); + // open tmpfile + FdEntity* ent; + if(NULL == (ent = FdManager::get()->Open(to, &headers, 0, -1, true, true))){ + S3FS_PRN_ERR("could not open tmpfile(errno=%d)", errno); + return -errno; + } + // write(without space words) + string strFrom = trim(string(from)); + ssize_t from_size = static_cast(strFrom.length()); + if(from_size != ent->Write(strFrom.c_str(), 0, from_size)){ + S3FS_PRN_ERR("could not write tmpfile(errno=%d)", errno); + FdManager::get()->Close(ent); + return -errno; + } + // upload + if(0 != (result = ent->Flush(true))){ + S3FS_PRN_WARN("could not upload tmpfile(result=%d)", result); + } FdManager::get()->Close(ent); - return -errno; - } - // upload - if(0 != (result = ent->Flush(true))){ - S3FS_PRN_WARN("could not upload tmpfile(result=%d)", result); - } - FdManager::get()->Close(ent); - StatCache::getStatCacheData()->DelStat(to); - if(!StatCache::getStatCacheData()->AddSymlink(string(to), strFrom)){ - S3FS_PRN_ERR("failed to add symbolic link cache for %s", to); - } - S3FS_MALLOCTRIM(0); + StatCache::getStatCacheData()->DelStat(to); + if(!StatCache::getStatCacheData()->AddSymlink(string(to), strFrom)){ + S3FS_PRN_ERR("failed to add symbolic link cache for %s", to); + } + S3FS_MALLOCTRIM(0); - return result; + return result; } static int rename_object(const char* from, const char* to) { - int result; - string s3_realpath; - headers_t meta; + int result; + string s3_realpath; + headers_t meta; - S3FS_PRN_INFO1("[from=%s][to=%s]", from , to); + S3FS_PRN_INFO1("[from=%s][to=%s]", from , to); + + if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){ + // not permit writing "to" object parent dir. 
+ return result; + } + if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){ + // not permit removing "from" object parent dir. + return result; + } + if(0 != (result = get_object_attribute(from, NULL, &meta))){ + return result; + } + s3_realpath = get_realpath(from); + + meta["x-amz-copy-source"] = urlEncode(service_path + bucket + s3_realpath); + meta["Content-Type"] = S3fsCurl::LookupMimeType(string(to)); + meta["x-amz-metadata-directive"] = "REPLACE"; + + if(0 != (result = put_headers(to, meta, true))){ + return result; + } + + FdManager::get()->Rename(from, to); + + // Remove file + result = s3fs_unlink(from); + + StatCache::getStatCacheData()->DelStat(to); + FdManager::DeleteCacheFile(to); - if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){ - // not permit writing "to" object parent dir. return result; - } - if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){ - // not permit removing "from" object parent dir. - return result; - } - if(0 != (result = get_object_attribute(from, NULL, &meta))){ - return result; - } - s3_realpath = get_realpath(from); - - meta["x-amz-copy-source"] = urlEncode(service_path + bucket + s3_realpath); - meta["Content-Type"] = S3fsCurl::LookupMimeType(string(to)); - meta["x-amz-metadata-directive"] = "REPLACE"; - - if(0 != (result = put_headers(to, meta, true))){ - return result; - } - - FdManager::get()->Rename(from, to); - - // Remove file - result = s3fs_unlink(from); - - StatCache::getStatCacheData()->DelStat(to); - FdManager::DeleteCacheFile(to); - - return result; } static int rename_object_nocopy(const char* from, const char* to) { - int result; + int result; - S3FS_PRN_INFO1("[from=%s][to=%s]", from , to); + S3FS_PRN_INFO1("[from=%s][to=%s]", from , to); - if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){ - // not permit writing "to" object parent dir. 
- return result; - } - if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){ - // not permit removing "from" object parent dir. - return result; - } + if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){ + // not permit writing "to" object parent dir. + return result; + } + if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){ + // not permit removing "from" object parent dir. + return result; + } - // open & load - FdEntity* ent; - if(NULL == (ent = get_local_fent(from, true))){ - S3FS_PRN_ERR("could not open and read file(%s)", from); - return -EIO; - } + // open & load + FdEntity* ent; + if(NULL == (ent = get_local_fent(from, true))){ + S3FS_PRN_ERR("could not open and read file(%s)", from); + return -EIO; + } - // Set header - if(!ent->SetContentType(to)){ - S3FS_PRN_ERR("could not set content-type for %s", to); - return -EIO; - } + // Set header + if(!ent->SetContentType(to)){ + S3FS_PRN_ERR("could not set content-type for %s", to); + return -EIO; + } - // upload - if(0 != (result = ent->RowFlush(to, true))){ - S3FS_PRN_ERR("could not upload file(%s): result=%d", to, result); + // upload + if(0 != (result = ent->RowFlush(to, true))){ + S3FS_PRN_ERR("could not upload file(%s): result=%d", to, result); + FdManager::get()->Close(ent); + return result; + } + + FdManager::get()->Rename(from, to); FdManager::get()->Close(ent); + + // Remove file + result = s3fs_unlink(from); + + // Stats + StatCache::getStatCacheData()->DelStat(to); + FdManager::DeleteCacheFile(to); + return result; - } - - FdManager::get()->Rename(from, to); - FdManager::get()->Close(ent); - - // Remove file - result = s3fs_unlink(from); - - // Stats - StatCache::getStatCacheData()->DelStat(to); - FdManager::DeleteCacheFile(to); - - return result; } static int rename_large_object(const char* from, const char* to) { - int result; - struct stat buf; - headers_t meta; + int result; + struct stat buf; + headers_t meta; - S3FS_PRN_INFO1("[from=%s][to=%s]", from , 
to); + S3FS_PRN_INFO1("[from=%s][to=%s]", from , to); + + if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){ + // not permit writing "to" object parent dir. + return result; + } + if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){ + // not permit removing "from" object parent dir. + return result; + } + if(0 != (result = get_object_attribute(from, &buf, &meta, false))){ + return result; + } + + S3fsCurl s3fscurl(true); + if(0 != (result = s3fscurl.MultipartRenameRequest(from, to, meta, buf.st_size))){ + return result; + } + s3fscurl.DestroyCurlHandle(); + + // Remove file + result = s3fs_unlink(from); + + StatCache::getStatCacheData()->DelStat(to); + FdManager::DeleteCacheFile(to); - if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){ - // not permit writing "to" object parent dir. return result; - } - if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){ - // not permit removing "from" object parent dir. - return result; - } - if(0 != (result = get_object_attribute(from, &buf, &meta, false))){ - return result; - } - - S3fsCurl s3fscurl(true); - if(0 != (result = s3fscurl.MultipartRenameRequest(from, to, meta, buf.st_size))){ - return result; - } - s3fscurl.DestroyCurlHandle(); - - // Remove file - result = s3fs_unlink(from); - - StatCache::getStatCacheData()->DelStat(to); - FdManager::DeleteCacheFile(to); - - return result; } static int clone_directory_object(const char* from, const char* to) { - int result = -1; - struct stat stbuf; + int result = -1; + struct stat stbuf; - S3FS_PRN_INFO1("[from=%s][to=%s]", from, to); + S3FS_PRN_INFO1("[from=%s][to=%s]", from, to); + + // get target's attributes + if(0 != (result = get_object_attribute(from, &stbuf))){ + return result; + } + result = create_directory_object(to, stbuf.st_mode, stbuf.st_mtime, stbuf.st_uid, stbuf.st_gid); + StatCache::getStatCacheData()->DelStat(to); - // get target's attributes - if(0 != (result = get_object_attribute(from, &stbuf))){ return 
result; - } - result = create_directory_object(to, stbuf.st_mode, stbuf.st_mtime, stbuf.st_uid, stbuf.st_gid); - StatCache::getStatCacheData()->DelStat(to); - - return result; } static int rename_directory(const char* from, const char* to) { - S3ObjList head; - s3obj_list_t headlist; - string strfrom = from ? from : ""; // from is without "/". - string strto = to ? to : ""; // to is without "/" too. - string basepath = strfrom + "/"; - string newpath; // should be from name(not used) - string nowcache; // now cache path(not used) - dirtype DirType; - bool normdir; - MVNODE* mn_head = NULL; - MVNODE* mn_tail = NULL; - MVNODE* mn_cur; - struct stat stbuf; - int result; - bool is_dir; + S3ObjList head; + s3obj_list_t headlist; + string strfrom = from ? from : ""; // from is without "/". + string strto = to ? to : ""; // to is without "/" too. + string basepath = strfrom + "/"; + string newpath; // should be from name(not used) + string nowcache; // now cache path(not used) + dirtype DirType; + bool normdir; + MVNODE* mn_head = NULL; + MVNODE* mn_tail = NULL; + MVNODE* mn_cur; + struct stat stbuf; + int result; + bool is_dir; - S3FS_PRN_INFO1("[from=%s][to=%s]", from, to); + S3FS_PRN_INFO1("[from=%s][to=%s]", from, to); - // - // Initiate and Add base directory into MVNODE struct. - // - strto += "/"; - if(0 == chk_dir_object_type(from, newpath, strfrom, nowcache, NULL, &DirType) && DIRTYPE_UNKNOWN != DirType){ - if(DIRTYPE_NOOBJ != DirType){ - normdir = false; - }else{ - normdir = true; - strfrom = from; // from directory is not removed, but from directory attr is needed. - } - if(NULL == (add_mvnode(&mn_head, &mn_tail, strfrom.c_str(), strto.c_str(), true, normdir))){ - return -ENOMEM; - } - }else{ - // Something wrong about "from" directory. - } - - // - // get a list of all the objects - // - // No delimiter is specified, the result(head) is all object keys. - // (CommonPrefixes is empty, but all object is listed in Key.) 
- if(0 != (result = list_bucket(basepath.c_str(), head, NULL))){ - S3FS_PRN_ERR("list_bucket returns error."); - return result; - } - head.GetNameList(headlist); // get name without "/". - S3ObjList::MakeHierarchizedList(headlist, false); // add hierarchized dir. - - s3obj_list_t::const_iterator liter; - for(liter = headlist.begin(); headlist.end() != liter; ++liter){ - // make "from" and "to" object name. - string from_name = basepath + (*liter); - string to_name = strto + (*liter); - string etag = head.GetETag((*liter).c_str()); - - // Check subdirectory. - StatCache::getStatCacheData()->HasStat(from_name, etag.c_str()); // Check ETag - if(0 != get_object_attribute(from_name.c_str(), &stbuf, NULL)){ - S3FS_PRN_WARN("failed to get %s object attribute.", from_name.c_str()); - continue; - } - if(S_ISDIR(stbuf.st_mode)){ - is_dir = true; - if(0 != chk_dir_object_type(from_name.c_str(), newpath, from_name, nowcache, NULL, &DirType) || DIRTYPE_UNKNOWN == DirType){ - S3FS_PRN_WARN("failed to get %s%s object directory type.", basepath.c_str(), (*liter).c_str()); - continue; - } - if(DIRTYPE_NOOBJ != DirType){ - normdir = false; - }else{ - normdir = true; - from_name = basepath + (*liter); // from directory is not removed, but from directory attr is needed. - } - }else{ - is_dir = false; - normdir = false; - } - - // push this one onto the stack - if(NULL == add_mvnode(&mn_head, &mn_tail, from_name.c_str(), to_name.c_str(), is_dir, normdir)){ - return -ENOMEM; - } - } - - // - // rename - // - // rename directory objects. 
- for(mn_cur = mn_head; mn_cur; mn_cur = mn_cur->next){ - if(mn_cur->is_dir && mn_cur->old_path && '\0' != mn_cur->old_path[0]){ - if(0 != (result = clone_directory_object(mn_cur->old_path, mn_cur->new_path))){ - S3FS_PRN_ERR("clone_directory_object returned an error(%d)", result); - free_mvnodes(mn_head); - return -EIO; - } - } - } - - // iterate over the list - copy the files with rename_object - // does a safe copy - copies first and then deletes old - for(mn_cur = mn_head; mn_cur; mn_cur = mn_cur->next){ - if(!mn_cur->is_dir){ - // TODO: call s3fs_rename instead? - if(!nocopyapi && !norenameapi){ - result = rename_object(mn_cur->old_path, mn_cur->new_path); - }else{ - result = rename_object_nocopy(mn_cur->old_path, mn_cur->new_path); - } - if(0 != result){ - S3FS_PRN_ERR("rename_object returned an error(%d)", result); - free_mvnodes(mn_head); - return -EIO; - } - } - } - - // Iterate over old the directories, bottoms up and remove - for(mn_cur = mn_tail; mn_cur; mn_cur = mn_cur->prev){ - if(mn_cur->is_dir && mn_cur->old_path && '\0' != mn_cur->old_path[0]){ - if(!(mn_cur->is_normdir)){ - if(0 != (result = s3fs_rmdir(mn_cur->old_path))){ - S3FS_PRN_ERR("s3fs_rmdir returned an error(%d)", result); - free_mvnodes(mn_head); - return -EIO; + // + // Initiate and Add base directory into MVNODE struct. + // + strto += "/"; + if(0 == chk_dir_object_type(from, newpath, strfrom, nowcache, NULL, &DirType) && DIRTYPE_UNKNOWN != DirType){ + if(DIRTYPE_NOOBJ != DirType){ + normdir = false; + }else{ + normdir = true; + strfrom = from; // from directory is not removed, but from directory attr is needed. } - }else{ - // cache clear. - StatCache::getStatCacheData()->DelStat(mn_cur->old_path); - } + if(NULL == (add_mvnode(&mn_head, &mn_tail, strfrom.c_str(), strto.c_str(), true, normdir))){ + return -ENOMEM; + } + }else{ + // Something wrong about "from" directory. 
} - } - free_mvnodes(mn_head); - return 0; + // + // get a list of all the objects + // + // No delimiter is specified, the result(head) is all object keys. + // (CommonPrefixes is empty, but all object is listed in Key.) + if(0 != (result = list_bucket(basepath.c_str(), head, NULL))){ + S3FS_PRN_ERR("list_bucket returns error."); + return result; + } + head.GetNameList(headlist); // get name without "/". + S3ObjList::MakeHierarchizedList(headlist, false); // add hierarchized dir. + + s3obj_list_t::const_iterator liter; + for(liter = headlist.begin(); headlist.end() != liter; ++liter){ + // make "from" and "to" object name. + string from_name = basepath + (*liter); + string to_name = strto + (*liter); + string etag = head.GetETag((*liter).c_str()); + + // Check subdirectory. + StatCache::getStatCacheData()->HasStat(from_name, etag.c_str()); // Check ETag + if(0 != get_object_attribute(from_name.c_str(), &stbuf, NULL)){ + S3FS_PRN_WARN("failed to get %s object attribute.", from_name.c_str()); + continue; + } + if(S_ISDIR(stbuf.st_mode)){ + is_dir = true; + if(0 != chk_dir_object_type(from_name.c_str(), newpath, from_name, nowcache, NULL, &DirType) || DIRTYPE_UNKNOWN == DirType){ + S3FS_PRN_WARN("failed to get %s%s object directory type.", basepath.c_str(), (*liter).c_str()); + continue; + } + if(DIRTYPE_NOOBJ != DirType){ + normdir = false; + }else{ + normdir = true; + from_name = basepath + (*liter); // from directory is not removed, but from directory attr is needed. + } + }else{ + is_dir = false; + normdir = false; + } + + // push this one onto the stack + if(NULL == add_mvnode(&mn_head, &mn_tail, from_name.c_str(), to_name.c_str(), is_dir, normdir)){ + return -ENOMEM; + } + } + + // + // rename + // + // rename directory objects. 
+ for(mn_cur = mn_head; mn_cur; mn_cur = mn_cur->next){ + if(mn_cur->is_dir && mn_cur->old_path && '\0' != mn_cur->old_path[0]){ + if(0 != (result = clone_directory_object(mn_cur->old_path, mn_cur->new_path))){ + S3FS_PRN_ERR("clone_directory_object returned an error(%d)", result); + free_mvnodes(mn_head); + return -EIO; + } + } + } + + // iterate over the list - copy the files with rename_object + // does a safe copy - copies first and then deletes old + for(mn_cur = mn_head; mn_cur; mn_cur = mn_cur->next){ + if(!mn_cur->is_dir){ + // TODO: call s3fs_rename instead? + if(!nocopyapi && !norenameapi){ + result = rename_object(mn_cur->old_path, mn_cur->new_path); + }else{ + result = rename_object_nocopy(mn_cur->old_path, mn_cur->new_path); + } + if(0 != result){ + S3FS_PRN_ERR("rename_object returned an error(%d)", result); + free_mvnodes(mn_head); + return -EIO; + } + } + } + + // Iterate over old the directories, bottoms up and remove + for(mn_cur = mn_tail; mn_cur; mn_cur = mn_cur->prev){ + if(mn_cur->is_dir && mn_cur->old_path && '\0' != mn_cur->old_path[0]){ + if(!(mn_cur->is_normdir)){ + if(0 != (result = s3fs_rmdir(mn_cur->old_path))){ + S3FS_PRN_ERR("s3fs_rmdir returned an error(%d)", result); + free_mvnodes(mn_head); + return -EIO; + } + }else{ + // cache clear. + StatCache::getStatCacheData()->DelStat(mn_cur->old_path); + } + } + } + free_mvnodes(mn_head); + + return 0; } static int s3fs_rename(const char* _from, const char* _to) { - WTF8_ENCODE(from) - WTF8_ENCODE(to) - struct stat buf; - int result; + WTF8_ENCODE(from) + WTF8_ENCODE(to) + struct stat buf; + int result; - S3FS_PRN_INFO("[from=%s][to=%s]", from, to); + S3FS_PRN_INFO("[from=%s][to=%s]", from, to); - if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){ - // not permit writing "to" object parent dir. - return result; - } - if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){ - // not permit removing "from" object parent dir. 
- return result; - } - if(0 != (result = get_object_attribute(from, &buf, NULL))){ - return result; - } - - // flush pending writes if file is open - FdEntity *entity = FdManager::get()->ExistOpen(from); - if(entity != NULL){ - if(0 != (result = entity->Flush(true))){ - S3FS_PRN_ERR("could not upload file(%s): result=%d", to, result); - return result; + if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){ + // not permit writing "to" object parent dir. + return result; + } + if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){ + // not permit removing "from" object parent dir. + return result; + } + if(0 != (result = get_object_attribute(from, &buf, NULL))){ + return result; } - StatCache::getStatCacheData()->DelStat(from); - FdManager::get()->Close(entity); - entity = NULL; - } - // files larger than 5GB must be modified via the multipart interface - if(S_ISDIR(buf.st_mode)){ - result = rename_directory(from, to); - }else if(!nomultipart && buf.st_size >= singlepart_copy_limit){ - result = rename_large_object(from, to); - }else{ - if(!nocopyapi && !norenameapi){ - result = rename_object(from, to); + // flush pending writes if file is open + FdEntity *entity = FdManager::get()->ExistOpen(from); + if(entity != NULL){ + if(0 != (result = entity->Flush(true))){ + S3FS_PRN_ERR("could not upload file(%s): result=%d", to, result); + return result; + } + StatCache::getStatCacheData()->DelStat(from); + FdManager::get()->Close(entity); + entity = NULL; + } + + // files larger than 5GB must be modified via the multipart interface + if(S_ISDIR(buf.st_mode)){ + result = rename_directory(from, to); + }else if(!nomultipart && buf.st_size >= singlepart_copy_limit){ + result = rename_large_object(from, to); }else{ - result = rename_object_nocopy(from, to); + if(!nocopyapi && !norenameapi){ + result = rename_object(from, to); + }else{ + result = rename_object_nocopy(from, to); + } } - } - S3FS_MALLOCTRIM(0); + S3FS_MALLOCTRIM(0); - return result; + return 
result; } static int s3fs_link(const char* _from, const char* _to) { - WTF8_ENCODE(from) - WTF8_ENCODE(to) - S3FS_PRN_INFO("[from=%s][to=%s]", from, to); - return -ENOTSUP; + WTF8_ENCODE(from) + WTF8_ENCODE(to) + S3FS_PRN_INFO("[from=%s][to=%s]", from, to); + return -ENOTSUP; } static int s3fs_chmod(const char* _path, mode_t mode) { - WTF8_ENCODE(path) - int result; - string strpath; - string newpath; - string nowcache; - headers_t meta; - struct stat stbuf; - dirtype nDirType = DIRTYPE_UNKNOWN; + WTF8_ENCODE(path) + int result; + string strpath; + string newpath; + string nowcache; + headers_t meta; + struct stat stbuf; + dirtype nDirType = DIRTYPE_UNKNOWN; - S3FS_PRN_INFO("[path=%s][mode=%04o]", path, mode); + S3FS_PRN_INFO("[path=%s][mode=%04o]", path, mode); - if(0 == strcmp(path, "/")){ - S3FS_PRN_ERR("Could not change mode for mount point."); - return -EIO; - } - if(0 != (result = check_parent_object_access(path, X_OK))){ - return result; - } - if(0 != (result = check_object_owner(path, &stbuf))){ - return result; - } - - if(S_ISDIR(stbuf.st_mode)){ - result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType); - }else{ - strpath = path; - nowcache = strpath; - result = get_object_attribute(strpath.c_str(), NULL, &meta); - } - if(0 != result){ - return result; - } - - if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){ - // Should rebuild directory object(except new type) - // Need to remove old dir("dir" etc) and make new dir("dir/") - - // At first, remove directory old object - if(0 != (result = remove_old_type_dir(strpath, nDirType))){ - return result; + if(0 == strcmp(path, "/")){ + S3FS_PRN_ERR("Could not change mode for mount point."); + return -EIO; } - StatCache::getStatCacheData()->DelStat(nowcache); - - // Make new directory object("dir/") - if(0 != (result = create_directory_object(newpath.c_str(), mode, stbuf.st_mtime, stbuf.st_uid, stbuf.st_gid))){ - return result; + if(0 != (result = check_parent_object_access(path, 
X_OK))){ + return result; + } + if(0 != (result = check_object_owner(path, &stbuf))){ + return result; } - }else{ - // normal object or directory object of newer version - headers_t updatemeta; - updatemeta["x-amz-meta-ctime"] = str(time(NULL)); - updatemeta["x-amz-meta-mode"] = str(mode); - updatemeta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str())); - updatemeta["x-amz-metadata-directive"] = "REPLACE"; - // check opened file handle. - // - // If the file starts uploading by multipart when the disk capacity is insufficient, - // we need to put these header after finishing upload. - // Or if the file is only open, we must update to FdEntity's internal meta. - // - FdEntity* ent; - if(NULL != (ent = FdManager::get()->ExistOpen(path, -1, true))){ - // the file is opened now. - if(ent->MergeOrgMeta(updatemeta)){ - // now uploading - // the meta is pending and accumulated to be put after the upload is complete. - S3FS_PRN_INFO("meta pending until upload is complete"); - }else{ - // allow to put header - // updatemeta already merged the orgmeta of the opened files. 
- if(0 != put_headers(strpath.c_str(), updatemeta, true)){ - return -EIO; + if(S_ISDIR(stbuf.st_mode)){ + result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType); + }else{ + strpath = path; + nowcache = strpath; + result = get_object_attribute(strpath.c_str(), NULL, &meta); + } + if(0 != result){ + return result; + } + + if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){ + // Should rebuild directory object(except new type) + // Need to remove old dir("dir" etc) and make new dir("dir/") + + // At first, remove directory old object + if(0 != (result = remove_old_type_dir(strpath, nDirType))){ + return result; } StatCache::getStatCacheData()->DelStat(nowcache); - } + // Make new directory object("dir/") + if(0 != (result = create_directory_object(newpath.c_str(), mode, stbuf.st_mtime, stbuf.st_uid, stbuf.st_gid))){ + return result; + } }else{ - // not opened file, then put headers - merge_headers(meta, updatemeta, true); - if(0 != put_headers(strpath.c_str(), meta, true)){ - return -EIO; - } - StatCache::getStatCacheData()->DelStat(nowcache); - } - } - S3FS_MALLOCTRIM(0); + // normal object or directory object of newer version + headers_t updatemeta; + updatemeta["x-amz-meta-ctime"] = str(time(NULL)); + updatemeta["x-amz-meta-mode"] = str(mode); + updatemeta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str())); + updatemeta["x-amz-metadata-directive"] = "REPLACE"; - return 0; + // check opened file handle. + // + // If the file starts uploading by multipart when the disk capacity is insufficient, + // we need to put these header after finishing upload. + // Or if the file is only open, we must update to FdEntity's internal meta. + // + FdEntity* ent; + if(NULL != (ent = FdManager::get()->ExistOpen(path, -1, true))){ + // the file is opened now. + if(ent->MergeOrgMeta(updatemeta)){ + // now uploading + // the meta is pending and accumulated to be put after the upload is complete. 
+ S3FS_PRN_INFO("meta pending until upload is complete"); + }else{ + // allow to put header + // updatemeta already merged the orgmeta of the opened files. + if(0 != put_headers(strpath.c_str(), updatemeta, true)){ + return -EIO; + } + StatCache::getStatCacheData()->DelStat(nowcache); + } + }else{ + // not opened file, then put headers + merge_headers(meta, updatemeta, true); + if(0 != put_headers(strpath.c_str(), meta, true)){ + return -EIO; + } + StatCache::getStatCacheData()->DelStat(nowcache); + } + } + S3FS_MALLOCTRIM(0); + + return 0; } static int s3fs_chmod_nocopy(const char* _path, mode_t mode) { - WTF8_ENCODE(path) - int result; - string strpath; - string newpath; - string nowcache; - struct stat stbuf; - dirtype nDirType = DIRTYPE_UNKNOWN; + WTF8_ENCODE(path) + int result; + string strpath; + string newpath; + string nowcache; + struct stat stbuf; + dirtype nDirType = DIRTYPE_UNKNOWN; - S3FS_PRN_INFO1("[path=%s][mode=%04o]", path, mode); + S3FS_PRN_INFO1("[path=%s][mode=%04o]", path, mode); - if(0 == strcmp(path, "/")){ - S3FS_PRN_ERR("Could not change mode for mount point."); - return -EIO; - } - if(0 != (result = check_parent_object_access(path, X_OK))){ + if(0 == strcmp(path, "/")){ + S3FS_PRN_ERR("Could not change mode for mount point."); + return -EIO; + } + if(0 != (result = check_parent_object_access(path, X_OK))){ + return result; + } + if(0 != (result = check_object_owner(path, &stbuf))){ + return result; + } + + // Get attributes + if(S_ISDIR(stbuf.st_mode)){ + result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType); + }else{ + strpath = path; + nowcache = strpath; + result = get_object_attribute(strpath.c_str(), NULL, NULL); + } + if(0 != result){ + return result; + } + + if(S_ISDIR(stbuf.st_mode)){ + // Should rebuild all directory object + // Need to remove old dir("dir" etc) and make new dir("dir/") + + // At first, remove directory old object + if(0 != (result = remove_old_type_dir(strpath, nDirType))){ + return 
result; + } + StatCache::getStatCacheData()->DelStat(nowcache); + + // Make new directory object("dir/") + if(0 != (result = create_directory_object(newpath.c_str(), mode, stbuf.st_mtime, stbuf.st_uid, stbuf.st_gid))){ + return result; + } + }else{ + // normal object or directory object of newer version + + // open & load + FdEntity* ent; + if(NULL == (ent = get_local_fent(strpath.c_str(), true))){ + S3FS_PRN_ERR("could not open and read file(%s)", strpath.c_str()); + return -EIO; + } + + ent->SetCtime(time(NULL)); + + // Change file mode + ent->SetMode(mode); + + // upload + if(0 != (result = ent->Flush(true))){ + S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result); + FdManager::get()->Close(ent); + return result; + } + FdManager::get()->Close(ent); + + StatCache::getStatCacheData()->DelStat(nowcache); + } + S3FS_MALLOCTRIM(0); + return result; - } - if(0 != (result = check_object_owner(path, &stbuf))){ - return result; - } - - // Get attributes - if(S_ISDIR(stbuf.st_mode)){ - result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType); - }else{ - strpath = path; - nowcache = strpath; - result = get_object_attribute(strpath.c_str(), NULL, NULL); - } - if(0 != result){ - return result; - } - - if(S_ISDIR(stbuf.st_mode)){ - // Should rebuild all directory object - // Need to remove old dir("dir" etc) and make new dir("dir/") - - // At first, remove directory old object - if(0 != (result = remove_old_type_dir(strpath, nDirType))){ - return result; - } - StatCache::getStatCacheData()->DelStat(nowcache); - - // Make new directory object("dir/") - if(0 != (result = create_directory_object(newpath.c_str(), mode, stbuf.st_mtime, stbuf.st_uid, stbuf.st_gid))){ - return result; - } - }else{ - // normal object or directory object of newer version - - // open & load - FdEntity* ent; - if(NULL == (ent = get_local_fent(strpath.c_str(), true))){ - S3FS_PRN_ERR("could not open and read file(%s)", strpath.c_str()); - return -EIO; - } - 
- ent->SetCtime(time(NULL)); - - // Change file mode - ent->SetMode(mode); - - // upload - if(0 != (result = ent->Flush(true))){ - S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result); - FdManager::get()->Close(ent); - return result; - } - FdManager::get()->Close(ent); - - StatCache::getStatCacheData()->DelStat(nowcache); - } - S3FS_MALLOCTRIM(0); - - return result; } static int s3fs_chown(const char* _path, uid_t uid, gid_t gid) { - WTF8_ENCODE(path) - int result; - string strpath; - string newpath; - string nowcache; - headers_t meta; - struct stat stbuf; - dirtype nDirType = DIRTYPE_UNKNOWN; + WTF8_ENCODE(path) + int result; + string strpath; + string newpath; + string nowcache; + headers_t meta; + struct stat stbuf; + dirtype nDirType = DIRTYPE_UNKNOWN; - S3FS_PRN_INFO("[path=%s][uid=%u][gid=%u]", path, (unsigned int)uid, (unsigned int)gid); + S3FS_PRN_INFO("[path=%s][uid=%u][gid=%u]", path, (unsigned int)uid, (unsigned int)gid); - if(0 == strcmp(path, "/")){ - S3FS_PRN_ERR("Could not change owner for mount point."); - return -EIO; - } - if(0 != (result = check_parent_object_access(path, X_OK))){ - return result; - } - if(0 != (result = check_object_owner(path, &stbuf))){ - return result; - } - - if((uid_t)(-1) == uid){ - uid = stbuf.st_uid; - } - if((gid_t)(-1) == gid){ - gid = stbuf.st_gid; - } - if(S_ISDIR(stbuf.st_mode)){ - result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType); - }else{ - strpath = path; - nowcache = strpath; - result = get_object_attribute(strpath.c_str(), NULL, &meta); - } - if(0 != result){ - return result; - } - - if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){ - // Should rebuild directory object(except new type) - // Need to remove old dir("dir" etc) and make new dir("dir/") - - // At first, remove directory old object - if(0 != (result = remove_old_type_dir(strpath, nDirType))){ - return result; + if(0 == strcmp(path, "/")){ + S3FS_PRN_ERR("Could not change owner for mount 
point."); + return -EIO; } - StatCache::getStatCacheData()->DelStat(nowcache); - - // Make new directory object("dir/") - if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, stbuf.st_mtime, uid, gid))){ - return result; + if(0 != (result = check_parent_object_access(path, X_OK))){ + return result; + } + if(0 != (result = check_object_owner(path, &stbuf))){ + return result; } - }else{ - headers_t updatemeta; - updatemeta["x-amz-meta-ctime"] = str(time(NULL)); - updatemeta["x-amz-meta-uid"] = str(uid); - updatemeta["x-amz-meta-gid"] = str(gid); - updatemeta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str())); - updatemeta["x-amz-metadata-directive"] = "REPLACE"; - // check opened file handle. - // - // If the file starts uploading by multipart when the disk capacity is insufficient, - // we need to put these header after finishing upload. - // Or if the file is only open, we must update to FdEntity's internal meta. - // - FdEntity* ent; - if(NULL != (ent = FdManager::get()->ExistOpen(path, -1, true))){ - // the file is opened now. - if(ent->MergeOrgMeta(updatemeta)){ - // now uploading - // the meta is pending and accumulated to be put after the upload is complete. - S3FS_PRN_INFO("meta pending until upload is complete"); - }else{ - // allow to put header - // updatemeta already merged the orgmeta of the opened files. 
- if(0 != put_headers(strpath.c_str(), updatemeta, true)){ - return -EIO; + if((uid_t)(-1) == uid){ + uid = stbuf.st_uid; + } + if((gid_t)(-1) == gid){ + gid = stbuf.st_gid; + } + if(S_ISDIR(stbuf.st_mode)){ + result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType); + }else{ + strpath = path; + nowcache = strpath; + result = get_object_attribute(strpath.c_str(), NULL, &meta); + } + if(0 != result){ + return result; + } + + if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){ + // Should rebuild directory object(except new type) + // Need to remove old dir("dir" etc) and make new dir("dir/") + + // At first, remove directory old object + if(0 != (result = remove_old_type_dir(strpath, nDirType))){ + return result; } StatCache::getStatCacheData()->DelStat(nowcache); - } + // Make new directory object("dir/") + if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, stbuf.st_mtime, uid, gid))){ + return result; + } }else{ - // not opened file, then put headers - merge_headers(meta, updatemeta, true); - if(0 != put_headers(strpath.c_str(), meta, true)){ - return -EIO; - } - StatCache::getStatCacheData()->DelStat(nowcache); - } - } - S3FS_MALLOCTRIM(0); + headers_t updatemeta; + updatemeta["x-amz-meta-ctime"] = str(time(NULL)); + updatemeta["x-amz-meta-uid"] = str(uid); + updatemeta["x-amz-meta-gid"] = str(gid); + updatemeta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str())); + updatemeta["x-amz-metadata-directive"] = "REPLACE"; - return 0; + // check opened file handle. + // + // If the file starts uploading by multipart when the disk capacity is insufficient, + // we need to put these header after finishing upload. + // Or if the file is only open, we must update to FdEntity's internal meta. + // + FdEntity* ent; + if(NULL != (ent = FdManager::get()->ExistOpen(path, -1, true))){ + // the file is opened now. 
+ if(ent->MergeOrgMeta(updatemeta)){ + // now uploading + // the meta is pending and accumulated to be put after the upload is complete. + S3FS_PRN_INFO("meta pending until upload is complete"); + }else{ + // allow to put header + // updatemeta already merged the orgmeta of the opened files. + if(0 != put_headers(strpath.c_str(), updatemeta, true)){ + return -EIO; + } + StatCache::getStatCacheData()->DelStat(nowcache); + } + }else{ + // not opened file, then put headers + merge_headers(meta, updatemeta, true); + if(0 != put_headers(strpath.c_str(), meta, true)){ + return -EIO; + } + StatCache::getStatCacheData()->DelStat(nowcache); + } + } + S3FS_MALLOCTRIM(0); + + return 0; } static int s3fs_chown_nocopy(const char* _path, uid_t uid, gid_t gid) { - WTF8_ENCODE(path) - int result; - string strpath; - string newpath; - string nowcache; - struct stat stbuf; - dirtype nDirType = DIRTYPE_UNKNOWN; + WTF8_ENCODE(path) + int result; + string strpath; + string newpath; + string nowcache; + struct stat stbuf; + dirtype nDirType = DIRTYPE_UNKNOWN; - S3FS_PRN_INFO1("[path=%s][uid=%u][gid=%u]", path, (unsigned int)uid, (unsigned int)gid); + S3FS_PRN_INFO1("[path=%s][uid=%u][gid=%u]", path, (unsigned int)uid, (unsigned int)gid); - if(0 == strcmp(path, "/")){ - S3FS_PRN_ERR("Could not change owner for mount point."); - return -EIO; - } - if(0 != (result = check_parent_object_access(path, X_OK))){ + if(0 == strcmp(path, "/")){ + S3FS_PRN_ERR("Could not change owner for mount point."); + return -EIO; + } + if(0 != (result = check_parent_object_access(path, X_OK))){ + return result; + } + if(0 != (result = check_object_owner(path, &stbuf))){ + return result; + } + + if((uid_t)(-1) == uid){ + uid = stbuf.st_uid; + } + if((gid_t)(-1) == gid){ + gid = stbuf.st_gid; + } + + // Get attributes + if(S_ISDIR(stbuf.st_mode)){ + result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType); + }else{ + strpath = path; + nowcache = strpath; + result = 
get_object_attribute(strpath.c_str(), NULL, NULL); + } + if(0 != result){ + return result; + } + + if(S_ISDIR(stbuf.st_mode)){ + // Should rebuild all directory object + // Need to remove old dir("dir" etc) and make new dir("dir/") + + // At first, remove directory old object + if(0 != (result = remove_old_type_dir(strpath, nDirType))){ + return result; + } + StatCache::getStatCacheData()->DelStat(nowcache); + + // Make new directory object("dir/") + if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, stbuf.st_mtime, uid, gid))){ + return result; + } + }else{ + // normal object or directory object of newer version + + // open & load + FdEntity* ent; + if(NULL == (ent = get_local_fent(strpath.c_str(), true))){ + S3FS_PRN_ERR("could not open and read file(%s)", strpath.c_str()); + return -EIO; + } + + ent->SetCtime(time(NULL)); + + // Change owner + ent->SetUId(uid); + ent->SetGId(gid); + + // upload + if(0 != (result = ent->Flush(true))){ + S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result); + FdManager::get()->Close(ent); + return result; + } + FdManager::get()->Close(ent); + + StatCache::getStatCacheData()->DelStat(nowcache); + } + S3FS_MALLOCTRIM(0); + return result; - } - if(0 != (result = check_object_owner(path, &stbuf))){ - return result; - } - - if((uid_t)(-1) == uid){ - uid = stbuf.st_uid; - } - if((gid_t)(-1) == gid){ - gid = stbuf.st_gid; - } - - // Get attributes - if(S_ISDIR(stbuf.st_mode)){ - result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType); - }else{ - strpath = path; - nowcache = strpath; - result = get_object_attribute(strpath.c_str(), NULL, NULL); - } - if(0 != result){ - return result; - } - - if(S_ISDIR(stbuf.st_mode)){ - // Should rebuild all directory object - // Need to remove old dir("dir" etc) and make new dir("dir/") - - // At first, remove directory old object - if(0 != (result = remove_old_type_dir(strpath, nDirType))){ - return result; - } - 
StatCache::getStatCacheData()->DelStat(nowcache); - - // Make new directory object("dir/") - if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, stbuf.st_mtime, uid, gid))){ - return result; - } - }else{ - // normal object or directory object of newer version - - // open & load - FdEntity* ent; - if(NULL == (ent = get_local_fent(strpath.c_str(), true))){ - S3FS_PRN_ERR("could not open and read file(%s)", strpath.c_str()); - return -EIO; - } - - ent->SetCtime(time(NULL)); - - // Change owner - ent->SetUId(uid); - ent->SetGId(gid); - - // upload - if(0 != (result = ent->Flush(true))){ - S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result); - FdManager::get()->Close(ent); - return result; - } - FdManager::get()->Close(ent); - - StatCache::getStatCacheData()->DelStat(nowcache); - } - S3FS_MALLOCTRIM(0); - - return result; } static int s3fs_utimens(const char* _path, const struct timespec ts[2]) { - WTF8_ENCODE(path) - int result; - string strpath; - string newpath; - string nowcache; - headers_t meta; - struct stat stbuf; - dirtype nDirType = DIRTYPE_UNKNOWN; + WTF8_ENCODE(path) + int result; + string strpath; + string newpath; + string nowcache; + headers_t meta; + struct stat stbuf; + dirtype nDirType = DIRTYPE_UNKNOWN; - S3FS_PRN_INFO("[path=%s][mtime=%lld]", path, static_cast(ts[1].tv_sec)); + S3FS_PRN_INFO("[path=%s][mtime=%lld]", path, static_cast(ts[1].tv_sec)); - if(0 == strcmp(path, "/")){ - S3FS_PRN_ERR("Could not change mtime for mount point."); - return -EIO; - } - if(0 != (result = check_parent_object_access(path, X_OK))){ - return result; - } - if(0 != (result = check_object_access(path, W_OK, &stbuf))){ - if(0 != check_object_owner(path, &stbuf)){ - return result; + if(0 == strcmp(path, "/")){ + S3FS_PRN_ERR("Could not change mtime for mount point."); + return -EIO; } - } - - if(S_ISDIR(stbuf.st_mode)){ - result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType); - }else{ - strpath = 
path; - nowcache = strpath; - result = get_object_attribute(strpath.c_str(), NULL, &meta); - } - if(0 != result){ - return result; - } - - if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){ - // Should rebuild directory object(except new type) - // Need to remove old dir("dir" etc) and make new dir("dir/") - - // At first, remove directory old object - if(0 != (result = remove_old_type_dir(strpath, nDirType))){ - return result; + if(0 != (result = check_parent_object_access(path, X_OK))){ + return result; } - StatCache::getStatCacheData()->DelStat(nowcache); - - // Make new directory object("dir/") - if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, ts[1].tv_sec, stbuf.st_uid, stbuf.st_gid))){ - return result; + if(0 != (result = check_object_access(path, W_OK, &stbuf))){ + if(0 != check_object_owner(path, &stbuf)){ + return result; + } } - }else{ - headers_t updatemeta; - updatemeta["x-amz-meta-mtime"] = str(ts[1].tv_sec); - updatemeta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str())); - updatemeta["x-amz-metadata-directive"] = "REPLACE"; - // check opened file handle. - // - // If the file starts uploading by multipart when the disk capacity is insufficient, - // we need to put these header after finishing upload. - // Or if the file is only open, we must update to FdEntity's internal meta. - // - FdEntity* ent; - if(NULL != (ent = FdManager::get()->ExistOpen(path, -1, true))){ - // the file is opened now. - if(ent->MergeOrgMeta(updatemeta)){ - // now uploading - // the meta is pending and accumulated to be put after the upload is complete. - S3FS_PRN_INFO("meta pending until upload is complete"); - }else{ - // allow to put header - // updatemeta already merged the orgmeta of the opened files. 
- if(0 != put_headers(strpath.c_str(), updatemeta, true)){ - return -EIO; + if(S_ISDIR(stbuf.st_mode)){ + result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType); + }else{ + strpath = path; + nowcache = strpath; + result = get_object_attribute(strpath.c_str(), NULL, &meta); + } + if(0 != result){ + return result; + } + + if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){ + // Should rebuild directory object(except new type) + // Need to remove old dir("dir" etc) and make new dir("dir/") + + // At first, remove directory old object + if(0 != (result = remove_old_type_dir(strpath, nDirType))){ + return result; } StatCache::getStatCacheData()->DelStat(nowcache); - } + // Make new directory object("dir/") + if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, ts[1].tv_sec, stbuf.st_uid, stbuf.st_gid))){ + return result; + } }else{ - // not opened file, then put headers - merge_headers(meta, updatemeta, true); - if(0 != put_headers(strpath.c_str(), meta, true)){ - return -EIO; - } - StatCache::getStatCacheData()->DelStat(nowcache); - } - } - S3FS_MALLOCTRIM(0); + headers_t updatemeta; + updatemeta["x-amz-meta-mtime"] = str(ts[1].tv_sec); + updatemeta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str())); + updatemeta["x-amz-metadata-directive"] = "REPLACE"; - return 0; + // check opened file handle. + // + // If the file starts uploading by multipart when the disk capacity is insufficient, + // we need to put these header after finishing upload. + // Or if the file is only open, we must update to FdEntity's internal meta. + // + FdEntity* ent; + if(NULL != (ent = FdManager::get()->ExistOpen(path, -1, true))){ + // the file is opened now. + if(ent->MergeOrgMeta(updatemeta)){ + // now uploading + // the meta is pending and accumulated to be put after the upload is complete. 
+ S3FS_PRN_INFO("meta pending until upload is complete"); + }else{ + // allow to put header + // updatemeta already merged the orgmeta of the opened files. + if(0 != put_headers(strpath.c_str(), updatemeta, true)){ + return -EIO; + } + StatCache::getStatCacheData()->DelStat(nowcache); + } + }else{ + // not opened file, then put headers + merge_headers(meta, updatemeta, true); + if(0 != put_headers(strpath.c_str(), meta, true)){ + return -EIO; + } + StatCache::getStatCacheData()->DelStat(nowcache); + } + } + S3FS_MALLOCTRIM(0); + + return 0; } static int s3fs_utimens_nocopy(const char* _path, const struct timespec ts[2]) { - WTF8_ENCODE(path) - int result; - string strpath; - string newpath; - string nowcache; - struct stat stbuf; - dirtype nDirType = DIRTYPE_UNKNOWN; + WTF8_ENCODE(path) + int result; + string strpath; + string newpath; + string nowcache; + struct stat stbuf; + dirtype nDirType = DIRTYPE_UNKNOWN; - S3FS_PRN_INFO1("[path=%s][mtime=%lld]", path, static_cast(ts[1].tv_sec)); + S3FS_PRN_INFO1("[path=%s][mtime=%lld]", path, static_cast(ts[1].tv_sec)); + + if(0 == strcmp(path, "/")){ + S3FS_PRN_ERR("Could not change mtime for mount point."); + return -EIO; + } + if(0 != (result = check_parent_object_access(path, X_OK))){ + return result; + } + if(0 != (result = check_object_access(path, W_OK, &stbuf))){ + if(0 != check_object_owner(path, &stbuf)){ + return result; + } + } + + // Get attributes + if(S_ISDIR(stbuf.st_mode)){ + result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType); + }else{ + strpath = path; + nowcache = strpath; + result = get_object_attribute(strpath.c_str(), NULL, NULL); + } + if(0 != result){ + return result; + } + + if(S_ISDIR(stbuf.st_mode)){ + // Should rebuild all directory object + // Need to remove old dir("dir" etc) and make new dir("dir/") + + // At first, remove directory old object + if(0 != (result = remove_old_type_dir(strpath, nDirType))){ + return result; + } + 
StatCache::getStatCacheData()->DelStat(nowcache); + + // Make new directory object("dir/") + if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, ts[1].tv_sec, stbuf.st_uid, stbuf.st_gid))){ + return result; + } + }else{ + // normal object or directory object of newer version + + // open & load + FdEntity* ent; + if(NULL == (ent = get_local_fent(strpath.c_str(), true))){ + S3FS_PRN_ERR("could not open and read file(%s)", strpath.c_str()); + return -EIO; + } + + // set mtime + if(0 != (result = ent->SetMtime(ts[1].tv_sec))){ + S3FS_PRN_ERR("could not set mtime to file(%s): result=%d", strpath.c_str(), result); + FdManager::get()->Close(ent); + return result; + } + + // upload + if(0 != (result = ent->Flush(true))){ + S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result); + FdManager::get()->Close(ent); + return result; + } + FdManager::get()->Close(ent); + + StatCache::getStatCacheData()->DelStat(nowcache); + } + S3FS_MALLOCTRIM(0); - if(0 == strcmp(path, "/")){ - S3FS_PRN_ERR("Could not change mtime for mount point."); - return -EIO; - } - if(0 != (result = check_parent_object_access(path, X_OK))){ return result; - } - if(0 != (result = check_object_access(path, W_OK, &stbuf))){ - if(0 != check_object_owner(path, &stbuf)){ - return result; - } - } - - // Get attributes - if(S_ISDIR(stbuf.st_mode)){ - result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType); - }else{ - strpath = path; - nowcache = strpath; - result = get_object_attribute(strpath.c_str(), NULL, NULL); - } - if(0 != result){ - return result; - } - - if(S_ISDIR(stbuf.st_mode)){ - // Should rebuild all directory object - // Need to remove old dir("dir" etc) and make new dir("dir/") - - // At first, remove directory old object - if(0 != (result = remove_old_type_dir(strpath, nDirType))){ - return result; - } - StatCache::getStatCacheData()->DelStat(nowcache); - - // Make new directory object("dir/") - if(0 != (result = 
create_directory_object(newpath.c_str(), stbuf.st_mode, ts[1].tv_sec, stbuf.st_uid, stbuf.st_gid))){ - return result; - } - }else{ - // normal object or directory object of newer version - - // open & load - FdEntity* ent; - if(NULL == (ent = get_local_fent(strpath.c_str(), true))){ - S3FS_PRN_ERR("could not open and read file(%s)", strpath.c_str()); - return -EIO; - } - - // set mtime - if(0 != (result = ent->SetMtime(ts[1].tv_sec))){ - S3FS_PRN_ERR("could not set mtime to file(%s): result=%d", strpath.c_str(), result); - FdManager::get()->Close(ent); - return result; - } - - // upload - if(0 != (result = ent->Flush(true))){ - S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result); - FdManager::get()->Close(ent); - return result; - } - FdManager::get()->Close(ent); - - StatCache::getStatCacheData()->DelStat(nowcache); - } - S3FS_MALLOCTRIM(0); - - return result; } static int s3fs_truncate(const char* _path, off_t size) { - WTF8_ENCODE(path) - int result; - headers_t meta; - FdEntity* ent = NULL; + WTF8_ENCODE(path) + int result; + headers_t meta; + FdEntity* ent = NULL; - S3FS_PRN_INFO("[path=%s][size=%lld]", path, static_cast(size)); + S3FS_PRN_INFO("[path=%s][size=%lld]", path, static_cast(size)); - if(size < 0){ - size = 0; - } - - if(0 != (result = check_parent_object_access(path, X_OK))){ - return result; - } - if(0 != (result = check_object_access(path, W_OK, NULL))){ - return result; - } - - // Get file information - if(0 == (result = get_object_attribute(path, NULL, &meta))){ - // Exists -> Get file(with size) - if(NULL == (ent = FdManager::get()->Open(path, &meta, size, -1, false, true))){ - S3FS_PRN_ERR("could not open file(%s): errno=%d", path, errno); - return -EIO; - } - if(0 != (result = ent->Load(0, size))){ - S3FS_PRN_ERR("could not download file(%s): result=%d", path, result); - FdManager::get()->Close(ent); - return result; + if(size < 0){ + size = 0; } - }else{ - // Not found -> Make tmpfile(with size) - - struct 
fuse_context* pcxt; - if(NULL == (pcxt = fuse_get_context())){ - return -EIO; + if(0 != (result = check_parent_object_access(path, X_OK))){ + return result; } - time_t now = time(NULL); - meta["Content-Type"] = string("application/octet-stream"); // Static - meta["x-amz-meta-mode"] = str(S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO); - meta["x-amz-meta-ctime"] = str(now); - meta["x-amz-meta-mtime"] = str(now); - meta["x-amz-meta-uid"] = str(pcxt->uid); - meta["x-amz-meta-gid"] = str(pcxt->gid); - - if(NULL == (ent = FdManager::get()->Open(path, &meta, size, -1, true, true))){ - S3FS_PRN_ERR("could not open file(%s): errno=%d", path, errno); - return -EIO; + if(0 != (result = check_object_access(path, W_OK, NULL))){ + return result; } - } - // upload - if(0 != (result = ent->Flush(true))){ - S3FS_PRN_ERR("could not upload file(%s): result=%d", path, result); + // Get file information + if(0 == (result = get_object_attribute(path, NULL, &meta))){ + // Exists -> Get file(with size) + if(NULL == (ent = FdManager::get()->Open(path, &meta, size, -1, false, true))){ + S3FS_PRN_ERR("could not open file(%s): errno=%d", path, errno); + return -EIO; + } + if(0 != (result = ent->Load(0, size))){ + S3FS_PRN_ERR("could not download file(%s): result=%d", path, result); + FdManager::get()->Close(ent); + return result; + } + + }else{ + // Not found -> Make tmpfile(with size) + + struct fuse_context* pcxt; + if(NULL == (pcxt = fuse_get_context())){ + return -EIO; + } + time_t now = time(NULL); + meta["Content-Type"] = string("application/octet-stream"); // Static + meta["x-amz-meta-mode"] = str(S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO); + meta["x-amz-meta-ctime"] = str(now); + meta["x-amz-meta-mtime"] = str(now); + meta["x-amz-meta-uid"] = str(pcxt->uid); + meta["x-amz-meta-gid"] = str(pcxt->gid); + + if(NULL == (ent = FdManager::get()->Open(path, &meta, size, -1, true, true))){ + S3FS_PRN_ERR("could not open file(%s): errno=%d", path, errno); + return -EIO; + } + } + + // upload + if(0 != 
(result = ent->Flush(true))){ + S3FS_PRN_ERR("could not upload file(%s): result=%d", path, result); + FdManager::get()->Close(ent); + return result; + } FdManager::get()->Close(ent); + + StatCache::getStatCacheData()->DelStat(path); + S3FS_MALLOCTRIM(0); + return result; - } - FdManager::get()->Close(ent); - - StatCache::getStatCacheData()->DelStat(path); - S3FS_MALLOCTRIM(0); - - return result; } static int s3fs_open(const char* _path, struct fuse_file_info* fi) { - WTF8_ENCODE(path) - int result; - struct stat st; - bool needs_flush = false; + WTF8_ENCODE(path) + int result; + struct stat st; + bool needs_flush = false; - S3FS_PRN_INFO("[path=%s][flags=0x%x]", path, fi->flags); + S3FS_PRN_INFO("[path=%s][flags=0x%x]", path, fi->flags); - // clear stat for reading fresh stat. - // (if object stat is changed, we refresh it. then s3fs gets always - // stat when s3fs open the object). - if(StatCache::getStatCacheData()->HasStat(path)){ - // flush any dirty data so that subsequent stat gets correct size - if((result = s3fs_flush(_path, fi)) != 0){ - S3FS_PRN_ERR("could not flush(%s): result=%d", path, result); + // clear stat for reading fresh stat. + // (if object stat is changed, we refresh it. then s3fs gets always + // stat when s3fs open the object). + if(StatCache::getStatCacheData()->HasStat(path)){ + // flush any dirty data so that subsequent stat gets correct size + if((result = s3fs_flush(_path, fi)) != 0){ + S3FS_PRN_ERR("could not flush(%s): result=%d", path, result); + } + StatCache::getStatCacheData()->DelStat(path); } - StatCache::getStatCacheData()->DelStat(path); - } - int mask = (O_RDONLY != (fi->flags & O_ACCMODE) ? W_OK : R_OK); - if(0 != (result = check_parent_object_access(path, X_OK))){ - return result; - } - - result = check_object_access(path, mask, &st); - if(-ENOENT == result){ - if(0 != (result = check_parent_object_access(path, W_OK))){ - return result; + int mask = (O_RDONLY != (fi->flags & O_ACCMODE) ? 
W_OK : R_OK); + if(0 != (result = check_parent_object_access(path, X_OK))){ + return result; } - }else if(0 != result){ - return result; - } - if((unsigned int)fi->flags & O_TRUNC){ - if(0 != st.st_size){ - st.st_size = 0; - needs_flush = true; + result = check_object_access(path, mask, &st); + if(-ENOENT == result){ + if(0 != (result = check_parent_object_access(path, W_OK))){ + return result; + } + }else if(0 != result){ + return result; } - } - if(!S_ISREG(st.st_mode) || S_ISLNK(st.st_mode)){ - st.st_mtime = -1; - } - FdEntity* ent; - headers_t meta; - get_object_attribute(path, NULL, &meta, true, NULL, true); // no truncate cache - if(NULL == (ent = FdManager::get()->Open(path, &meta, st.st_size, st.st_mtime, false, true))){ - StatCache::getStatCacheData()->DelStat(path); - return -EIO; - } - - if (needs_flush){ - if(0 != (result = ent->RowFlush(path, true))){ - S3FS_PRN_ERR("could not upload file(%s): result=%d", path, result); - FdManager::get()->Close(ent); - StatCache::getStatCacheData()->DelStat(path); - return result; + if((unsigned int)fi->flags & O_TRUNC){ + if(0 != st.st_size){ + st.st_size = 0; + needs_flush = true; + } + } + if(!S_ISREG(st.st_mode) || S_ISLNK(st.st_mode)){ + st.st_mtime = -1; } - } - fi->fh = ent->GetFd(); - S3FS_MALLOCTRIM(0); + FdEntity* ent; + headers_t meta; + get_object_attribute(path, NULL, &meta, true, NULL, true); // no truncate cache + if(NULL == (ent = FdManager::get()->Open(path, &meta, st.st_size, st.st_mtime, false, true))){ + StatCache::getStatCacheData()->DelStat(path); + return -EIO; + } - return 0; + if (needs_flush){ + if(0 != (result = ent->RowFlush(path, true))){ + S3FS_PRN_ERR("could not upload file(%s): result=%d", path, result); + FdManager::get()->Close(ent); + StatCache::getStatCacheData()->DelStat(path); + return result; + } + } + + fi->fh = ent->GetFd(); + S3FS_MALLOCTRIM(0); + + return 0; } static int s3fs_read(const char* _path, char* buf, size_t size, off_t offset, struct fuse_file_info* fi) { - 
WTF8_ENCODE(path) - ssize_t res; + WTF8_ENCODE(path) + ssize_t res; - S3FS_PRN_DBG("[path=%s][size=%zu][offset=%lld][fd=%llu]", path, size, static_cast(offset), (unsigned long long)(fi->fh)); + S3FS_PRN_DBG("[path=%s][size=%zu][offset=%lld][fd=%llu]", path, size, static_cast(offset), (unsigned long long)(fi->fh)); - FdEntity* ent; - if(NULL == (ent = FdManager::get()->ExistOpen(path, static_cast(fi->fh)))){ - S3FS_PRN_ERR("could not find opened fd(%s)", path); - return -EIO; - } - if(ent->GetFd() != static_cast(fi->fh)){ - S3FS_PRN_WARN("different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fi->fh)); - } + FdEntity* ent; + if(NULL == (ent = FdManager::get()->ExistOpen(path, static_cast(fi->fh)))){ + S3FS_PRN_ERR("could not find opened fd(%s)", path); + return -EIO; + } + if(ent->GetFd() != static_cast(fi->fh)){ + S3FS_PRN_WARN("different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fi->fh)); + } - // check real file size - off_t realsize = 0; - if(!ent->GetSize(realsize) || 0 == realsize){ - S3FS_PRN_DBG("file size is 0, so break to read."); + // check real file size + off_t realsize = 0; + if(!ent->GetSize(realsize) || 0 == realsize){ + S3FS_PRN_DBG("file size is 0, so break to read."); + FdManager::get()->Close(ent); + return 0; + } + + if(0 > (res = ent->Read(buf, offset, size, false))){ + S3FS_PRN_WARN("failed to read file(%s). result=%zd", path, res); + } FdManager::get()->Close(ent); - return 0; - } - if(0 > (res = ent->Read(buf, offset, size, false))){ - S3FS_PRN_WARN("failed to read file(%s). 
result=%zd", path, res); - } - FdManager::get()->Close(ent); - - return static_cast(res); + return static_cast(res); } static int s3fs_write(const char* _path, const char* buf, size_t size, off_t offset, struct fuse_file_info* fi) { - WTF8_ENCODE(path) - ssize_t res; + WTF8_ENCODE(path) + ssize_t res; - S3FS_PRN_DBG("[path=%s][size=%zu][offset=%lld][fd=%llu]", path, size, static_cast(offset), (unsigned long long)(fi->fh)); + S3FS_PRN_DBG("[path=%s][size=%zu][offset=%lld][fd=%llu]", path, size, static_cast(offset), (unsigned long long)(fi->fh)); - FdEntity* ent; - if(NULL == (ent = FdManager::get()->ExistOpen(path, static_cast(fi->fh)))){ - S3FS_PRN_ERR("could not find opened fd(%s)", path); - return -EIO; - } - if(ent->GetFd() != static_cast(fi->fh)){ - S3FS_PRN_WARN("different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fi->fh)); - } - if(0 > (res = ent->Write(buf, offset, size))){ - S3FS_PRN_WARN("failed to write file(%s). result=%zd", path, res); - } - FdManager::get()->Close(ent); + FdEntity* ent; + if(NULL == (ent = FdManager::get()->ExistOpen(path, static_cast(fi->fh)))){ + S3FS_PRN_ERR("could not find opened fd(%s)", path); + return -EIO; + } + if(ent->GetFd() != static_cast(fi->fh)){ + S3FS_PRN_WARN("different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fi->fh)); + } + if(0 > (res = ent->Write(buf, offset, size))){ + S3FS_PRN_WARN("failed to write file(%s). 
result=%zd", path, res); + } + FdManager::get()->Close(ent); - return static_cast(res); + return static_cast(res); } static int s3fs_statfs(const char* _path, struct statvfs* stbuf) { - // WTF8_ENCODE(path) - // 256T - stbuf->f_bsize = 0X1000000; - stbuf->f_blocks = 0X1000000; - stbuf->f_bfree = 0x1000000; - stbuf->f_bavail = 0x1000000; - stbuf->f_namemax = NAME_MAX; - return 0; + // WTF8_ENCODE(path) + // 256T + stbuf->f_bsize = 0X1000000; + stbuf->f_blocks = 0X1000000; + stbuf->f_bfree = 0x1000000; + stbuf->f_bavail = 0x1000000; + stbuf->f_namemax = NAME_MAX; + return 0; } static int s3fs_flush(const char* _path, struct fuse_file_info* fi) { - WTF8_ENCODE(path) - int result; + WTF8_ENCODE(path) + int result; - S3FS_PRN_INFO("[path=%s][fd=%llu]", path, (unsigned long long)(fi->fh)); + S3FS_PRN_INFO("[path=%s][fd=%llu]", path, (unsigned long long)(fi->fh)); - int mask = (O_RDONLY != (fi->flags & O_ACCMODE) ? W_OK : R_OK); - if(0 != (result = check_parent_object_access(path, X_OK))){ - return result; - } - result = check_object_access(path, mask, NULL); - if(-ENOENT == result){ - if(0 != (result = check_parent_object_access(path, W_OK))){ - return result; + int mask = (O_RDONLY != (fi->flags & O_ACCMODE) ? 
W_OK : R_OK); + if(0 != (result = check_parent_object_access(path, X_OK))){ + return result; } - }else if(0 != result){ + result = check_object_access(path, mask, NULL); + if(-ENOENT == result){ + if(0 != (result = check_parent_object_access(path, W_OK))){ + return result; + } + }else if(0 != result){ + return result; + } + + FdEntity* ent; + if(NULL != (ent = FdManager::get()->ExistOpen(path, static_cast(fi->fh)))){ + ent->UpdateMtime(); + result = ent->Flush(false); + FdManager::get()->Close(ent); + } + S3FS_MALLOCTRIM(0); + return result; - } - - FdEntity* ent; - if(NULL != (ent = FdManager::get()->ExistOpen(path, static_cast(fi->fh)))){ - ent->UpdateMtime(); - result = ent->Flush(false); - FdManager::get()->Close(ent); - } - S3FS_MALLOCTRIM(0); - - return result; } // [NOTICE] @@ -2361,814 +2296,521 @@ static int s3fs_flush(const char* _path, struct fuse_file_info* fi) // static int s3fs_fsync(const char* _path, int datasync, struct fuse_file_info* fi) { - WTF8_ENCODE(path) - int result = 0; + WTF8_ENCODE(path) + int result = 0; - S3FS_PRN_INFO("[path=%s][fd=%llu]", path, (unsigned long long)(fi->fh)); + S3FS_PRN_INFO("[path=%s][fd=%llu]", path, (unsigned long long)(fi->fh)); - FdEntity* ent; - if(NULL != (ent = FdManager::get()->ExistOpen(path, static_cast(fi->fh)))){ - if(0 == datasync){ - ent->UpdateMtime(); + FdEntity* ent; + if(NULL != (ent = FdManager::get()->ExistOpen(path, static_cast(fi->fh)))){ + if(0 == datasync){ + ent->UpdateMtime(); + } + result = ent->Flush(false); + FdManager::get()->Close(ent); } - result = ent->Flush(false); - FdManager::get()->Close(ent); - } - S3FS_MALLOCTRIM(0); + S3FS_MALLOCTRIM(0); - // Issue 320: Delete stat cache entry because st_size may have changed. - StatCache::getStatCacheData()->DelStat(path); + // Issue 320: Delete stat cache entry because st_size may have changed. 
+ StatCache::getStatCacheData()->DelStat(path); - return result; + return result; } static int s3fs_release(const char* _path, struct fuse_file_info* fi) { - WTF8_ENCODE(path) - S3FS_PRN_INFO("[path=%s][fd=%llu]", path, (unsigned long long)(fi->fh)); + WTF8_ENCODE(path) + S3FS_PRN_INFO("[path=%s][fd=%llu]", path, (unsigned long long)(fi->fh)); - // [NOTE] - // All opened file's stats is cached with no truncate flag. - // Thus we unset it here. - StatCache::getStatCacheData()->ChangeNoTruncateFlag(string(path), false); + // [NOTE] + // All opened file's stats is cached with no truncate flag. + // Thus we unset it here. + StatCache::getStatCacheData()->ChangeNoTruncateFlag(string(path), false); - // [NOTICE] - // At first, we remove stats cache. - // Because fuse does not wait for response from "release" function. :-( - // And fuse runs next command before this function returns. - // Thus we call deleting stats function ASSAP. - // - if((fi->flags & O_RDWR) || (fi->flags & O_WRONLY)){ - StatCache::getStatCacheData()->DelStat(path); - } - - FdEntity* ent; - if(NULL == (ent = FdManager::get()->GetFdEntity(path, static_cast(fi->fh)))){ - S3FS_PRN_ERR("could not find fd(file=%s)", path); - return -EIO; - } - if(ent->GetFd() != static_cast(fi->fh)){ - S3FS_PRN_WARN("different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fi->fh)); - } - - // Once for the implicit refcnt from GetFdEntity and again for release - ent->Close(); - FdManager::get()->Close(ent); - - // check - for debug - if(IS_S3FS_LOG_DBG()){ - if(NULL != (ent = FdManager::get()->GetFdEntity(path, static_cast(fi->fh)))){ - S3FS_PRN_WARN("file(%s),fd(%d) is still opened.", path, ent->GetFd()); + // [NOTICE] + // At first, we remove stats cache. + // Because fuse does not wait for response from "release" function. :-( + // And fuse runs next command before this function returns. + // Thus we call deleting stats function ASSAP. 
+ // + if((fi->flags & O_RDWR) || (fi->flags & O_WRONLY)){ + StatCache::getStatCacheData()->DelStat(path); } - } - S3FS_MALLOCTRIM(0); - return 0; + FdEntity* ent; + if(NULL == (ent = FdManager::get()->GetFdEntity(path, static_cast(fi->fh)))){ + S3FS_PRN_ERR("could not find fd(file=%s)", path); + return -EIO; + } + if(ent->GetFd() != static_cast(fi->fh)){ + S3FS_PRN_WARN("different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fi->fh)); + } + + // Once for the implicit refcnt from GetFdEntity and again for release + ent->Close(); + FdManager::get()->Close(ent); + + // check - for debug + if(IS_S3FS_LOG_DBG()){ + if(NULL != (ent = FdManager::get()->GetFdEntity(path, static_cast(fi->fh)))){ + S3FS_PRN_WARN("file(%s),fd(%d) is still opened.", path, ent->GetFd()); + } + } + S3FS_MALLOCTRIM(0); + + return 0; } static int s3fs_opendir(const char* _path, struct fuse_file_info* fi) { - WTF8_ENCODE(path) - int result; - int mask = (O_RDONLY != (fi->flags & O_ACCMODE) ? W_OK : R_OK) | X_OK; + WTF8_ENCODE(path) + int result; + int mask = (O_RDONLY != (fi->flags & O_ACCMODE) ? 
W_OK : R_OK) | X_OK; - S3FS_PRN_INFO("[path=%s][flags=0x%x]", path, fi->flags); + S3FS_PRN_INFO("[path=%s][flags=0x%x]", path, fi->flags); - if(0 == (result = check_object_access(path, mask, NULL))){ - result = check_parent_object_access(path, mask); - } - S3FS_MALLOCTRIM(0); + if(0 == (result = check_object_access(path, mask, NULL))){ + result = check_parent_object_access(path, mask); + } + S3FS_MALLOCTRIM(0); - return result; + return result; } static bool multi_head_callback(S3fsCurl* s3fscurl) { - if(!s3fscurl){ - return false; - } - string saved_path = s3fscurl->GetSpacialSavedPath(); - if(!StatCache::getStatCacheData()->AddStat(saved_path, *(s3fscurl->GetResponseHeaders()))){ - S3FS_PRN_ERR("failed adding stat cache [path=%s]", saved_path.c_str()); - return false; - } - return true; + if(!s3fscurl){ + return false; + } + string saved_path = s3fscurl->GetSpacialSavedPath(); + if(!StatCache::getStatCacheData()->AddStat(saved_path, *(s3fscurl->GetResponseHeaders()))){ + S3FS_PRN_ERR("failed adding stat cache [path=%s]", saved_path.c_str()); + return false; + } + return true; } static S3fsCurl* multi_head_retry_callback(S3fsCurl* s3fscurl) { - if(!s3fscurl){ - return NULL; - } - int ssec_key_pos= s3fscurl->GetLastPreHeadSeecKeyPos(); - int retry_count = s3fscurl->GetMultipartRetryCount(); - - // retry next sse key. - // if end of sse key, set retry master count is up. - ssec_key_pos = (ssec_key_pos < 0 ? 
0 : ssec_key_pos + 1); - if(0 == S3fsCurl::GetSseKeyCount() || S3fsCurl::GetSseKeyCount() <= ssec_key_pos){ - if(s3fscurl->IsOverMultipartRetryCount()){ - S3FS_PRN_ERR("Over retry count(%d) limit(%s).", s3fscurl->GetMultipartRetryCount(), s3fscurl->GetSpacialSavedPath().c_str()); - return NULL; + if(!s3fscurl){ + return NULL; } - ssec_key_pos= -1; - retry_count++; - } + int ssec_key_pos= s3fscurl->GetLastPreHeadSeecKeyPos(); + int retry_count = s3fscurl->GetMultipartRetryCount(); - S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe()); - string path = s3fscurl->GetPath(); - string base_path = s3fscurl->GetBasePath(); - string saved_path = s3fscurl->GetSpacialSavedPath(); + // retry next sse key. + // if end of sse key, set retry master count is up. + ssec_key_pos = (ssec_key_pos < 0 ? 0 : ssec_key_pos + 1); + if(0 == S3fsCurl::GetSseKeyCount() || S3fsCurl::GetSseKeyCount() <= ssec_key_pos){ + if(s3fscurl->IsOverMultipartRetryCount()){ + S3FS_PRN_ERR("Over retry count(%d) limit(%s).", s3fscurl->GetMultipartRetryCount(), s3fscurl->GetSpacialSavedPath().c_str()); + return NULL; + } + ssec_key_pos= -1; + retry_count++; + } - if(!newcurl->PreHeadRequest(path, base_path, saved_path, ssec_key_pos)){ - S3FS_PRN_ERR("Could not duplicate curl object(%s).", saved_path.c_str()); - delete newcurl; - return NULL; - } - newcurl->SetMultipartRetryCount(retry_count); + S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe()); + string path = s3fscurl->GetPath(); + string base_path = s3fscurl->GetBasePath(); + string saved_path = s3fscurl->GetSpacialSavedPath(); - return newcurl; + if(!newcurl->PreHeadRequest(path, base_path, saved_path, ssec_key_pos)){ + S3FS_PRN_ERR("Could not duplicate curl object(%s).", saved_path.c_str()); + delete newcurl; + return NULL; + } + newcurl->SetMultipartRetryCount(retry_count); + + return newcurl; } static int readdir_multi_head(const char* path, const S3ObjList& head, void* buf, fuse_fill_dir_t filler) { - S3fsMultiCurl 
curlmulti(S3fsCurl::GetMaxMultiRequest()); - s3obj_list_t headlist; - s3obj_list_t fillerlist; - int result = 0; + S3fsMultiCurl curlmulti(S3fsCurl::GetMaxMultiRequest()); + s3obj_list_t headlist; + s3obj_list_t fillerlist; + int result = 0; - S3FS_PRN_INFO1("[path=%s][list=%zu]", path, headlist.size()); + S3FS_PRN_INFO1("[path=%s][list=%zu]", path, headlist.size()); - // Make base path list. - head.GetNameList(headlist, true, false); // get name with "/". + // Make base path list. + head.GetNameList(headlist, true, false); // get name with "/". - // Initialize S3fsMultiCurl - curlmulti.SetSuccessCallback(multi_head_callback); - curlmulti.SetRetryCallback(multi_head_retry_callback); + // Initialize S3fsMultiCurl + curlmulti.SetSuccessCallback(multi_head_callback); + curlmulti.SetRetryCallback(multi_head_retry_callback); - s3obj_list_t::iterator iter; + s3obj_list_t::iterator iter; - fillerlist.clear(); - // Make single head request(with max). - for(iter = headlist.begin(); headlist.end() != iter; iter = headlist.erase(iter)){ - string disppath = path + (*iter); - string etag = head.GetETag((*iter).c_str()); + fillerlist.clear(); + // Make single head request(with max). + for(iter = headlist.begin(); headlist.end() != iter; iter = headlist.erase(iter)){ + string disppath = path + (*iter); + string etag = head.GetETag((*iter).c_str()); - string fillpath = disppath; - if('/' == disppath[disppath.length() - 1]){ - fillpath = fillpath.substr(0, fillpath.length() -1); - } - fillerlist.push_back(fillpath); + string fillpath = disppath; + if('/' == disppath[disppath.length() - 1]){ + fillpath = fillpath.substr(0, fillpath.length() -1); + } + fillerlist.push_back(fillpath); - if(StatCache::getStatCacheData()->HasStat(disppath, etag.c_str())){ - continue; + if(StatCache::getStatCacheData()->HasStat(disppath, etag.c_str())){ + continue; + } + + // First check for directory, start checking "not SSE-C". 
+ // If checking failed, retry to check with "SSE-C" by retry callback func when SSE-C mode. + S3fsCurl* s3fscurl = new S3fsCurl(); + if(!s3fscurl->PreHeadRequest(disppath, (*iter), disppath)){ // target path = cache key path.(ex "dir/") + S3FS_PRN_WARN("Could not make curl object for head request(%s).", disppath.c_str()); + delete s3fscurl; + continue; + } + + if(!curlmulti.SetS3fsCurlObject(s3fscurl)){ + S3FS_PRN_WARN("Could not make curl object into multi curl(%s).", disppath.c_str()); + delete s3fscurl; + continue; + } } - // First check for directory, start checking "not SSE-C". - // If checking failed, retry to check with "SSE-C" by retry callback func when SSE-C mode. - S3fsCurl* s3fscurl = new S3fsCurl(); - if(!s3fscurl->PreHeadRequest(disppath, (*iter), disppath)){ // target path = cache key path.(ex "dir/") - S3FS_PRN_WARN("Could not make curl object for head request(%s).", disppath.c_str()); - delete s3fscurl; - continue; + // Multi request + if(0 != (result = curlmulti.Request())){ + // If result is -EIO, it is something error occurred. + // This case includes that the object is encrypting(SSE) and s3fs does not have keys. + // So s3fs set result to 0 in order to continue the process. 
+ if(-EIO == result){ + S3FS_PRN_WARN("error occurred in multi request(errno=%d), but continue...", result); + result = 0; + }else{ + S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result); + return result; + } } - if(!curlmulti.SetS3fsCurlObject(s3fscurl)){ - S3FS_PRN_WARN("Could not make curl object into multi curl(%s).", disppath.c_str()); - delete s3fscurl; - continue; + // populate fuse buffer + // here is best position, because a case is cache size < files in directory + // + for(iter = fillerlist.begin(); fillerlist.end() != iter; ++iter){ + struct stat st; + bool in_cache = StatCache::getStatCacheData()->GetStat((*iter), &st); + string bpath = mybasename((*iter)); + if(use_wtf8){ + bpath = s3fs_wtf8_decode(bpath); + } + if(in_cache){ + filler(buf, bpath.c_str(), &st, 0); + }else{ + S3FS_PRN_INFO2("Could not find %s file in stat cache.", (*iter).c_str()); + filler(buf, bpath.c_str(), 0, 0); + } } - } - // Multi request - if(0 != (result = curlmulti.Request())){ - // If result is -EIO, it is something error occurred. - // This case includes that the object is encrypting(SSE) and s3fs does not have keys. - // So s3fs set result to 0 in order to continue the process. 
- if(-EIO == result){ - S3FS_PRN_WARN("error occurred in multi request(errno=%d), but continue...", result); - result = 0; - }else{ - S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result); - return result; - } - } - - // populate fuse buffer - // here is best position, because a case is cache size < files in directory - // - for(iter = fillerlist.begin(); fillerlist.end() != iter; ++iter){ - struct stat st; - bool in_cache = StatCache::getStatCacheData()->GetStat((*iter), &st); - string bpath = mybasename((*iter)); - if(use_wtf8){ - bpath = s3fs_wtf8_decode(bpath); - } - if(in_cache){ - filler(buf, bpath.c_str(), &st, 0); - }else{ - S3FS_PRN_INFO2("Could not find %s file in stat cache.", (*iter).c_str()); - filler(buf, bpath.c_str(), 0, 0); - } - } - - return result; + return result; } static int s3fs_readdir(const char* _path, void* buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info* fi) { - WTF8_ENCODE(path) - S3ObjList head; - int result; + WTF8_ENCODE(path) + S3ObjList head; + int result; - S3FS_PRN_INFO("[path=%s]", path); + S3FS_PRN_INFO("[path=%s]", path); + + if(0 != (result = check_object_access(path, X_OK, NULL))){ + return result; + } + + // get a list of all the objects + if((result = list_bucket(path, head, "/")) != 0){ + S3FS_PRN_ERR("list_bucket returns error(%d).", result); + return result; + } + + // force to add "." and ".." name. + filler(buf, ".", 0, 0); + filler(buf, "..", 0, 0); + if(head.IsEmpty()){ + return 0; + } + + // Send multi head request for stats caching. 
+ string strpath = path; + if(strcmp(path, "/") != 0){ + strpath += "/"; + } + if(0 != (result = readdir_multi_head(strpath.c_str(), head, buf, filler))){ + S3FS_PRN_ERR("readdir_multi_head returns error(%d).", result); + } + S3FS_MALLOCTRIM(0); - if(0 != (result = check_object_access(path, X_OK, NULL))){ return result; - } - - // get a list of all the objects - if((result = list_bucket(path, head, "/")) != 0){ - S3FS_PRN_ERR("list_bucket returns error(%d).", result); - return result; - } - - // force to add "." and ".." name. - filler(buf, ".", 0, 0); - filler(buf, "..", 0, 0); - if(head.IsEmpty()){ - return 0; - } - - // Send multi head request for stats caching. - string strpath = path; - if(strcmp(path, "/") != 0){ - strpath += "/"; - } - if(0 != (result = readdir_multi_head(strpath.c_str(), head, buf, filler))){ - S3FS_PRN_ERR("readdir_multi_head returns error(%d).", result); - } - S3FS_MALLOCTRIM(0); - - return result; } static int list_bucket(const char* path, S3ObjList& head, const char* delimiter, bool check_content_only) { - string s3_realpath; - string query_delimiter;; - string query_prefix;; - string query_maxkey;; - string next_marker; - bool truncated = true; - S3fsCurl s3fscurl; - xmlDocPtr doc; + string s3_realpath; + string query_delimiter;; + string query_prefix;; + string query_maxkey;; + string next_marker; + bool truncated = true; + S3fsCurl s3fscurl; + xmlDocPtr doc; - S3FS_PRN_INFO1("[path=%s]", path); + S3FS_PRN_INFO1("[path=%s]", path); - if(delimiter && 0 < strlen(delimiter)){ - query_delimiter += "delimiter="; - query_delimiter += delimiter; - query_delimiter += "&"; - } - - query_prefix += "&prefix="; - s3_realpath = get_realpath(path); - if(0 == s3_realpath.length() || '/' != s3_realpath[s3_realpath.length() - 1]){ - // last word must be "/" - query_prefix += urlEncode(s3_realpath.substr(1) + "/"); - }else{ - query_prefix += urlEncode(s3_realpath.substr(1)); - } - if (check_content_only){ - // Just need to know if there are child 
objects in dir - // For dir with children, expect "dir/" and "dir/child" - query_maxkey += "max-keys=2"; - }else{ - query_maxkey += "max-keys=" + str(max_keys_list_object); - } - - while(truncated){ - string each_query = query_delimiter; - if(!next_marker.empty()){ - each_query += "marker=" + urlEncode(next_marker) + "&"; - next_marker = ""; + if(delimiter && 0 < strlen(delimiter)){ + query_delimiter += "delimiter="; + query_delimiter += delimiter; + query_delimiter += "&"; } - each_query += query_maxkey; - each_query += query_prefix; - // request - int result; - if(0 != (result = s3fscurl.ListBucketRequest(path, each_query.c_str()))){ - S3FS_PRN_ERR("ListBucketRequest returns with error."); - return result; + query_prefix += "&prefix="; + s3_realpath = get_realpath(path); + if(0 == s3_realpath.length() || '/' != s3_realpath[s3_realpath.length() - 1]){ + // last word must be "/" + query_prefix += urlEncode(s3_realpath.substr(1) + "/"); + }else{ + query_prefix += urlEncode(s3_realpath.substr(1)); + } + if (check_content_only){ + // Just need to know if there are child objects in dir + // For dir with children, expect "dir/" and "dir/child" + query_maxkey += "max-keys=2"; + }else{ + query_maxkey += "max-keys=" + str(max_keys_list_object); } - BodyData* body = s3fscurl.GetBodyData(); - // xmlDocPtr - if(NULL == (doc = xmlReadMemory(body->str(), static_cast(body->size()), "", NULL, 0))){ - S3FS_PRN_ERR("xmlReadMemory returns with error."); - return -1; - } - if(0 != append_objects_from_xml(path, doc, head)){ - S3FS_PRN_ERR("append_objects_from_xml returns with error."); - xmlFreeDoc(doc); - return -1; - } - if(true == (truncated = is_truncated(doc))){ - xmlChar* tmpch = get_next_marker(doc); - if(tmpch){ - next_marker = (char*)tmpch; - xmlFree(tmpch); - }else{ - // If did not specify "delimiter", s3 did not return "NextMarker". - // On this case, can use last name for next marker. 
- // - string lastname; - if(!head.GetLastName(lastname)){ - S3FS_PRN_WARN("Could not find next marker, thus break loop."); - truncated = false; - }else{ - next_marker = s3_realpath.substr(1); - if(0 == s3_realpath.length() || '/' != s3_realpath[s3_realpath.length() - 1]){ - next_marker += "/"; - } - next_marker += lastname; + while(truncated){ + string each_query = query_delimiter; + if(!next_marker.empty()){ + each_query += "marker=" + urlEncode(next_marker) + "&"; + next_marker = ""; } - } - } - S3FS_XMLFREEDOC(doc); + each_query += query_maxkey; + each_query += query_prefix; - // reset(initialize) curl object - s3fscurl.DestroyCurlHandle(); + // request + int result; + if(0 != (result = s3fscurl.ListBucketRequest(path, each_query.c_str()))){ + S3FS_PRN_ERR("ListBucketRequest returns with error."); + return result; + } + BodyData* body = s3fscurl.GetBodyData(); - if(check_content_only){ - break; - } - } - S3FS_MALLOCTRIM(0); - - return 0; -} - -static const char* c_strErrorObjectName = "FILE or SUBDIR in DIR"; - -static int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, - const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head) -{ - xmlXPathObjectPtr contents_xp; - xmlNodeSetPtr content_nodes; - - if(NULL == (contents_xp = xmlXPathEvalExpression((xmlChar*)ex_contents, ctx))){ - S3FS_PRN_ERR("xmlXPathEvalExpression returns null."); - return -1; - } - if(xmlXPathNodeSetIsEmpty(contents_xp->nodesetval)){ - S3FS_PRN_DBG("contents_xp->nodesetval is empty."); - S3FS_XMLXPATHFREEOBJECT(contents_xp); - return 0; - } - content_nodes = contents_xp->nodesetval; - - bool is_dir; - string stretag; - int i; - for(i = 0; i < content_nodes->nodeNr; i++){ - ctx->node = content_nodes->nodeTab[i]; - - // object name - xmlXPathObjectPtr key; - if(NULL == (key = xmlXPathEvalExpression((xmlChar*)ex_key, ctx))){ - S3FS_PRN_WARN("key is null. 
but continue."); - continue; - } - if(xmlXPathNodeSetIsEmpty(key->nodesetval)){ - S3FS_PRN_WARN("node is empty. but continue."); - xmlXPathFreeObject(key); - continue; - } - xmlNodeSetPtr key_nodes = key->nodesetval; - char* name = get_object_name(doc, key_nodes->nodeTab[0]->xmlChildrenNode, path); - - if(!name){ - S3FS_PRN_WARN("name is something wrong. but continue."); - - }else if((const char*)name != c_strErrorObjectName){ - is_dir = isCPrefix ? true : false; - stretag = ""; - - if(!isCPrefix && ex_etag){ - // Get ETag - xmlXPathObjectPtr ETag; - if(NULL != (ETag = xmlXPathEvalExpression((xmlChar*)ex_etag, ctx))){ - if(xmlXPathNodeSetIsEmpty(ETag->nodesetval)){ - S3FS_PRN_INFO("ETag->nodesetval is empty."); - }else{ - xmlNodeSetPtr etag_nodes = ETag->nodesetval; - xmlChar* petag = xmlNodeListGetString(doc, etag_nodes->nodeTab[0]->xmlChildrenNode, 1); - if(petag){ - stretag = (char*)petag; - xmlFree(petag); + // xmlDocPtr + if(NULL == (doc = xmlReadMemory(body->str(), static_cast(body->size()), "", NULL, 0))){ + S3FS_PRN_ERR("xmlReadMemory returns with error."); + return -1; + } + if(0 != append_objects_from_xml(path, doc, head)){ + S3FS_PRN_ERR("append_objects_from_xml returns with error."); + xmlFreeDoc(doc); + return -1; + } + if(true == (truncated = is_truncated(doc))){ + xmlChar* tmpch = get_next_marker(doc); + if(tmpch){ + next_marker = (char*)tmpch; + xmlFree(tmpch); + }else{ + // If did not specify "delimiter", s3 did not return "NextMarker". + // On this case, can use last name for next marker. + // + string lastname; + if(!head.GetLastName(lastname)){ + S3FS_PRN_WARN("Could not find next marker, thus break loop."); + truncated = false; + }else{ + next_marker = s3_realpath.substr(1); + if(0 == s3_realpath.length() || '/' != s3_realpath[s3_realpath.length() - 1]){ + next_marker += "/"; + } + next_marker += lastname; + } } - } - xmlXPathFreeObject(ETag); } - } - if(!head.insert(name, (0 < stretag.length() ? 
stretag.c_str() : NULL), is_dir)){ - S3FS_PRN_ERR("insert_object returns with error."); - xmlXPathFreeObject(key); - xmlXPathFreeObject(contents_xp); - free(name); - S3FS_MALLOCTRIM(0); - return -1; - } - free(name); - }else{ - S3FS_PRN_DBG("name is file or subdir in dir. but continue."); - } - xmlXPathFreeObject(key); - } - S3FS_XMLXPATHFREEOBJECT(contents_xp); + S3FS_XMLFREEDOC(doc); - return 0; -} + // reset(initialize) curl object + s3fscurl.DestroyCurlHandle(); -static bool GetXmlNsUrl(xmlDocPtr doc, string& nsurl) -{ - static time_t tmLast = 0; // cache for 60 sec. - static string strNs; - bool result = false; - - if(!doc){ - return false; - } - if((tmLast + 60) < time(NULL)){ - // refresh - tmLast = time(NULL); - strNs = ""; - xmlNodePtr pRootNode = xmlDocGetRootElement(doc); - if(pRootNode){ - xmlNsPtr* nslist = xmlGetNsList(doc, pRootNode); - if(nslist){ - if(nslist[0] && nslist[0]->href){ - strNs = (const char*)(nslist[0]->href); + if(check_content_only){ + break; } - S3FS_XMLFREE(nslist); - } } - } - if(!strNs.empty()){ - nsurl = strNs; - result = true; - } - return result; -} + S3FS_MALLOCTRIM(0); -static int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head) -{ - string xmlnsurl; - string ex_contents = "//"; - string ex_key; - string ex_cprefix = "//"; - string ex_prefix; - string ex_etag; - - if(!doc){ - return -1; - } - - // If there is not , use path instead of it. - xmlChar* pprefix = get_prefix(doc); - string prefix = (pprefix ? (char*)pprefix : path ? 
path : ""); - if(pprefix){ - xmlFree(pprefix); - } - - xmlXPathContextPtr ctx = xmlXPathNewContext(doc); - - if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){ - xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str()); - ex_contents+= "s3:"; - ex_key += "s3:"; - ex_cprefix += "s3:"; - ex_prefix += "s3:"; - ex_etag += "s3:"; - } - ex_contents+= "Contents"; - ex_key += "Key"; - ex_cprefix += "CommonPrefixes"; - ex_prefix += "Prefix"; - ex_etag += "ETag"; - - if(-1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_contents.c_str(), ex_key.c_str(), ex_etag.c_str(), 0, head) || - -1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_cprefix.c_str(), ex_prefix.c_str(), NULL, 1, head) ) - { - S3FS_PRN_ERR("append_objects_from_xml_ex returns with error."); - S3FS_XMLXPATHFREECONTEXT(ctx); - return -1; - } - S3FS_XMLXPATHFREECONTEXT(ctx); - - return 0; -} - -static xmlChar* get_base_exp(xmlDocPtr doc, const char* exp) -{ - xmlXPathObjectPtr marker_xp; - string xmlnsurl; - string exp_string; - - if(!doc){ - return NULL; - } - xmlXPathContextPtr ctx = xmlXPathNewContext(doc); - - if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){ - xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str()); - exp_string = "/s3:ListBucketResult/s3:"; - } else { - exp_string = "/ListBucketResult/"; - } - - exp_string += exp; - - if(NULL == (marker_xp = xmlXPathEvalExpression((xmlChar *)exp_string.c_str(), ctx))){ - xmlXPathFreeContext(ctx); - return NULL; - } - if(xmlXPathNodeSetIsEmpty(marker_xp->nodesetval)){ - S3FS_PRN_ERR("marker_xp->nodesetval is empty."); - xmlXPathFreeObject(marker_xp); - xmlXPathFreeContext(ctx); - return NULL; - } - xmlNodeSetPtr nodes = marker_xp->nodesetval; - xmlChar* result = xmlNodeListGetString(doc, nodes->nodeTab[0]->xmlChildrenNode, 1); - - xmlXPathFreeObject(marker_xp); - xmlXPathFreeContext(ctx); - - return result; -} - -static xmlChar* get_prefix(xmlDocPtr doc) -{ - return get_base_exp(doc, "Prefix"); -} - -static xmlChar* 
get_next_marker(xmlDocPtr doc) -{ - return get_base_exp(doc, "NextMarker"); -} - -static bool is_truncated(xmlDocPtr doc) -{ - bool result = false; - - xmlChar* strTruncate = get_base_exp(doc, "IsTruncated"); - if(!strTruncate){ - return false; - } - if(0 == strcasecmp((const char*)strTruncate, "true")){ - result = true; - } - xmlFree(strTruncate); - return result; -} - -// return: the pointer to object name on allocated memory. -// the pointer to "c_strErrorObjectName".(not allocated) -// NULL(a case of something error occurred) -static char* get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path) -{ - // Get full path - xmlChar* fullpath = xmlNodeListGetString(doc, node, 1); - if(!fullpath){ - S3FS_PRN_ERR("could not get object full path name.."); - return NULL; - } - // basepath(path) is as same as fullpath. - if(0 == strcmp((char*)fullpath, path)){ - xmlFree(fullpath); - return (char*)c_strErrorObjectName; - } - - // Make dir path and filename - string strdirpath = mydirname(string((char*)fullpath)); - string strmybpath = mybasename(string((char*)fullpath)); - const char* dirpath = strdirpath.c_str(); - const char* mybname = strmybpath.c_str(); - const char* basepath= (path && '/' == path[0]) ? &path[1] : path; - xmlFree(fullpath); - - if(!mybname || '\0' == mybname[0]){ - return NULL; - } - - // check subdir & file in subdir - if(dirpath && 0 < strlen(dirpath)){ - // case of "/" - if(0 == strcmp(mybname, "/") && 0 == strcmp(dirpath, "/")){ - return (char*)c_strErrorObjectName; - } - // case of "." - if(0 == strcmp(mybname, ".") && 0 == strcmp(dirpath, ".")){ - return (char*)c_strErrorObjectName; - } - // case of ".." 
- if(0 == strcmp(mybname, "..") && 0 == strcmp(dirpath, ".")){ - return (char*)c_strErrorObjectName; - } - // case of "name" - if(0 == strcmp(dirpath, ".")){ - // OK - return strdup(mybname); - }else{ - if(basepath && 0 == strcmp(dirpath, basepath)){ - // OK - return strdup(mybname); - }else if(basepath && 0 < strlen(basepath) && '/' == basepath[strlen(basepath) - 1] && 0 == strncmp(dirpath, basepath, strlen(basepath) - 1)){ - string withdirname; - if(strlen(dirpath) > strlen(basepath)){ - withdirname = &dirpath[strlen(basepath)]; - } - if(0 < withdirname.length() && '/' != withdirname[withdirname.length() - 1]){ - withdirname += "/"; - } - withdirname += mybname; - return strdup(withdirname.c_str()); - } - } - } - // case of something wrong - return (char*)c_strErrorObjectName; + return 0; } static int remote_mountpath_exists(const char* path) { - struct stat stbuf; + struct stat stbuf; - S3FS_PRN_INFO1("[path=%s]", path); + S3FS_PRN_INFO1("[path=%s]", path); - // getattr will prefix the path with the remote mountpoint - if(0 != get_object_attribute("/", &stbuf, NULL)){ - return -1; - } - if(!S_ISDIR(stbuf.st_mode)){ - return -1; - } - return 0; + // getattr will prefix the path with the remote mountpoint + if(0 != get_object_attribute("/", &stbuf, NULL)){ + return -1; + } + if(!S_ISDIR(stbuf.st_mode)){ + return -1; + } + return 0; } static void free_xattrs(xattrs_t& xattrs) { - for(xattrs_t::iterator iter = xattrs.begin(); iter != xattrs.end(); ++iter){ - delete iter->second; - } - xattrs.clear(); + for(xattrs_t::iterator iter = xattrs.begin(); iter != xattrs.end(); ++iter){ + delete iter->second; + } + xattrs.clear(); } static bool parse_xattr_keyval(const std::string& xattrpair, string& key, PXATTRVAL& pval) { - // parse key and value - size_t pos; - string tmpval; - if(string::npos == (pos = xattrpair.find_first_of(':'))){ - S3FS_PRN_ERR("one of xattr pair(%s) is wrong format.", xattrpair.c_str()); - return false; - } - key = xattrpair.substr(0, pos); - tmpval 
= xattrpair.substr(pos + 1); + // parse key and value + size_t pos; + string tmpval; + if(string::npos == (pos = xattrpair.find_first_of(':'))){ + S3FS_PRN_ERR("one of xattr pair(%s) is wrong format.", xattrpair.c_str()); + return false; + } + key = xattrpair.substr(0, pos); + tmpval = xattrpair.substr(pos + 1); - if(!takeout_str_dquart(key) || !takeout_str_dquart(tmpval)){ - S3FS_PRN_ERR("one of xattr pair(%s) is wrong format.", xattrpair.c_str()); - return false; - } + if(!takeout_str_dquart(key) || !takeout_str_dquart(tmpval)){ + S3FS_PRN_ERR("one of xattr pair(%s) is wrong format.", xattrpair.c_str()); + return false; + } - pval = new XATTRVAL; - pval->length = 0; - pval->pvalue = s3fs_decode64(tmpval.c_str(), &pval->length); + pval = new XATTRVAL; + pval->length = 0; + pval->pvalue = s3fs_decode64(tmpval.c_str(), &pval->length); - return true; + return true; } static size_t parse_xattrs(const std::string& strxattrs, xattrs_t& xattrs) { - xattrs.clear(); + xattrs.clear(); - // decode - string jsonxattrs = urlDecode(strxattrs); + // decode + string jsonxattrs = urlDecode(strxattrs); - // get from "{" to "}" - string restxattrs; - { - size_t startpos; - size_t endpos = string::npos; - if(string::npos != (startpos = jsonxattrs.find_first_of('{'))){ - endpos = jsonxattrs.find_last_of('}'); + // get from "{" to "}" + string restxattrs; + { + size_t startpos; + size_t endpos = string::npos; + if(string::npos != (startpos = jsonxattrs.find_first_of('{'))){ + endpos = jsonxattrs.find_last_of('}'); + } + if(startpos == string::npos || endpos == string::npos || endpos <= startpos){ + S3FS_PRN_WARN("xattr header(%s) is not json format.", jsonxattrs.c_str()); + return 0; + } + restxattrs = jsonxattrs.substr(startpos + 1, endpos - (startpos + 1)); } - if(startpos == string::npos || endpos == string::npos || endpos <= startpos){ - S3FS_PRN_WARN("xattr header(%s) is not json format.", jsonxattrs.c_str()); - return 0; - } - restxattrs = jsonxattrs.substr(startpos + 1, endpos - 
(startpos + 1)); - } - // parse each key:val - for(size_t pair_nextpos = restxattrs.find_first_of(','); 0 < restxattrs.length(); restxattrs = (pair_nextpos != string::npos ? restxattrs.substr(pair_nextpos + 1) : string("")), pair_nextpos = restxattrs.find_first_of(',')){ - string pair = pair_nextpos != string::npos ? restxattrs.substr(0, pair_nextpos) : restxattrs; - string key; - PXATTRVAL pval = NULL; - if(!parse_xattr_keyval(pair, key, pval)){ - // something format error, so skip this. - continue; + // parse each key:val + for(size_t pair_nextpos = restxattrs.find_first_of(','); 0 < restxattrs.length(); restxattrs = (pair_nextpos != string::npos ? restxattrs.substr(pair_nextpos + 1) : string("")), pair_nextpos = restxattrs.find_first_of(',')){ + string pair = pair_nextpos != string::npos ? restxattrs.substr(0, pair_nextpos) : restxattrs; + string key; + PXATTRVAL pval = NULL; + if(!parse_xattr_keyval(pair, key, pval)){ + // something format error, so skip this. + continue; + } + xattrs[key] = pval; } - xattrs[key] = pval; - } - return xattrs.size(); + return xattrs.size(); } static std::string build_xattrs(const xattrs_t& xattrs) { - string strxattrs("{"); + string strxattrs("{"); - bool is_set = false; - for(xattrs_t::const_iterator iter = xattrs.begin(); iter != xattrs.end(); ++iter){ - if(is_set){ - strxattrs += ','; - }else{ - is_set = true; + bool is_set = false; + for(xattrs_t::const_iterator iter = xattrs.begin(); iter != xattrs.end(); ++iter){ + if(is_set){ + strxattrs += ','; + }else{ + is_set = true; + } + strxattrs += '\"'; + strxattrs += iter->first; + strxattrs += "\":\""; + + if(iter->second){ + char* base64val = s3fs_base64((iter->second)->pvalue, (iter->second)->length); + if(base64val){ + strxattrs += base64val; + delete[] base64val; + } + } + strxattrs += '\"'; } - strxattrs += '\"'; - strxattrs += iter->first; - strxattrs += "\":\""; + strxattrs += '}'; - if(iter->second){ - char* base64val = s3fs_base64((iter->second)->pvalue, 
(iter->second)->length); - if(base64val){ - strxattrs += base64val; - delete[] base64val; - } - } - strxattrs += '\"'; - } - strxattrs += '}'; + strxattrs = urlEncode(strxattrs); - strxattrs = urlEncode(strxattrs); - - return strxattrs; + return strxattrs; } static int set_xattrs_to_header(headers_t& meta, const char* name, const char* value, size_t size, int flags) { - string strxattrs; - xattrs_t xattrs; + string strxattrs; + xattrs_t xattrs; - headers_t::iterator iter; - if(meta.end() == (iter = meta.find("x-amz-meta-xattr"))){ + headers_t::iterator iter; + if(meta.end() == (iter = meta.find("x-amz-meta-xattr"))){ #if defined(XATTR_REPLACE) - if(XATTR_REPLACE == (flags & XATTR_REPLACE)){ - // there is no xattr header but flags is replace, so failure. - return -ENOATTR; - } + if(XATTR_REPLACE == (flags & XATTR_REPLACE)){ + // there is no xattr header but flags is replace, so failure. + return -ENOATTR; + } #endif - }else{ + }else{ #if defined(XATTR_CREATE) - if(XATTR_CREATE == (flags & XATTR_CREATE)){ - // found xattr header but flags is only creating, so failure. - return -EEXIST; - } + if(XATTR_CREATE == (flags & XATTR_CREATE)){ + // found xattr header but flags is only creating, so failure. + return -EEXIST; + } #endif - strxattrs = iter->second; - } + strxattrs = iter->second; + } - // get map as xattrs_t - parse_xattrs(strxattrs, xattrs); + // get map as xattrs_t + parse_xattrs(strxattrs, xattrs); - // add name(do not care overwrite and empty name/value) - xattrs_t::iterator xiter; - if(xattrs.end() != (xiter = xattrs.find(string(name)))){ - // found same head. free value. - delete xiter->second; - } + // add name(do not care overwrite and empty name/value) + xattrs_t::iterator xiter; + if(xattrs.end() != (xiter = xattrs.find(string(name)))){ + // found same head. free value. 
+ delete xiter->second; + } - PXATTRVAL pval = new XATTRVAL; - pval->length = size; - if(0 < size){ - pval->pvalue = new unsigned char[size]; - memcpy(pval->pvalue, value, size); - }else{ - pval->pvalue = NULL; - } - xattrs[string(name)] = pval; + PXATTRVAL pval = new XATTRVAL; + pval->length = size; + if(0 < size){ + pval->pvalue = new unsigned char[size]; + memcpy(pval->pvalue, value, size); + }else{ + pval->pvalue = NULL; + } + xattrs[string(name)] = pval; - // build new strxattrs(not encoded) and set it to headers_t - meta["x-amz-meta-xattr"] = build_xattrs(xattrs); + // build new strxattrs(not encoded) and set it to headers_t + meta["x-amz-meta-xattr"] = build_xattrs(xattrs); - free_xattrs(xattrs); + free_xattrs(xattrs); - return 0; + return 0; } #if defined(__APPLE__) @@ -3177,129 +2819,128 @@ static int s3fs_setxattr(const char* path, const char* name, const char* value, static int s3fs_setxattr(const char* path, const char* name, const char* value, size_t size, int flags) #endif { - S3FS_PRN_INFO("[path=%s][name=%s][value=%p][size=%zu][flags=0x%x]", path, name, value, size, flags); + S3FS_PRN_INFO("[path=%s][name=%s][value=%p][size=%zu][flags=0x%x]", path, name, value, size, flags); - if((value && 0 == size) || (!value && 0 < size)){ - S3FS_PRN_ERR("Wrong parameter: value(%p), size(%zu)", value, size); - return 0; - } + if((value && 0 == size) || (!value && 0 < size)){ + S3FS_PRN_ERR("Wrong parameter: value(%p), size(%zu)", value, size); + return 0; + } #if defined(__APPLE__) - if (position != 0) { - // No resource fork support - return -EINVAL; - } + if (position != 0) { + // No resource fork support + return -EINVAL; + } #endif - int result; - string strpath; - string newpath; - string nowcache; - headers_t meta; - struct stat stbuf; - dirtype nDirType = DIRTYPE_UNKNOWN; + int result; + string strpath; + string newpath; + string nowcache; + headers_t meta; + struct stat stbuf; + dirtype nDirType = DIRTYPE_UNKNOWN; - if(0 == strcmp(path, "/")){ - 
S3FS_PRN_ERR("Could not change mode for mount point."); - return -EIO; - } - if(0 != (result = check_parent_object_access(path, X_OK))){ - return result; - } - if(0 != (result = check_object_owner(path, &stbuf))){ - return result; - } - - if(S_ISDIR(stbuf.st_mode)){ - result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType); - }else{ - strpath = path; - nowcache = strpath; - result = get_object_attribute(strpath.c_str(), NULL, &meta); - } - if(0 != result){ - return result; - } - - if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){ - // Should rebuild directory object(except new type) - // Need to remove old dir("dir" etc) and make new dir("dir/") - - // At first, remove directory old object - if(0 != (result = remove_old_type_dir(strpath, nDirType))){ - return result; - } - StatCache::getStatCacheData()->DelStat(nowcache); - - // Make new directory object("dir/") - if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, stbuf.st_mtime, stbuf.st_uid, stbuf.st_gid))){ - return result; - } - - // need to set xattr header for directory. - strpath = newpath; - nowcache = strpath; - } - - // set xattr all object - headers_t updatemeta; - updatemeta["x-amz-meta-ctime"] = str(time(NULL)); - updatemeta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str())); - updatemeta["x-amz-metadata-directive"] = "REPLACE"; - - // check opened file handle. - // - // If the file starts uploading by multipart when the disk capacity is insufficient, - // we need to put these header after finishing upload. - // Or if the file is only open, we must update to FdEntity's internal meta. - // - FdEntity* ent; - if(NULL != (ent = FdManager::get()->ExistOpen(path, -1, true))){ - // the file is opened now. - - // get xattr and make new xattr - string strxattr; - if(ent->GetXattr(strxattr)){ - updatemeta["x-amz-meta-xattr"] = strxattr; - }else{ - // [NOTE] - // Set an empty xattr. 
- // This requires the key to be present in order to add xattr. - ent->SetXattr(strxattr); - } - if(0 != (result = set_xattrs_to_header(updatemeta, name, value, size, flags))){ - return result; - } - - if(ent->MergeOrgMeta(updatemeta)){ - // now uploading - // the meta is pending and accumulated to be put after the upload is complete. - S3FS_PRN_INFO("meta pending until upload is complete"); - }else{ - // allow to put header - // updatemeta already merged the orgmeta of the opened files. - if(0 != put_headers(strpath.c_str(), updatemeta, true)){ + if(0 == strcmp(path, "/")){ + S3FS_PRN_ERR("Could not change mode for mount point."); return -EIO; - } - StatCache::getStatCacheData()->DelStat(nowcache); + } + if(0 != (result = check_parent_object_access(path, X_OK))){ + return result; + } + if(0 != (result = check_object_owner(path, &stbuf))){ + return result; } - }else{ - // not opened file, then put headers - merge_headers(meta, updatemeta, true); - - // NOTICE: modify xattr from base meta - if(0 != (result = set_xattrs_to_header(meta, name, value, size, flags))){ - return result; + if(S_ISDIR(stbuf.st_mode)){ + result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType); + }else{ + strpath = path; + nowcache = strpath; + result = get_object_attribute(strpath.c_str(), NULL, &meta); + } + if(0 != result){ + return result; } - if(0 != put_headers(strpath.c_str(), meta, true)){ - return -EIO; - } - StatCache::getStatCacheData()->DelStat(nowcache); - } + if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){ + // Should rebuild directory object(except new type) + // Need to remove old dir("dir" etc) and make new dir("dir/") - return 0; + // At first, remove directory old object + if(0 != (result = remove_old_type_dir(strpath, nDirType))){ + return result; + } + StatCache::getStatCacheData()->DelStat(nowcache); + + // Make new directory object("dir/") + if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, stbuf.st_mtime, 
stbuf.st_uid, stbuf.st_gid))){ + return result; + } + + // need to set xattr header for directory. + strpath = newpath; + nowcache = strpath; + } + + // set xattr all object + headers_t updatemeta; + updatemeta["x-amz-meta-ctime"] = str(time(NULL)); + updatemeta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str())); + updatemeta["x-amz-metadata-directive"] = "REPLACE"; + + // check opened file handle. + // + // If the file starts uploading by multipart when the disk capacity is insufficient, + // we need to put these header after finishing upload. + // Or if the file is only open, we must update to FdEntity's internal meta. + // + FdEntity* ent; + if(NULL != (ent = FdManager::get()->ExistOpen(path, -1, true))){ + // the file is opened now. + + // get xattr and make new xattr + string strxattr; + if(ent->GetXattr(strxattr)){ + updatemeta["x-amz-meta-xattr"] = strxattr; + }else{ + // [NOTE] + // Set an empty xattr. + // This requires the key to be present in order to add xattr. + ent->SetXattr(strxattr); + } + if(0 != (result = set_xattrs_to_header(updatemeta, name, value, size, flags))){ + return result; + } + + if(ent->MergeOrgMeta(updatemeta)){ + // now uploading + // the meta is pending and accumulated to be put after the upload is complete. + S3FS_PRN_INFO("meta pending until upload is complete"); + }else{ + // allow to put header + // updatemeta already merged the orgmeta of the opened files. 
+ if(0 != put_headers(strpath.c_str(), updatemeta, true)){ + return -EIO; + } + StatCache::getStatCacheData()->DelStat(nowcache); + } + + }else{ + // not opened file, then put headers + merge_headers(meta, updatemeta, true); + + // NOTICE: modify xattr from base meta + if(0 != (result = set_xattrs_to_header(meta, name, value, size, flags))){ + return result; + } + + if(0 != put_headers(strpath.c_str(), meta, true)){ + return -EIO; + } + StatCache::getStatCacheData()->DelStat(nowcache); + } + return 0; } #if defined(__APPLE__) @@ -3308,602 +2949,374 @@ static int s3fs_getxattr(const char* path, const char* name, char* value, size_t static int s3fs_getxattr(const char* path, const char* name, char* value, size_t size) #endif { - S3FS_PRN_INFO("[path=%s][name=%s][value=%p][size=%zu]", path, name, value, size); + S3FS_PRN_INFO("[path=%s][name=%s][value=%p][size=%zu]", path, name, value, size); - if(!path || !name){ - return -EIO; - } + if(!path || !name){ + return -EIO; + } #if defined(__APPLE__) - if (position != 0) { - // No resource fork support - return -EINVAL; - } + if (position != 0) { + // No resource fork support + return -EINVAL; + } #endif - int result; - headers_t meta; - xattrs_t xattrs; + int result; + headers_t meta; + xattrs_t xattrs; - // check parent directory attribute. - if(0 != (result = check_parent_object_access(path, X_OK))){ - return result; - } + // check parent directory attribute. 
+ if(0 != (result = check_parent_object_access(path, X_OK))){ + return result; + } - // get headers - if(0 != (result = get_object_attribute(path, NULL, &meta))){ - return result; - } + // get headers + if(0 != (result = get_object_attribute(path, NULL, &meta))){ + return result; + } - // get xattrs - headers_t::iterator hiter = meta.find("x-amz-meta-xattr"); - if(meta.end() == hiter){ - // object does not have xattrs - return -ENOATTR; - } - string strxattrs = hiter->second; + // get xattrs + headers_t::iterator hiter = meta.find("x-amz-meta-xattr"); + if(meta.end() == hiter){ + // object does not have xattrs + return -ENOATTR; + } + string strxattrs = hiter->second; - parse_xattrs(strxattrs, xattrs); + parse_xattrs(strxattrs, xattrs); - // search name - string strname = name; - xattrs_t::iterator xiter = xattrs.find(strname); - if(xattrs.end() == xiter){ - // not found name in xattrs + // search name + string strname = name; + xattrs_t::iterator xiter = xattrs.find(strname); + if(xattrs.end() == xiter){ + // not found name in xattrs + free_xattrs(xattrs); + return -ENOATTR; + } + + // decode + size_t length = 0; + unsigned char* pvalue = NULL; + if(NULL != xiter->second){ + length = xiter->second->length; + pvalue = xiter->second->pvalue; + } + + if(0 < size){ + if(static_cast(size) < length){ + // over buffer size + free_xattrs(xattrs); + return -ERANGE; + } + if(pvalue){ + memcpy(value, pvalue, length); + } + } free_xattrs(xattrs); - return -ENOATTR; - } - // decode - size_t length = 0; - unsigned char* pvalue = NULL; - if(NULL != xiter->second){ - length = xiter->second->length; - pvalue = xiter->second->pvalue; - } - - if(0 < size){ - if(static_cast(size) < length){ - // over buffer size - free_xattrs(xattrs); - return -ERANGE; - } - if(pvalue){ - memcpy(value, pvalue, length); - } - } - free_xattrs(xattrs); - - return static_cast(length); + return static_cast(length); } static int s3fs_listxattr(const char* path, char* list, size_t size) { - 
S3FS_PRN_INFO("[path=%s][list=%p][size=%zu]", path, list, size); + S3FS_PRN_INFO("[path=%s][list=%p][size=%zu]", path, list, size); - if(!path){ - return -EIO; - } - - int result; - headers_t meta; - xattrs_t xattrs; - - // check parent directory attribute. - if(0 != (result = check_parent_object_access(path, X_OK))){ - return result; - } - - // get headers - if(0 != (result = get_object_attribute(path, NULL, &meta))){ - return result; - } - - // get xattrs - headers_t::iterator iter; - if(meta.end() == (iter = meta.find("x-amz-meta-xattr"))){ - // object does not have xattrs - return 0; - } - string strxattrs = iter->second; - - parse_xattrs(strxattrs, xattrs); - - // calculate total name length - size_t total = 0; - for(xattrs_t::const_iterator xiter = xattrs.begin(); xiter != xattrs.end(); ++xiter){ - if(0 < xiter->first.length()){ - total += xiter->first.length() + 1; + if(!path){ + return -EIO; } - } - if(0 == total){ - free_xattrs(xattrs); - return 0; - } + int result; + headers_t meta; + xattrs_t xattrs; - // check parameters - if(0 == size){ + // check parent directory attribute. 
+ if(0 != (result = check_parent_object_access(path, X_OK))){ + return result; + } + + // get headers + if(0 != (result = get_object_attribute(path, NULL, &meta))){ + return result; + } + + // get xattrs + headers_t::iterator iter; + if(meta.end() == (iter = meta.find("x-amz-meta-xattr"))){ + // object does not have xattrs + return 0; + } + string strxattrs = iter->second; + + parse_xattrs(strxattrs, xattrs); + + // calculate total name length + size_t total = 0; + for(xattrs_t::const_iterator xiter = xattrs.begin(); xiter != xattrs.end(); ++xiter){ + if(0 < xiter->first.length()){ + total += xiter->first.length() + 1; + } + } + + if(0 == total){ + free_xattrs(xattrs); + return 0; + } + + // check parameters + if(0 == size){ + free_xattrs(xattrs); + return total; + } + if(!list || size < total){ + free_xattrs(xattrs); + return -ERANGE; + } + + // copy to list + char* setpos = list; + for(xattrs_t::const_iterator xiter = xattrs.begin(); xiter != xattrs.end(); ++xiter){ + if(0 < xiter->first.length()){ + strcpy(setpos, xiter->first.c_str()); + setpos = &setpos[strlen(setpos) + 1]; + } + } free_xattrs(xattrs); + return total; - } - if(!list || size < total){ - free_xattrs(xattrs); - return -ERANGE; - } - - // copy to list - char* setpos = list; - for(xattrs_t::const_iterator xiter = xattrs.begin(); xiter != xattrs.end(); ++xiter){ - if(0 < xiter->first.length()){ - strcpy(setpos, xiter->first.c_str()); - setpos = &setpos[strlen(setpos) + 1]; - } - } - free_xattrs(xattrs); - - return total; } static int s3fs_removexattr(const char* path, const char* name) { - S3FS_PRN_INFO("[path=%s][name=%s]", path, name); + S3FS_PRN_INFO("[path=%s][name=%s]", path, name); - if(!path || !name){ - return -EIO; - } - - int result; - string strpath; - string newpath; - string nowcache; - headers_t meta; - xattrs_t xattrs; - struct stat stbuf; - dirtype nDirType = DIRTYPE_UNKNOWN; - - if(0 == strcmp(path, "/")){ - S3FS_PRN_ERR("Could not change mode for mount point."); - return -EIO; - } 
- if(0 != (result = check_parent_object_access(path, X_OK))){ - return result; - } - if(0 != (result = check_object_owner(path, &stbuf))){ - return result; - } - - if(S_ISDIR(stbuf.st_mode)){ - result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType); - }else{ - strpath = path; - nowcache = strpath; - result = get_object_attribute(strpath.c_str(), NULL, &meta); - } - if(0 != result){ - return result; - } - - // get xattrs - headers_t::iterator hiter = meta.find("x-amz-meta-xattr"); - if(meta.end() == hiter){ - // object does not have xattrs - return -ENOATTR; - } - string strxattrs = hiter->second; - - parse_xattrs(strxattrs, xattrs); - - // check name xattrs - string strname = name; - xattrs_t::iterator xiter = xattrs.find(strname); - if(xattrs.end() == xiter){ - free_xattrs(xattrs); - return -ENOATTR; - } - - // make new header_t after deleting name xattr - delete xiter->second; - xattrs.erase(xiter); - - if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){ - // Should rebuild directory object(except new type) - // Need to remove old dir("dir" etc) and make new dir("dir/") - - // At first, remove directory old object - if(0 != (result = remove_old_type_dir(strpath, nDirType))){ - return result; - } - StatCache::getStatCacheData()->DelStat(nowcache); - - // Make new directory object("dir/") - if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, stbuf.st_mtime, stbuf.st_uid, stbuf.st_gid))){ - free_xattrs(xattrs); - return result; - } - - // need to set xattr header for directory. - strpath = newpath; - nowcache = strpath; - } - - // set xattr all object - headers_t updatemeta; - updatemeta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str())); - updatemeta["x-amz-metadata-directive"] = "REPLACE"; - if(!xattrs.empty()){ - updatemeta["x-amz-meta-xattr"] = build_xattrs(xattrs); - }else{ - updatemeta["x-amz-meta-xattr"] = string(""); // This is a special case. 
If empty, this header will eventually be removed. - } - free_xattrs(xattrs); - - // check opened file handle. - // - // If the file starts uploading by multipart when the disk capacity is insufficient, - // we need to put these header after finishing upload. - // Or if the file is only open, we must update to FdEntity's internal meta. - // - FdEntity* ent; - if(NULL != (ent = FdManager::get()->ExistOpen(path, -1, true))){ - // the file is opened now. - if(ent->MergeOrgMeta(updatemeta)){ - // now uploading - // the meta is pending and accumulated to be put after the upload is complete. - S3FS_PRN_INFO("meta pending until upload is complete"); - }else{ - // allow to put header - // updatemeta already merged the orgmeta of the opened files. - if(updatemeta["x-amz-meta-xattr"].empty()){ - updatemeta.erase("x-amz-meta-xattr"); - } - if(0 != put_headers(strpath.c_str(), updatemeta, true)){ + if(!path || !name){ return -EIO; - } - StatCache::getStatCacheData()->DelStat(nowcache); } - }else{ - // not opened file, then put headers - if(updatemeta["x-amz-meta-xattr"].empty()){ - updatemeta.erase("x-amz-meta-xattr"); - } - merge_headers(meta, updatemeta, true); - if(0 != put_headers(strpath.c_str(), meta, true)){ - return -EIO; - } - StatCache::getStatCacheData()->DelStat(nowcache); - } + int result; + string strpath; + string newpath; + string nowcache; + headers_t meta; + xattrs_t xattrs; + struct stat stbuf; + dirtype nDirType = DIRTYPE_UNKNOWN; - return 0; + if(0 == strcmp(path, "/")){ + S3FS_PRN_ERR("Could not change mode for mount point."); + return -EIO; + } + if(0 != (result = check_parent_object_access(path, X_OK))){ + return result; + } + if(0 != (result = check_object_owner(path, &stbuf))){ + return result; + } + + if(S_ISDIR(stbuf.st_mode)){ + result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType); + }else{ + strpath = path; + nowcache = strpath; + result = get_object_attribute(strpath.c_str(), NULL, &meta); + } + if(0 != result){ + 
return result; + } + + // get xattrs + headers_t::iterator hiter = meta.find("x-amz-meta-xattr"); + if(meta.end() == hiter){ + // object does not have xattrs + return -ENOATTR; + } + string strxattrs = hiter->second; + + parse_xattrs(strxattrs, xattrs); + + // check name xattrs + string strname = name; + xattrs_t::iterator xiter = xattrs.find(strname); + if(xattrs.end() == xiter){ + free_xattrs(xattrs); + return -ENOATTR; + } + + // make new header_t after deleting name xattr + delete xiter->second; + xattrs.erase(xiter); + + if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){ + // Should rebuild directory object(except new type) + // Need to remove old dir("dir" etc) and make new dir("dir/") + + // At first, remove directory old object + if(0 != (result = remove_old_type_dir(strpath, nDirType))){ + return result; + } + StatCache::getStatCacheData()->DelStat(nowcache); + + // Make new directory object("dir/") + if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, stbuf.st_mtime, stbuf.st_uid, stbuf.st_gid))){ + free_xattrs(xattrs); + return result; + } + + // need to set xattr header for directory. + strpath = newpath; + nowcache = strpath; + } + + // set xattr all object + headers_t updatemeta; + updatemeta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str())); + updatemeta["x-amz-metadata-directive"] = "REPLACE"; + if(!xattrs.empty()){ + updatemeta["x-amz-meta-xattr"] = build_xattrs(xattrs); + }else{ + updatemeta["x-amz-meta-xattr"] = string(""); // This is a special case. If empty, this header will eventually be removed. + } + free_xattrs(xattrs); + + // check opened file handle. + // + // If the file starts uploading by multipart when the disk capacity is insufficient, + // we need to put these header after finishing upload. + // Or if the file is only open, we must update to FdEntity's internal meta. 
+ // + FdEntity* ent; + if(NULL != (ent = FdManager::get()->ExistOpen(path, -1, true))){ + // the file is opened now. + if(ent->MergeOrgMeta(updatemeta)){ + // now uploading + // the meta is pending and accumulated to be put after the upload is complete. + S3FS_PRN_INFO("meta pending until upload is complete"); + }else{ + // allow to put header + // updatemeta already merged the orgmeta of the opened files. + if(updatemeta["x-amz-meta-xattr"].empty()){ + updatemeta.erase("x-amz-meta-xattr"); + } + if(0 != put_headers(strpath.c_str(), updatemeta, true)){ + return -EIO; + } + StatCache::getStatCacheData()->DelStat(nowcache); + } + + }else{ + // not opened file, then put headers + if(updatemeta["x-amz-meta-xattr"].empty()){ + updatemeta.erase("x-amz-meta-xattr"); + } + merge_headers(meta, updatemeta, true); + if(0 != put_headers(strpath.c_str(), meta, true)){ + return -EIO; + } + StatCache::getStatCacheData()->DelStat(nowcache); + } + + return 0; } // s3fs_init calls this function to exit cleanly from the fuse event loop. 
// // There's no way to pass an exit status to the high-level event loop API, so // this function stores the exit value in a global for main() -static void s3fs_exit_fuseloop(int exit_status) { - S3FS_PRN_ERR("Exiting FUSE event loop due to errors\n"); - s3fs_init_deferred_exit_status = exit_status; - struct fuse_context *ctx = fuse_get_context(); - if (NULL != ctx) { - fuse_exit(ctx->fuse); - } +static void s3fs_exit_fuseloop(int exit_status) +{ + S3FS_PRN_ERR("Exiting FUSE event loop due to errors\n"); + s3fs_init_deferred_exit_status = exit_status; + struct fuse_context *ctx = fuse_get_context(); + if (NULL != ctx) { + fuse_exit(ctx->fuse); + } } static void* s3fs_init(struct fuse_conn_info* conn) { - S3FS_PRN_INIT_INFO("init v%s(commit:%s) with %s", VERSION, COMMIT_HASH_VAL, s3fs_crypt_lib_name()); + S3FS_PRN_INIT_INFO("init v%s(commit:%s) with %s", VERSION, COMMIT_HASH_VAL, s3fs_crypt_lib_name()); - // cache(remove cache dirs at first) - if(is_remove_cache && (!CacheFileStat::DeleteCacheFileStatDirectory() || !FdManager::DeleteCacheDirectory())){ - S3FS_PRN_DBG("Could not initialize cache directory."); - } - - // check loading IAM role name - if(load_iamrole){ - // load IAM role name from http://169.254.169.254/latest/meta-data/iam/security-credentials - // - S3fsCurl s3fscurl; - if(!s3fscurl.LoadIAMRoleFromMetaData()){ - S3FS_PRN_CRIT("could not load IAM role name from meta data."); - s3fs_exit_fuseloop(EXIT_FAILURE); - return NULL; + // cache(remove cache dirs at first) + if(is_remove_cache && (!CacheFileStat::DeleteCacheFileStatDirectory() || !FdManager::DeleteCacheDirectory())){ + S3FS_PRN_DBG("Could not initialize cache directory."); } - S3FS_PRN_INFO("loaded IAM role name = %s", S3fsCurl::GetIAMRole()); - } - if (create_bucket){ - int result = do_create_bucket(); - if(result != 0){ - s3fs_exit_fuseloop(result); - return NULL; + // check loading IAM role name + if(load_iamrole){ + // load IAM role name from 
http://169.254.169.254/latest/meta-data/iam/security-credentials + // + S3fsCurl s3fscurl; + if(!s3fscurl.LoadIAMRoleFromMetaData()){ + S3FS_PRN_CRIT("could not load IAM role name from meta data."); + s3fs_exit_fuseloop(EXIT_FAILURE); + return NULL; + } + S3FS_PRN_INFO("loaded IAM role name = %s", S3fsCurl::GetIAMRole()); } - } - // Check Bucket - { - int result; - if(EXIT_SUCCESS != (result = s3fs_check_service())){ - s3fs_exit_fuseloop(result); - return NULL; + if (create_bucket){ + int result = do_create_bucket(); + if(result != 0){ + s3fs_exit_fuseloop(result); + return NULL; + } } - } - // Investigate system capabilities - #ifndef __APPLE__ - if((unsigned int)conn->capable & FUSE_CAP_ATOMIC_O_TRUNC){ - conn->want |= FUSE_CAP_ATOMIC_O_TRUNC; - } - #endif + // Check Bucket + { + int result; + if(EXIT_SUCCESS != (result = s3fs_check_service())){ + s3fs_exit_fuseloop(result); + return NULL; + } + } - if((unsigned int)conn->capable & FUSE_CAP_BIG_WRITES){ - conn->want |= FUSE_CAP_BIG_WRITES; - } + // Investigate system capabilities + #ifndef __APPLE__ + if((unsigned int)conn->capable & FUSE_CAP_ATOMIC_O_TRUNC){ + conn->want |= FUSE_CAP_ATOMIC_O_TRUNC; + } + #endif - // Signal object - if(S3fsSignals::Initialize()){ - S3FS_PRN_ERR("Failed to initialize signal object, but continue..."); - } + if((unsigned int)conn->capable & FUSE_CAP_BIG_WRITES){ + conn->want |= FUSE_CAP_BIG_WRITES; + } - return NULL; + // Signal object + if(S3fsSignals::Initialize()){ + S3FS_PRN_ERR("Failed to initialize signal object, but continue..."); + } + + return NULL; } static void s3fs_destroy(void*) { - S3FS_PRN_INFO("destroy"); + S3FS_PRN_INFO("destroy"); - // Signal object - if(S3fsSignals::Destroy()){ - S3FS_PRN_WARN("Failed to clean up signal object."); - } + // Signal object + if(S3fsSignals::Destroy()){ + S3FS_PRN_WARN("Failed to clean up signal object."); + } - // cache(remove at last) - if(is_remove_cache && (!CacheFileStat::DeleteCacheFileStatDirectory() || 
!FdManager::DeleteCacheDirectory())){ - S3FS_PRN_WARN("Could not remove cache directory."); - } + // cache(remove at last) + if(is_remove_cache && (!CacheFileStat::DeleteCacheFileStatDirectory() || !FdManager::DeleteCacheDirectory())){ + S3FS_PRN_WARN("Could not remove cache directory."); + } } static int s3fs_access(const char* path, int mask) { - S3FS_PRN_INFO("[path=%s][mask=%s%s%s%s]", path, - ((mask & R_OK) == R_OK) ? "R_OK " : "", - ((mask & W_OK) == W_OK) ? "W_OK " : "", - ((mask & X_OK) == X_OK) ? "X_OK " : "", - (mask == F_OK) ? "F_OK" : ""); + S3FS_PRN_INFO("[path=%s][mask=%s%s%s%s]", path, + ((mask & R_OK) == R_OK) ? "R_OK " : "", + ((mask & W_OK) == W_OK) ? "W_OK " : "", + ((mask & X_OK) == X_OK) ? "X_OK " : "", + (mask == F_OK) ? "F_OK" : ""); - int result = check_object_access(path, mask, NULL); - S3FS_MALLOCTRIM(0); - return result; -} - -static xmlChar* get_exp_value_xml(xmlDocPtr doc, xmlXPathContextPtr ctx, const char* exp_key) -{ - if(!doc || !ctx || !exp_key){ - return NULL; - } - - xmlXPathObjectPtr exp; - xmlNodeSetPtr exp_nodes; - xmlChar* exp_value; - - // search exp_key tag - if(NULL == (exp = xmlXPathEvalExpression((xmlChar*)exp_key, ctx))){ - S3FS_PRN_ERR("Could not find key(%s).", exp_key); - return NULL; - } - if(xmlXPathNodeSetIsEmpty(exp->nodesetval)){ - S3FS_PRN_ERR("Key(%s) node is empty.", exp_key); - S3FS_XMLXPATHFREEOBJECT(exp); - return NULL; - } - // get exp_key value & set in struct - exp_nodes = exp->nodesetval; - if(NULL == (exp_value = xmlNodeListGetString(doc, exp_nodes->nodeTab[0]->xmlChildrenNode, 1))){ - S3FS_PRN_ERR("Key(%s) value is empty.", exp_key); - S3FS_XMLXPATHFREEOBJECT(exp); - return NULL; - } - - S3FS_XMLXPATHFREEOBJECT(exp); - return exp_value; -} - -static void print_incomp_mpu_list(incomp_mpu_list_t& list) -{ - printf("\n"); - printf("Lists the parts that have been uploaded for a specific multipart upload.\n"); - printf("\n"); - - if(!list.empty()){ - 
printf("---------------------------------------------------------------\n"); - - int cnt = 0; - for(incomp_mpu_list_t::iterator iter = list.begin(); iter != list.end(); ++iter, ++cnt){ - printf(" Path : %s\n", (*iter).key.c_str()); - printf(" UploadId : %s\n", (*iter).id.c_str()); - printf(" Date : %s\n", (*iter).date.c_str()); - printf("\n"); - } - printf("---------------------------------------------------------------\n"); - - }else{ - printf("There is no list.\n"); - } -} - -static bool abort_incomp_mpu_list(incomp_mpu_list_t& list, time_t abort_time) -{ - if(list.empty()){ - return true; - } - time_t now_time = time(NULL); - - // do removing. - S3fsCurl s3fscurl; - bool result = true; - for(incomp_mpu_list_t::iterator iter = list.begin(); iter != list.end(); ++iter){ - const char* tpath = (*iter).key.c_str(); - string upload_id = (*iter).id; - - if(0 != abort_time){ // abort_time is 0, it means all. - time_t date = 0; - if(!get_unixtime_from_iso8601((*iter).date.c_str(), date)){ - S3FS_PRN_DBG("date format is not ISO 8601 for %s multipart uploading object, skip this.", tpath); - continue; - } - if(now_time <= (date + abort_time)){ - continue; - } - } - - if(0 != s3fscurl.AbortMultipartUpload(tpath, upload_id)){ - S3FS_PRN_EXIT("Failed to remove %s multipart uploading object.", tpath); - result = false; - }else{ - printf("Succeed to remove %s multipart uploading object.\n", tpath); - } - - // reset(initialize) curl object - s3fscurl.DestroyCurlHandle(); - } - - return result; -} - -static bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list) -{ - if(!doc){ - return false; - } - - xmlXPathContextPtr ctx = xmlXPathNewContext(doc);; - - string xmlnsurl; - string ex_upload = "//"; - string ex_key; - string ex_id; - string ex_date; - - if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){ - xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str()); - ex_upload += "s3:"; - ex_key += "s3:"; - ex_id += "s3:"; - ex_date += "s3:"; - } - ex_upload += "Upload"; - 
ex_key += "Key"; - ex_id += "UploadId"; - ex_date += "Initiated"; - - // get "Upload" Tags - xmlXPathObjectPtr upload_xp; - if(NULL == (upload_xp = xmlXPathEvalExpression((xmlChar*)ex_upload.c_str(), ctx))){ - S3FS_PRN_ERR("xmlXPathEvalExpression returns null."); - return false; - } - if(xmlXPathNodeSetIsEmpty(upload_xp->nodesetval)){ - S3FS_PRN_INFO("upload_xp->nodesetval is empty."); - S3FS_XMLXPATHFREEOBJECT(upload_xp); - S3FS_XMLXPATHFREECONTEXT(ctx); - return true; - } - - // Make list - int cnt; - xmlNodeSetPtr upload_nodes; - list.clear(); - for(cnt = 0, upload_nodes = upload_xp->nodesetval; cnt < upload_nodes->nodeNr; cnt++){ - ctx->node = upload_nodes->nodeTab[cnt]; - - INCOMP_MPU_INFO part; - xmlChar* ex_value; - - // search "Key" tag - if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_key.c_str()))){ - continue; - } - if('/' != *((char*)ex_value)){ - part.key = "/"; - }else{ - part.key = ""; - } - part.key += (char*)ex_value; - S3FS_XMLFREE(ex_value); - - // search "UploadId" tag - if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_id.c_str()))){ - continue; - } - part.id = (char*)ex_value; - S3FS_XMLFREE(ex_value); - - // search "Initiated" tag - if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_date.c_str()))){ - continue; - } - part.date = (char*)ex_value; - S3FS_XMLFREE(ex_value); - - list.push_back(part); - } - - S3FS_XMLXPATHFREEOBJECT(upload_xp); - S3FS_XMLXPATHFREECONTEXT(ctx); - - return true; -} - -static int s3fs_utility_processing(time_t abort_time) -{ - if(NO_UTILITY_MODE == utility_mode){ - return EXIT_FAILURE; - } - printf("\n*** s3fs run as utility mode.\n\n"); - - S3fsCurl s3fscurl; - string body; - int result = EXIT_SUCCESS; - if(0 != s3fscurl.MultipartListRequest(body)){ - S3FS_PRN_EXIT("Could not get list multipart upload.\nThere is no incomplete multipart uploaded object in bucket.\n"); - result = EXIT_FAILURE; - }else{ - // parse result(incomplete multipart upload information) - S3FS_PRN_DBG("response body = {\n%s\n}", 
body.c_str()); - - xmlDocPtr doc; - if(NULL == (doc = xmlReadMemory(body.c_str(), static_cast(body.size()), "", NULL, 0))){ - S3FS_PRN_DBG("xmlReadMemory exited with error."); - result = EXIT_FAILURE; - - }else{ - // make incomplete uploads list - incomp_mpu_list_t list; - if(!get_incomp_mpu_list(doc, list)){ - S3FS_PRN_DBG("get_incomp_mpu_list exited with error."); - result = EXIT_FAILURE; - - }else{ - if(INCOMP_TYPE_LIST == utility_mode){ - // print list - print_incomp_mpu_list(list); - }else if(INCOMP_TYPE_ABORT == utility_mode){ - // remove - if(!abort_incomp_mpu_list(list, abort_time)){ - S3FS_PRN_DBG("an error occurred during removal process."); - result = EXIT_FAILURE; - } - } - } - S3FS_XMLFREEDOC(doc); - } - } - - // ssl - s3fs_destroy_global_ssl(); - - return result; + int result = check_object_access(path, mask, NULL); + S3FS_MALLOCTRIM(0); + return result; } // @@ -3920,118 +3333,117 @@ static int s3fs_utility_processing(time_t abort_time) // static bool check_region_error(const char* pbody, size_t len, string& expectregion) { - if(!pbody){ - return false; - } + if(!pbody){ + return false; + } - std::string code; - if(!simple_parse_xml(pbody, len, "Code", code) || code != "AuthorizationHeaderMalformed"){ - return false; - } + std::string code; + if(!simple_parse_xml(pbody, len, "Code", code) || code != "AuthorizationHeaderMalformed"){ + return false; + } - if(!simple_parse_xml(pbody, len, "Region", expectregion)){ - return false; - } + if(!simple_parse_xml(pbody, len, "Region", expectregion)){ + return false; + } - return true; + return true; } static int s3fs_check_service() { - S3FS_PRN_INFO("check services."); + S3FS_PRN_INFO("check services."); - // At first time for access S3, we check IAM role if it sets. - if(!S3fsCurl::CheckIAMCredentialUpdate()){ - S3FS_PRN_CRIT("Failed to check IAM role name(%s).", S3fsCurl::GetIAMRole()); - return EXIT_FAILURE; - } + // At first time for access S3, we check IAM role if it sets. 
+ if(!S3fsCurl::CheckIAMCredentialUpdate()){ + S3FS_PRN_CRIT("Failed to check IAM role name(%s).", S3fsCurl::GetIAMRole()); + return EXIT_FAILURE; + } - S3fsCurl s3fscurl; - int res; - if(0 > (res = s3fscurl.CheckBucket())){ - // get response code - long responseCode = s3fscurl.GetLastResponseCode(); + S3fsCurl s3fscurl; + int res; + if(0 > (res = s3fscurl.CheckBucket())){ + // get response code + long responseCode = s3fscurl.GetLastResponseCode(); - // check wrong endpoint, and automatically switch endpoint - if(300 <= responseCode && responseCode < 500){ + // check wrong endpoint, and automatically switch endpoint + if(300 <= responseCode && responseCode < 500){ - // check region error(for putting message or retrying) - BodyData* body = s3fscurl.GetBodyData(); - string expectregion; - if(check_region_error(body->str(), body->size(), expectregion)){ - // [NOTE] - // If endpoint is not specified(using us-east-1 region) and - // an error is encountered accessing a different region, we - // will retry the check on the expected region. - // see) https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro - // - if(is_specified_endpoint){ - const char* tmp_expect_ep = expectregion.c_str(); - S3FS_PRN_CRIT("The bucket region is not '%s', it is correctly '%s'. You should specify 'endpoint=%s' option.", - endpoint.c_str(), tmp_expect_ep, tmp_expect_ep); + // check region error(for putting message or retrying) + BodyData* body = s3fscurl.GetBodyData(); + string expectregion; + if(check_region_error(body->str(), body->size(), expectregion)){ + // [NOTE] + // If endpoint is not specified(using us-east-1 region) and + // an error is encountered accessing a different region, we + // will retry the check on the expected region. 
+ // see) https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro + // + if(is_specified_endpoint){ + const char* tmp_expect_ep = expectregion.c_str(); + S3FS_PRN_CRIT("The bucket region is not '%s', it is correctly '%s'. You should specify 'endpoint=%s' option.", endpoint.c_str(), tmp_expect_ep, tmp_expect_ep); - }else{ - // current endpoint is wrong, so try to connect to expected region. - S3FS_PRN_CRIT("Failed to connect region '%s'(default), so retry to connect region '%s'.", endpoint.c_str(), expectregion.c_str()); - endpoint = expectregion; - if(S3fsCurl::IsSignatureV4()){ - if(host == "http://s3.amazonaws.com"){ - host = "http://s3-" + endpoint + ".amazonaws.com"; - }else if(host == "https://s3.amazonaws.com"){ - host = "https://s3-" + endpoint + ".amazonaws.com"; - } - } + }else{ + // current endpoint is wrong, so try to connect to expected region. + S3FS_PRN_CRIT("Failed to connect region '%s'(default), so retry to connect region '%s'.", endpoint.c_str(), expectregion.c_str()); + endpoint = expectregion; + if(S3fsCurl::IsSignatureV4()){ + if(s3host == "http://s3.amazonaws.com"){ + s3host = "http://s3-" + endpoint + ".amazonaws.com"; + }else if(s3host == "https://s3.amazonaws.com"){ + s3host = "https://s3-" + endpoint + ".amazonaws.com"; + } + } - // retry to check with new endpoint - s3fscurl.DestroyCurlHandle(); - res = s3fscurl.CheckBucket(); - responseCode = s3fscurl.GetLastResponseCode(); + // retry to check with new endpoint + s3fscurl.DestroyCurlHandle(); + res = s3fscurl.CheckBucket(); + responseCode = s3fscurl.GetLastResponseCode(); + } + } + } + + // try signature v2 + if(0 > res && (responseCode == 400 || responseCode == 403) && S3fsCurl::IsSignatureV4()){ + // switch sigv2 + S3FS_PRN_CRIT("Failed to connect by sigv4, so retry to connect by signature version 2."); + S3fsCurl::SetSignatureV4(false); + + // retry to check with sigv2 + s3fscurl.DestroyCurlHandle(); + res = s3fscurl.CheckBucket(); + responseCode = 
s3fscurl.GetLastResponseCode(); + } + + // check errors(after retrying) + if(0 > res && responseCode != 200 && responseCode != 301){ + if(responseCode == 400){ + S3FS_PRN_CRIT("Bad Request(host=%s) - result of checking service.", s3host.c_str()); + + }else if(responseCode == 403){ + S3FS_PRN_CRIT("invalid credentials(host=%s) - result of checking service.", s3host.c_str()); + + }else if(responseCode == 404){ + S3FS_PRN_CRIT("bucket not found(host=%s) - result of checking service.", s3host.c_str()); + + }else{ + // another error + S3FS_PRN_CRIT("unable to connect(host=%s) - result of checking service.", s3host.c_str()); + } + return EXIT_FAILURE; } - } } + s3fscurl.DestroyCurlHandle(); - // try signature v2 - if(0 > res && (responseCode == 400 || responseCode == 403) && S3fsCurl::IsSignatureV4()){ - // switch sigv2 - S3FS_PRN_CRIT("Failed to connect by sigv4, so retry to connect by signature version 2."); - S3fsCurl::SetSignatureV4(false); - - // retry to check with sigv2 - s3fscurl.DestroyCurlHandle(); - res = s3fscurl.CheckBucket(); - responseCode = s3fscurl.GetLastResponseCode(); + // make sure remote mountpath exists and is a directory + if(!mount_prefix.empty()){ + if(remote_mountpath_exists(mount_prefix.c_str()) != 0){ + S3FS_PRN_CRIT("remote mountpath %s not found.", mount_prefix.c_str()); + return EXIT_FAILURE; + } } + S3FS_MALLOCTRIM(0); - // check errors(after retrying) - if(0 > res && responseCode != 200 && responseCode != 301){ - if(responseCode == 400){ - S3FS_PRN_CRIT("Bad Request(host=%s) - result of checking service.", host.c_str()); - - }else if(responseCode == 403){ - S3FS_PRN_CRIT("invalid credentials(host=%s) - result of checking service.", host.c_str()); - - }else if(responseCode == 404){ - S3FS_PRN_CRIT("bucket not found(host=%s) - result of checking service.", host.c_str()); - - }else{ - // another error - S3FS_PRN_CRIT("unable to connect(host=%s) - result of checking service.", host.c_str()); - } - return EXIT_FAILURE; - } - } - 
s3fscurl.DestroyCurlHandle(); - - // make sure remote mountpath exists and is a directory - if(!mount_prefix.empty()){ - if(remote_mountpath_exists(mount_prefix.c_str()) != 0){ - S3FS_PRN_CRIT("remote mountpath %s not found.", mount_prefix.c_str()); - return EXIT_FAILURE; - } - } - S3FS_MALLOCTRIM(0); - - return EXIT_SUCCESS; + return EXIT_SUCCESS; } // @@ -4051,91 +3463,91 @@ static int s3fs_check_service() // static int parse_passwd_file(bucketkvmap_t& resmap) { - string line; - size_t first_pos; - readline_t linelist; - readline_t::iterator iter; + string line; + size_t first_pos; + readline_t linelist; + readline_t::iterator iter; - // open passwd file - ifstream PF(passwd_file.c_str()); - if(!PF.good()){ - S3FS_PRN_EXIT("could not open passwd file : %s", passwd_file.c_str()); - return -1; - } + // open passwd file + ifstream PF(passwd_file.c_str()); + if(!PF.good()){ + S3FS_PRN_EXIT("could not open passwd file : %s", passwd_file.c_str()); + return -1; + } - // read each line - while(getline(PF, line)){ - line = trim(line); - if(line.empty()){ - continue; + // read each line + while(getline(PF, line)){ + line = trim(line); + if(line.empty()){ + continue; + } + if('#' == line[0]){ + continue; + } + if(string::npos != line.find_first_of(" \t")){ + S3FS_PRN_EXIT("invalid line in passwd file, found whitespace character."); + return -1; + } + if('[' == line[0]){ + S3FS_PRN_EXIT("invalid line in passwd file, found a bracket \"[\" character."); + return -1; + } + linelist.push_back(line); } - if('#' == line[0]){ - continue; - } - if(string::npos != line.find_first_of(" \t")){ - S3FS_PRN_EXIT("invalid line in passwd file, found whitespace character."); - return -1; - } - if('[' == line[0]){ - S3FS_PRN_EXIT("invalid line in passwd file, found a bracket \"[\" character."); - return -1; - } - linelist.push_back(line); - } - // read '=' type - kvmap_t kv; - for(iter = linelist.begin(); iter != linelist.end(); ++iter){ - first_pos = iter->find_first_of("="); - if(first_pos 
== string::npos){ - continue; + // read '=' type + kvmap_t kv; + for(iter = linelist.begin(); iter != linelist.end(); ++iter){ + first_pos = iter->find_first_of("="); + if(first_pos == string::npos){ + continue; + } + // formatted by "key=val" + string key = trim(iter->substr(0, first_pos)); + string val = trim(iter->substr(first_pos + 1, string::npos)); + if(key.empty()){ + continue; + } + if(kv.end() != kv.find(key)){ + S3FS_PRN_WARN("same key name(%s) found in passwd file, skip this.", key.c_str()); + continue; + } + kv[key] = val; } - // formatted by "key=val" - string key = trim(iter->substr(0, first_pos)); - string val = trim(iter->substr(first_pos + 1, string::npos)); - if(key.empty()){ - continue; - } - if(kv.end() != kv.find(key)){ - S3FS_PRN_WARN("same key name(%s) found in passwd file, skip this.", key.c_str()); - continue; - } - kv[key] = val; - } - // set special key name - resmap[string(keyval_fields_type)] = kv; + // set special key name + resmap[string(keyval_fields_type)] = kv; - // read ':' type - for(iter = linelist.begin(); iter != linelist.end(); ++iter){ - first_pos = iter->find_first_of(":"); - size_t last_pos = iter->find_last_of(":"); - if(first_pos == string::npos){ - continue; + // read ':' type + for(iter = linelist.begin(); iter != linelist.end(); ++iter){ + first_pos = iter->find_first_of(":"); + size_t last_pos = iter->find_last_of(":"); + if(first_pos == string::npos){ + continue; + } + string bucketname; + string accesskey; + string secret; + if(first_pos != last_pos){ + // formatted by "bucket:accesskey:secretkey" + bucketname = trim(iter->substr(0, first_pos)); + accesskey = trim(iter->substr(first_pos + 1, last_pos - first_pos - 1)); + secret = trim(iter->substr(last_pos + 1, string::npos)); + }else{ + // formatted by "accesskey:secretkey" + bucketname = allbucket_fields_type; + accesskey = trim(iter->substr(0, first_pos)); + secret = trim(iter->substr(first_pos + 1, string::npos)); + } + if(resmap.end() != 
resmap.find(bucketname)){ + S3FS_PRN_EXIT("there are multiple entries for the same bucket(%s) in the passwd file.", (bucketname.empty() ? "default" : bucketname.c_str())); + return -1; + } + kv.clear(); + kv[string(aws_accesskeyid)] = accesskey; + kv[string(aws_secretkey)] = secret; + resmap[bucketname] = kv; } - string bucketname; - string accesskey; - string secret; - if(first_pos != last_pos){ - // formatted by "bucket:accesskey:secretkey" - bucketname = trim(iter->substr(0, first_pos)); - accesskey = trim(iter->substr(first_pos + 1, last_pos - first_pos - 1)); - secret = trim(iter->substr(last_pos + 1, string::npos)); - }else{ - // formatted by "accesskey:secretkey" - bucketname = allbucket_fields_type; - accesskey = trim(iter->substr(0, first_pos)); - secret = trim(iter->substr(first_pos + 1, string::npos)); - } - if(resmap.end() != resmap.find(bucketname)){ - S3FS_PRN_EXIT("there are multiple entries for the same bucket(%s) in the passwd file.", (bucketname.empty() ? "default" : bucketname.c_str())); - return -1; - } - kv.clear(); - kv[string(aws_accesskeyid)] = accesskey; - kv[string(aws_secretkey)] = secret; - resmap[bucketname] = kv; - } - return (resmap.empty() ? 0 : 1); + return (resmap.empty() ? 
0 : 1); } // @@ -4145,26 +3557,26 @@ static int parse_passwd_file(bucketkvmap_t& resmap) // static int check_for_aws_format(const kvmap_t& kvmap) { - string str1(aws_accesskeyid); - string str2(aws_secretkey); + string str1(aws_accesskeyid); + string str2(aws_secretkey); - if(kvmap.empty()){ - return 0; - } - kvmap_t::const_iterator str1_it = kvmap.find(str1); - kvmap_t::const_iterator str2_it = kvmap.find(str2); - if(kvmap.end() == str1_it && kvmap.end() == str2_it){ - return 0; - } - if(kvmap.end() == str1_it || kvmap.end() == str2_it){ - S3FS_PRN_EXIT("AWSAccesskey or AWSSecretkey is not specified."); - return -1; - } - if(!S3fsCurl::SetAccessKey(str1_it->second.c_str(), str2_it->second.c_str())){ - S3FS_PRN_EXIT("failed to set access key/secret key."); - return -1; - } - return 1; + if(kvmap.empty()){ + return 0; + } + kvmap_t::const_iterator str1_it = kvmap.find(str1); + kvmap_t::const_iterator str2_it = kvmap.find(str2); + if(kvmap.end() == str1_it && kvmap.end() == str2_it){ + return 0; + } + if(kvmap.end() == str1_it || kvmap.end() == str2_it){ + S3FS_PRN_EXIT("AWSAccesskey or AWSSecretkey is not specified."); + return -1; + } + if(!S3fsCurl::SetAccessKey(str1_it->second.c_str(), str2_it->second.c_str())){ + S3FS_PRN_EXIT("failed to set access key/secret key."); + return -1; + } + return 1; } // @@ -4180,114 +3592,114 @@ static int check_for_aws_format(const kvmap_t& kvmap) // static int check_passwd_file_perms() { - struct stat info; + struct stat info; - // let's get the file info - if(stat(passwd_file.c_str(), &info) != 0){ - S3FS_PRN_EXIT("unexpected error from stat(%s).", passwd_file.c_str()); - return EXIT_FAILURE; - } - - // return error if any file has others permissions - if( (info.st_mode & S_IROTH) || - (info.st_mode & S_IWOTH) || - (info.st_mode & S_IXOTH)) { - S3FS_PRN_EXIT("credentials file %s should not have others permissions.", passwd_file.c_str()); - return EXIT_FAILURE; - } - - // Any local file should not have any group permissions - // 
/etc/passwd-s3fs can have group permissions - if(passwd_file != "/etc/passwd-s3fs"){ - if( (info.st_mode & S_IRGRP) || - (info.st_mode & S_IWGRP) || - (info.st_mode & S_IXGRP)) { - S3FS_PRN_EXIT("credentials file %s should not have group permissions.", passwd_file.c_str()); - return EXIT_FAILURE; + // let's get the file info + if(stat(passwd_file.c_str(), &info) != 0){ + S3FS_PRN_EXIT("unexpected error from stat(%s).", passwd_file.c_str()); + return EXIT_FAILURE; } - }else{ - // "/etc/passwd-s3fs" does not allow group write. - if((info.st_mode & S_IWGRP)){ - S3FS_PRN_EXIT("credentials file %s should not have group writable permissions.", passwd_file.c_str()); - return EXIT_FAILURE; + + // return error if any file has others permissions + if( (info.st_mode & S_IROTH) || + (info.st_mode & S_IWOTH) || + (info.st_mode & S_IXOTH)) { + S3FS_PRN_EXIT("credentials file %s should not have others permissions.", passwd_file.c_str()); + return EXIT_FAILURE; } - } - if((info.st_mode & S_IXUSR) || (info.st_mode & S_IXGRP)){ - S3FS_PRN_EXIT("credentials file %s should not have executable permissions.", passwd_file.c_str()); - return EXIT_FAILURE; - } - return EXIT_SUCCESS; + + // Any local file should not have any group permissions + // /etc/passwd-s3fs can have group permissions + if(passwd_file != "/etc/passwd-s3fs"){ + if( (info.st_mode & S_IRGRP) || + (info.st_mode & S_IWGRP) || + (info.st_mode & S_IXGRP)) { + S3FS_PRN_EXIT("credentials file %s should not have group permissions.", passwd_file.c_str()); + return EXIT_FAILURE; + } + }else{ + // "/etc/passwd-s3fs" does not allow group write. 
+ if((info.st_mode & S_IWGRP)){ + S3FS_PRN_EXIT("credentials file %s should not have group writable permissions.", passwd_file.c_str()); + return EXIT_FAILURE; + } + } + if((info.st_mode & S_IXUSR) || (info.st_mode & S_IXGRP)){ + S3FS_PRN_EXIT("credentials file %s should not have executable permissions.", passwd_file.c_str()); + return EXIT_FAILURE; + } + return EXIT_SUCCESS; } static int read_aws_credentials_file(const std::string &filename) { - // open passwd file - ifstream PF(filename.c_str()); - if(!PF.good()){ - return -1; - } - - string profile; - string accesskey; - string secret; - string session_token; - - // read each line - string line; - while(getline(PF, line)){ - line = trim(line); - if(line.empty()){ - continue; - } - if('#' == line[0]){ - continue; + // open passwd file + ifstream PF(filename.c_str()); + if(!PF.good()){ + return -1; } - if(line.size() > 2 && line[0] == '[' && line[line.size() - 1] == ']') { - if(profile == aws_profile){ - break; - } - profile = line.substr(1, line.size() - 2); - accesskey.clear(); - secret.clear(); - session_token.clear(); + string profile; + string accesskey; + string secret; + string session_token; + + // read each line + string line; + while(getline(PF, line)){ + line = trim(line); + if(line.empty()){ + continue; + } + if('#' == line[0]){ + continue; + } + + if(line.size() > 2 && line[0] == '[' && line[line.size() - 1] == ']') { + if(profile == aws_profile){ + break; + } + profile = line.substr(1, line.size() - 2); + accesskey.clear(); + secret.clear(); + session_token.clear(); + } + + size_t pos = line.find_first_of('='); + if(pos == string::npos){ + continue; + } + string key = trim(line.substr(0, pos)); + string value = trim(line.substr(pos + 1, string::npos)); + if(key == "aws_access_key_id"){ + accesskey = value; + }else if(key == "aws_secret_access_key"){ + secret = value; + }else if(key == "aws_session_token"){ + session_token = value; + } } - size_t pos = line.find_first_of('='); - if(pos == 
string::npos){ - continue; - } - string key = trim(line.substr(0, pos)); - string value = trim(line.substr(pos + 1, string::npos)); - if(key == "aws_access_key_id"){ - accesskey = value; - }else if(key == "aws_secret_access_key"){ - secret = value; - }else if(key == "aws_session_token"){ - session_token = value; - } - } - - if(profile != aws_profile){ - return EXIT_FAILURE; - } - if (session_token.empty()) { - if (is_use_session_token) { - S3FS_PRN_EXIT("AWS session token was expected but wasn't provided in aws/credentials file for profile: %s.", aws_profile.c_str()); + if(profile != aws_profile){ return EXIT_FAILURE; } - if(!S3fsCurl::SetAccessKey(accesskey.c_str(), secret.c_str())){ - S3FS_PRN_EXIT("failed to set internal data for access key/secret key from aws credential file."); - return EXIT_FAILURE; + if (session_token.empty()) { + if (is_use_session_token) { + S3FS_PRN_EXIT("AWS session token was expected but wasn't provided in aws/credentials file for profile: %s.", aws_profile.c_str()); + return EXIT_FAILURE; + } + if(!S3fsCurl::SetAccessKey(accesskey.c_str(), secret.c_str())){ + S3FS_PRN_EXIT("failed to set internal data for access key/secret key from aws credential file."); + return EXIT_FAILURE; + } + } else { + if (!S3fsCurl::SetAccessKeyWithSessionToken(accesskey.c_str(), secret.c_str(), session_token.c_str())) { + S3FS_PRN_EXIT("session token is invalid."); + return EXIT_FAILURE; + } } - } else { - if (!S3fsCurl::SetAccessKeyWithSessionToken(accesskey.c_str(), secret.c_str(), session_token.c_str())) { - S3FS_PRN_EXIT("session token is invalid."); - return EXIT_FAILURE; - } - } - return EXIT_SUCCESS; + return EXIT_SUCCESS; } // @@ -4308,61 +3720,61 @@ static int read_aws_credentials_file(const std::string &filename) // static int read_passwd_file() { - bucketkvmap_t bucketmap; - kvmap_t keyval; - int result; + bucketkvmap_t bucketmap; + kvmap_t keyval; + int result; - // if you got here, the password file - // exists and is readable by the - // 
current user, check for permissions - if(EXIT_SUCCESS != check_passwd_file_perms()){ - return EXIT_FAILURE; - } - - // - // parse passwd file - // - result = parse_passwd_file(bucketmap); - if(-1 == result){ - return EXIT_FAILURE; - } - - // - // check key=value type format. - // - bucketkvmap_t::iterator it = bucketmap.find(keyval_fields_type); - if(bucketmap.end() != it){ - // aws format - result = check_for_aws_format(it->second); - if(-1 == result){ - return EXIT_FAILURE; - }else if(1 == result){ - // success to set - return EXIT_SUCCESS; + // if you got here, the password file + // exists and is readable by the + // current user, check for permissions + if(EXIT_SUCCESS != check_passwd_file_perms()){ + return EXIT_FAILURE; } - } - string bucket_key = allbucket_fields_type; - if(!bucket.empty() && bucketmap.end() != bucketmap.find(bucket)){ - bucket_key = bucket; - } - it = bucketmap.find(bucket_key); - if(bucketmap.end() == it){ - S3FS_PRN_EXIT("Not found access key/secret key in passwd file."); - return EXIT_FAILURE; - } - keyval = it->second; - kvmap_t::iterator aws_accesskeyid_it = keyval.find(aws_accesskeyid); - kvmap_t::iterator aws_secretkey_it = keyval.find(aws_secretkey); - if(keyval.end() == aws_accesskeyid_it || keyval.end() == aws_secretkey_it){ - S3FS_PRN_EXIT("Not found access key/secret key in passwd file."); - return EXIT_FAILURE; - } - if(!S3fsCurl::SetAccessKey(aws_accesskeyid_it->second.c_str(), aws_secretkey_it->second.c_str())){ - S3FS_PRN_EXIT("failed to set internal data for access key/secret key from passwd file."); - return EXIT_FAILURE; - } - return EXIT_SUCCESS; + // + // parse passwd file + // + result = parse_passwd_file(bucketmap); + if(-1 == result){ + return EXIT_FAILURE; + } + + // + // check key=value type format. 
+ // + bucketkvmap_t::iterator it = bucketmap.find(keyval_fields_type); + if(bucketmap.end() != it){ + // aws format + result = check_for_aws_format(it->second); + if(-1 == result){ + return EXIT_FAILURE; + }else if(1 == result){ + // success to set + return EXIT_SUCCESS; + } + } + + string bucket_key = allbucket_fields_type; + if(!bucket.empty() && bucketmap.end() != bucketmap.find(bucket)){ + bucket_key = bucket; + } + it = bucketmap.find(bucket_key); + if(bucketmap.end() == it){ + S3FS_PRN_EXIT("Not found access key/secret key in passwd file."); + return EXIT_FAILURE; + } + keyval = it->second; + kvmap_t::iterator aws_accesskeyid_it = keyval.find(aws_accesskeyid); + kvmap_t::iterator aws_secretkey_it = keyval.find(aws_secretkey); + if(keyval.end() == aws_accesskeyid_it || keyval.end() == aws_secretkey_it){ + S3FS_PRN_EXIT("Not found access key/secret key in passwd file."); + return EXIT_FAILURE; + } + if(!S3fsCurl::SetAccessKey(aws_accesskeyid_it->second.c_str(), aws_secretkey_it->second.c_str())){ + S3FS_PRN_EXIT("failed to set internal data for access key/secret key from passwd file."); + return EXIT_FAILURE; + } + return EXIT_SUCCESS; } // @@ -4384,121 +3796,121 @@ static int read_passwd_file() // static int get_access_keys() { - // should be redundant - if(S3fsCurl::IsPublicBucket()){ - return EXIT_SUCCESS; - } + // should be redundant + if(S3fsCurl::IsPublicBucket()){ + return EXIT_SUCCESS; + } - // access key loading is deferred - if(load_iamrole || is_ecs){ - return EXIT_SUCCESS; - } + // access key loading is deferred + if(load_iamrole || is_ecs){ + return EXIT_SUCCESS; + } - // 1 - keys specified on the command line - if(S3fsCurl::IsSetAccessKeys()){ - return EXIT_SUCCESS; - } + // 1 - keys specified on the command line + if(S3fsCurl::IsSetAccessKeys()){ + return EXIT_SUCCESS; + } - // 2 - was specified on the command line - if(!passwd_file.empty()){ + // 2 - was specified on the command line + if(!passwd_file.empty()){ + ifstream 
PF(passwd_file.c_str()); + if(PF.good()){ + PF.close(); + return read_passwd_file(); + }else{ + S3FS_PRN_EXIT("specified passwd_file is not readable."); + return EXIT_FAILURE; + } + } + + // 3 - environment variables + char* AWSACCESSKEYID = getenv("AWSACCESSKEYID"); + char* AWSSECRETACCESSKEY = getenv("AWSSECRETACCESSKEY"); + char* AWSSESSIONTOKEN = getenv("AWSSESSIONTOKEN"); + if(AWSACCESSKEYID != NULL || AWSSECRETACCESSKEY != NULL){ + if( (AWSACCESSKEYID == NULL && AWSSECRETACCESSKEY != NULL) || + (AWSACCESSKEYID != NULL && AWSSECRETACCESSKEY == NULL) ){ + S3FS_PRN_EXIT("if environment variable AWSACCESSKEYID is set then AWSSECRETACCESSKEY must be set too."); + return EXIT_FAILURE; + } + S3FS_PRN_INFO2("access key from env variables"); + if (AWSSESSIONTOKEN != NULL) { + S3FS_PRN_INFO2("session token is available"); + if (!S3fsCurl::SetAccessKeyWithSessionToken(AWSACCESSKEYID, AWSSECRETACCESSKEY, AWSSESSIONTOKEN)) { + S3FS_PRN_EXIT("session token is invalid."); + return EXIT_FAILURE; + } + } else { + S3FS_PRN_INFO2("session token is not available"); + if (is_use_session_token) { + S3FS_PRN_EXIT("environment variable AWSSESSIONTOKEN is expected to be set."); + return EXIT_FAILURE; + } + } + if(!S3fsCurl::SetAccessKey(AWSACCESSKEYID, AWSSECRETACCESSKEY)){ + S3FS_PRN_EXIT("if one access key is specified, both keys need to be specified."); + return EXIT_FAILURE; + } + return EXIT_SUCCESS; + } + + // 3a - from the AWS_CREDENTIAL_FILE environment variable + char * AWS_CREDENTIAL_FILE; + AWS_CREDENTIAL_FILE = getenv("AWS_CREDENTIAL_FILE"); + if(AWS_CREDENTIAL_FILE != NULL){ + passwd_file.assign(AWS_CREDENTIAL_FILE); + if(!passwd_file.empty()){ + ifstream PF(passwd_file.c_str()); + if(PF.good()){ + PF.close(); + return read_passwd_file(); + }else{ + S3FS_PRN_EXIT("AWS_CREDENTIAL_FILE: \"%s\" is not readable.", passwd_file.c_str()); + return EXIT_FAILURE; + } + } + } + + // 3b - check ${HOME}/.aws/credentials + std::string aws_credentials = 
std::string(getpwuid(getuid())->pw_dir) + "/.aws/credentials"; + if(read_aws_credentials_file(aws_credentials) == EXIT_SUCCESS) { + return EXIT_SUCCESS; + }else if(aws_profile != "default"){ + S3FS_PRN_EXIT("Could not find profile: %s in file: %s", aws_profile.c_str(), aws_credentials.c_str()); + return EXIT_FAILURE; + } + + // 4 - from the default location in the users home directory + char * HOME; + HOME = getenv ("HOME"); + if(HOME != NULL){ + passwd_file.assign(HOME); + passwd_file.append("/.passwd-s3fs"); + ifstream PF(passwd_file.c_str()); + if(PF.good()){ + PF.close(); + if(EXIT_SUCCESS != read_passwd_file()){ + return EXIT_FAILURE; + } + // It is possible that the user's file was there but + // contained no key pairs i.e. commented out + // in that case, go look in the final location + if(S3fsCurl::IsSetAccessKeys()){ + return EXIT_SUCCESS; + } + } + } + + // 5 - from the system default location + passwd_file.assign("/etc/passwd-s3fs"); ifstream PF(passwd_file.c_str()); if(PF.good()){ - PF.close(); - return read_passwd_file(); - }else{ - S3FS_PRN_EXIT("specified passwd_file is not readable."); - return EXIT_FAILURE; + PF.close(); + return read_passwd_file(); } - } + S3FS_PRN_EXIT("could not determine how to establish security credentials."); - // 3 - environment variables - char* AWSACCESSKEYID = getenv("AWSACCESSKEYID"); - char* AWSSECRETACCESSKEY = getenv("AWSSECRETACCESSKEY"); - char* AWSSESSIONTOKEN = getenv("AWSSESSIONTOKEN"); - if(AWSACCESSKEYID != NULL || AWSSECRETACCESSKEY != NULL){ - if( (AWSACCESSKEYID == NULL && AWSSECRETACCESSKEY != NULL) || - (AWSACCESSKEYID != NULL && AWSSECRETACCESSKEY == NULL) ){ - S3FS_PRN_EXIT("if environment variable AWSACCESSKEYID is set then AWSSECRETACCESSKEY must be set too."); - return EXIT_FAILURE; - } - S3FS_PRN_INFO2("access key from env variables"); - if (AWSSESSIONTOKEN != NULL) { - S3FS_PRN_INFO2("session token is available"); - if (!S3fsCurl::SetAccessKeyWithSessionToken(AWSACCESSKEYID, AWSSECRETACCESSKEY, 
AWSSESSIONTOKEN)) { - S3FS_PRN_EXIT("session token is invalid."); - return EXIT_FAILURE; - } - } else { - S3FS_PRN_INFO2("session token is not available"); - if (is_use_session_token) { - S3FS_PRN_EXIT("environment variable AWSSESSIONTOKEN is expected to be set."); - return EXIT_FAILURE; - } - } - if(!S3fsCurl::SetAccessKey(AWSACCESSKEYID, AWSSECRETACCESSKEY)){ - S3FS_PRN_EXIT("if one access key is specified, both keys need to be specified."); - return EXIT_FAILURE; - } - return EXIT_SUCCESS; - } - - // 3a - from the AWS_CREDENTIAL_FILE environment variable - char * AWS_CREDENTIAL_FILE; - AWS_CREDENTIAL_FILE = getenv("AWS_CREDENTIAL_FILE"); - if(AWS_CREDENTIAL_FILE != NULL){ - passwd_file.assign(AWS_CREDENTIAL_FILE); - if(!passwd_file.empty()){ - ifstream PF(passwd_file.c_str()); - if(PF.good()){ - PF.close(); - return read_passwd_file(); - }else{ - S3FS_PRN_EXIT("AWS_CREDENTIAL_FILE: \"%s\" is not readable.", passwd_file.c_str()); - return EXIT_FAILURE; - } - } - } - - // 3b - check ${HOME}/.aws/credentials - std::string aws_credentials = std::string(getpwuid(getuid())->pw_dir) + "/.aws/credentials"; - if(read_aws_credentials_file(aws_credentials) == EXIT_SUCCESS) { - return EXIT_SUCCESS; - }else if(aws_profile != "default"){ - S3FS_PRN_EXIT("Could not find profile: %s in file: %s", aws_profile.c_str(), aws_credentials.c_str()); return EXIT_FAILURE; - } - - // 4 - from the default location in the users home directory - char * HOME; - HOME = getenv ("HOME"); - if(HOME != NULL){ - passwd_file.assign(HOME); - passwd_file.append("/.passwd-s3fs"); - ifstream PF(passwd_file.c_str()); - if(PF.good()){ - PF.close(); - if(EXIT_SUCCESS != read_passwd_file()){ - return EXIT_FAILURE; - } - // It is possible that the user's file was there but - // contained no key pairs i.e. 
commented out - // in that case, go look in the final location - if(S3fsCurl::IsSetAccessKeys()){ - return EXIT_SUCCESS; - } - } - } - - // 5 - from the system default location - passwd_file.assign("/etc/passwd-s3fs"); - ifstream PF(passwd_file.c_str()); - if(PF.good()){ - PF.close(); - return read_passwd_file(); - } - S3FS_PRN_EXIT("could not determine how to establish security credentials."); - - return EXIT_FAILURE; } // @@ -4506,28 +3918,28 @@ static int get_access_keys() // static bool set_mountpoint_attribute(struct stat& mpst) { - mp_uid = geteuid(); - mp_gid = getegid(); - mp_mode = S_IFDIR | (allow_other ? (is_mp_umask ? (~mp_umask & (S_IRWXU | S_IRWXG | S_IRWXO)) : (S_IRWXU | S_IRWXG | S_IRWXO)) : S_IRWXU); + mp_uid = geteuid(); + mp_gid = getegid(); + mp_mode = S_IFDIR | (allow_other ? (is_mp_umask ? (~mp_umask & (S_IRWXU | S_IRWXG | S_IRWXO)) : (S_IRWXU | S_IRWXG | S_IRWXO)) : S_IRWXU); - S3FS_PRN_INFO2("PROC(uid=%u, gid=%u) - MountPoint(uid=%u, gid=%u, mode=%04o)", - (unsigned int)mp_uid, (unsigned int)mp_gid, (unsigned int)(mpst.st_uid), (unsigned int)(mpst.st_gid), mpst.st_mode); + S3FS_PRN_INFO2("PROC(uid=%u, gid=%u) - MountPoint(uid=%u, gid=%u, mode=%04o)", + (unsigned int)mp_uid, (unsigned int)mp_gid, (unsigned int)(mpst.st_uid), (unsigned int)(mpst.st_gid), mpst.st_mode); - // check owner - if(0 == mp_uid || mpst.st_uid == mp_uid){ - return true; - } - // check group permission - if(mpst.st_gid == mp_gid || 1 == is_uid_include_group(mp_uid, mpst.st_gid)){ - if(S_IRWXG == (mpst.st_mode & S_IRWXG)){ - return true; + // check owner + if(0 == mp_uid || mpst.st_uid == mp_uid){ + return true; } - } - // check other permission - if(S_IRWXO == (mpst.st_mode & S_IRWXO)){ - return true; - } - return false; + // check group permission + if(mpst.st_gid == mp_gid || 1 == is_uid_include_group(mp_uid, mpst.st_gid)){ + if(S_IRWXG == (mpst.st_mode & S_IRWXG)){ + return true; + } + } + // check other permission + if(S_IRWXO == (mpst.st_mode & S_IRWXO)){ + return 
true; + } + return false; } // @@ -4535,32 +3947,31 @@ static bool set_mountpoint_attribute(struct stat& mpst) // static int set_bucket(const char* arg) { - char *bucket_name = (char*)arg; - if(strstr(arg, ":")){ - if(strstr(arg, "://")){ - S3FS_PRN_EXIT("bucket name and path(\"%s\") is wrong, it must be \"bucket[:/path]\".", arg); - return -1; + char *bucket_name = (char*)arg; + if(strstr(arg, ":")){ + if(strstr(arg, "://")){ + S3FS_PRN_EXIT("bucket name and path(\"%s\") is wrong, it must be \"bucket[:/path]\".", arg); + return -1; + } + bucket = strtok(bucket_name, ":"); + char* pmount_prefix = strtok(NULL, ""); + if(pmount_prefix){ + if(0 == strlen(pmount_prefix) || '/' != pmount_prefix[0]){ + S3FS_PRN_EXIT("path(%s) must be prefix \"/\".", pmount_prefix); + return -1; + } + mount_prefix = pmount_prefix; + // remove trailing slash + if(mount_prefix.at(mount_prefix.size() - 1) == '/'){ + mount_prefix = mount_prefix.substr(0, mount_prefix.size() - 1); + } + } + }else{ + bucket = arg; } - bucket = strtok(bucket_name, ":"); - char* pmount_prefix = strtok(NULL, ""); - if(pmount_prefix){ - if(0 == strlen(pmount_prefix) || '/' != pmount_prefix[0]){ - S3FS_PRN_EXIT("path(%s) must be prefix \"/\".", pmount_prefix); - return -1; - } - mount_prefix = pmount_prefix; - // remove trailing slash - if(mount_prefix.at(mount_prefix.size() - 1) == '/'){ - mount_prefix = mount_prefix.substr(0, mount_prefix.size() - 1); - } - } - }else{ - bucket = arg; - } - return 0; + return 0; } - // This is repeatedly called by the fuse option parser // if the key is equal to FUSE_OPT_KEY_OPT, it's an option passed in prefixed by // '-' or '--' e.g.: -f -d -ousecache=/tmp @@ -4569,1051 +3980,1047 @@ static int set_bucket(const char* arg) // or the mountpoint. 
The bucket name will always come before the mountpoint static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_args* outargs) { - int ret; - if(key == FUSE_OPT_KEY_NONOPT){ - // the first NONOPT option is the bucket name - if(bucket.empty()){ - if ((ret = set_bucket(arg))){ - return ret; - } - return 0; - } - else if (!strcmp(arg, "s3fs")) { - return 0; - } - - // the second NONOPT option is the mountpoint(not utility mode) - if(mountpoint.empty() && NO_UTILITY_MODE == utility_mode){ - // save the mountpoint and do some basic error checking - mountpoint = arg; - struct stat stbuf; - - if(stat(arg, &stbuf) == -1){ - S3FS_PRN_EXIT("unable to access MOUNTPOINT %s: %s", mountpoint.c_str(), strerror(errno)); - return -1; - } - if(!(S_ISDIR(stbuf.st_mode))){ - S3FS_PRN_EXIT("MOUNTPOINT: %s is not a directory.", mountpoint.c_str()); - return -1; - } - if(!set_mountpoint_attribute(stbuf)){ - S3FS_PRN_EXIT("MOUNTPOINT: %s permission denied.", mountpoint.c_str()); - return -1; - } - - if(!nonempty){ - struct dirent *ent; - DIR *dp = opendir(mountpoint.c_str()); - if(dp == NULL){ - S3FS_PRN_EXIT("failed to open MOUNTPOINT: %s: %s", mountpoint.c_str(), strerror(errno)); - return -1; + int ret; + if(key == FUSE_OPT_KEY_NONOPT){ + // the first NONOPT option is the bucket name + if(bucket.empty()){ + if ((ret = set_bucket(arg))){ + return ret; + } + return 0; + }else if (!strcmp(arg, "s3fs")) { + return 0; } - while((ent = readdir(dp)) != NULL){ - if(strcmp(ent->d_name, ".") != 0 && strcmp(ent->d_name, "..") != 0){ - closedir(dp); - S3FS_PRN_EXIT("MOUNTPOINT directory %s is not empty. 
if you are sure this is safe, can use the 'nonempty' mount option.", mountpoint.c_str()); + + // the second NONOPT option is the mountpoint(not utility mode) + if(mountpoint.empty() && NO_UTILITY_MODE == utility_mode){ + // save the mountpoint and do some basic error checking + mountpoint = arg; + struct stat stbuf; + + if(stat(arg, &stbuf) == -1){ + S3FS_PRN_EXIT("unable to access MOUNTPOINT %s: %s", mountpoint.c_str(), strerror(errno)); + return -1; + } + if(!(S_ISDIR(stbuf.st_mode))){ + S3FS_PRN_EXIT("MOUNTPOINT: %s is not a directory.", mountpoint.c_str()); + return -1; + } + if(!set_mountpoint_attribute(stbuf)){ + S3FS_PRN_EXIT("MOUNTPOINT: %s permission denied.", mountpoint.c_str()); + return -1; + } + + if(!nonempty){ + struct dirent *ent; + DIR *dp = opendir(mountpoint.c_str()); + if(dp == NULL){ + S3FS_PRN_EXIT("failed to open MOUNTPOINT: %s: %s", mountpoint.c_str(), strerror(errno)); + return -1; + } + while((ent = readdir(dp)) != NULL){ + if(strcmp(ent->d_name, ".") != 0 && strcmp(ent->d_name, "..") != 0){ + closedir(dp); + S3FS_PRN_EXIT("MOUNTPOINT directory %s is not empty. if you are sure this is safe, can use the 'nonempty' mount option.", mountpoint.c_str()); + return -1; + } + } + closedir(dp); + } + return 1; + } + + // Unknown option + if(NO_UTILITY_MODE == utility_mode){ + S3FS_PRN_EXIT("specified unknown third option(%s).", arg); + }else{ + S3FS_PRN_EXIT("specified unknown second option(%s). 
you don't need to specify second option(mountpoint) for utility mode(-u).", arg); + } + return -1; + + }else if(key == FUSE_OPT_KEY_OPT){ + if(0 == STR2NCMP(arg, "uid=")){ + s3fs_uid = get_uid(strchr(arg, '=') + sizeof(char)); + if(0 != geteuid() && 0 == s3fs_uid){ + S3FS_PRN_EXIT("root user can only specify uid=0."); + return -1; + } + is_s3fs_uid = true; + return 1; // continue for fuse option + } + if(0 == STR2NCMP(arg, "gid=")){ + s3fs_gid = get_gid(strchr(arg, '=') + sizeof(char)); + if(0 != getegid() && 0 == s3fs_gid){ + S3FS_PRN_EXIT("root user can only specify gid=0."); + return -1; + } + is_s3fs_gid = true; + return 1; // continue for fuse option + } + if(0 == STR2NCMP(arg, "umask=")){ + s3fs_umask = cvt_strtoofft(strchr(arg, '=') + sizeof(char), /*base=*/ 8); + s3fs_umask &= (S_IRWXU | S_IRWXG | S_IRWXO); + is_s3fs_umask = true; + return 1; // continue for fuse option + } + if(0 == strcmp(arg, "allow_other")){ + allow_other = true; + return 1; // continue for fuse option + } + if(0 == STR2NCMP(arg, "mp_umask=")){ + mp_umask = cvt_strtoofft(strchr(arg, '=') + sizeof(char), /*base=*/ 8); + mp_umask &= (S_IRWXU | S_IRWXG | S_IRWXO); + is_mp_umask = true; + return 0; + } + if(0 == STR2NCMP(arg, "default_acl=")){ + const char* acl_string = strchr(arg, '=') + sizeof(char); + acl_t acl = acl_t::from_str(acl_string); + if(acl == acl_t::UNKNOWN){ + S3FS_PRN_EXIT("unknown value for default_acl: %s", acl_string); + return -1; + } + S3fsCurl::SetDefaultAcl(acl); + return 0; + } + if(0 == STR2NCMP(arg, "retries=")){ + off_t retries = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); + if(retries == 0){ + S3FS_PRN_EXIT("retries must be greater than zero"); + return -1; + } + S3fsCurl::SetRetries(retries); + return 0; + } + if(0 == STR2NCMP(arg, "use_cache=")){ + FdManager::SetCacheDir(strchr(arg, '=') + sizeof(char)); + return 0; + } + if(0 == STR2NCMP(arg, "check_cache_dir_exist")){ + FdManager::SetCheckCacheDirExist(true); + return 0; + } + if(0 == 
strcmp(arg, "del_cache")){ + is_remove_cache = true; + return 0; + } + if(0 == STR2NCMP(arg, "multireq_max=")){ + int maxreq = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); + S3fsCurl::SetMaxMultiRequest(maxreq); + return 0; + } + if(0 == strcmp(arg, "nonempty")){ + nonempty = true; + return 1; // need to continue for fuse. + } + if(0 == strcmp(arg, "nomultipart")){ + nomultipart = true; + return 0; + } + // old format for storage_class + if(0 == strcmp(arg, "use_rrs") || 0 == STR2NCMP(arg, "use_rrs=")){ + off_t rrs = 1; + // for an old format. + if(0 == STR2NCMP(arg, "use_rrs=")){ + rrs = cvt_strtoofft(strchr(arg, '=') + sizeof(char)); + } + if(0 == rrs){ + S3fsCurl::SetStorageClass(storage_class_t::STANDARD); + }else if(1 == rrs){ + S3fsCurl::SetStorageClass(storage_class_t::REDUCED_REDUNDANCY); + }else{ + S3FS_PRN_EXIT("poorly formed argument to option: use_rrs"); + return -1; + } + return 0; + } + if(0 == STR2NCMP(arg, "storage_class=")){ + const char *storage_class_str = strchr(arg, '=') + sizeof(char); + storage_class_t storage_class = storage_class_t::from_str(storage_class_str); + if(storage_class == storage_class_t::UNKNOWN){ + S3FS_PRN_EXIT("unknown value for storage_class: %s", storage_class_str); + return -1; + } + S3fsCurl::SetStorageClass(storage_class); + return 0; + } + // + // [NOTE] + // use_sse Set Server Side Encrypting type to SSE-S3 + // use_sse=1 + // use_sse=file Set Server Side Encrypting type to Custom key(SSE-C) and load custom keys + // use_sse=custom(c):file + // use_sse=custom(c) Set Server Side Encrypting type to Custom key(SSE-C) + // use_sse=kmsid(k):kms-key-id Set Server Side Encrypting type to AWS Key Management key id(SSE-KMS) and load KMS id + // use_sse=kmsid(k) Set Server Side Encrypting type to AWS Key Management key id(SSE-KMS) + // + // load_sse_c=file Load Server Side Encrypting custom keys + // + // AWSSSECKEYS Loading Environment for Server Side Encrypting custom keys + // AWSSSEKMSID Loading Environment 
for Server Side Encrypting Key id + // + if(0 == STR2NCMP(arg, "use_sse")){ + if(0 == strcmp(arg, "use_sse") || 0 == strcmp(arg, "use_sse=1")){ // use_sse=1 is old type parameter + // sse type is SSE_S3 + if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseS3Type()){ + S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment."); + return -1; + } + S3fsCurl::SetSseType(sse_type_t::SSE_S3); + + }else if(0 == strcmp(arg, "use_sse=kmsid") || 0 == strcmp(arg, "use_sse=k")){ + // sse type is SSE_KMS with out kmsid(expecting id is loaded by environment) + if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseKmsType()){ + S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment."); + return -1; + } + if(!S3fsCurl::IsSetSseKmsId()){ + S3FS_PRN_EXIT("use_sse=kms but not loaded kms id by environment."); + return -1; + } + S3fsCurl::SetSseType(sse_type_t::SSE_KMS); + + }else if(0 == STR2NCMP(arg, "use_sse=kmsid:") || 0 == STR2NCMP(arg, "use_sse=k:")){ + // sse type is SSE_KMS with kmsid + if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseKmsType()){ + S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment."); + return -1; + } + const char* kmsid; + if(0 == STR2NCMP(arg, "use_sse=kmsid:")){ + kmsid = &arg[strlen("use_sse=kmsid:")]; + }else{ + kmsid = &arg[strlen("use_sse=k:")]; + } + if(!S3fsCurl::SetSseKmsid(kmsid)){ + S3FS_PRN_EXIT("failed to load use_sse kms id."); + return -1; + } + S3fsCurl::SetSseType(sse_type_t::SSE_KMS); + + }else if(0 == strcmp(arg, "use_sse=custom") || 0 == strcmp(arg, "use_sse=c")){ + // sse type is SSE_C with out custom keys(expecting keys are loaded by environment or load_sse_c option) + if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseCType()){ + S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment."); + return -1; + } + // [NOTE] + // do not check ckeys exists here. 
+ // + S3fsCurl::SetSseType(sse_type_t::SSE_C); + + }else if(0 == STR2NCMP(arg, "use_sse=custom:") || 0 == STR2NCMP(arg, "use_sse=c:")){ + // sse type is SSE_C with custom keys + if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseCType()){ + S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment."); + return -1; + } + const char* ssecfile; + if(0 == STR2NCMP(arg, "use_sse=custom:")){ + ssecfile = &arg[strlen("use_sse=custom:")]; + }else{ + ssecfile = &arg[strlen("use_sse=c:")]; + } + if(!S3fsCurl::SetSseCKeys(ssecfile)){ + S3FS_PRN_EXIT("failed to load use_sse custom key file(%s).", ssecfile); + return -1; + } + S3fsCurl::SetSseType(sse_type_t::SSE_C); + + }else if(0 == strcmp(arg, "use_sse=")){ // this type is old style(parameter is custom key file path) + // SSE_C with custom keys. + const char* ssecfile = &arg[strlen("use_sse=")]; + if(!S3fsCurl::SetSseCKeys(ssecfile)){ + S3FS_PRN_EXIT("failed to load use_sse custom key file(%s).", ssecfile); + return -1; + } + S3fsCurl::SetSseType(sse_type_t::SSE_C); + + }else{ + // never come here. + S3FS_PRN_EXIT("something wrong use_sse option."); + return -1; + } + return 0; + } + // [NOTE] + // Do only load SSE custom keys, care for set without set sse type. 
+ if(0 == STR2NCMP(arg, "load_sse_c=")){ + const char* ssecfile = &arg[strlen("load_sse_c=")]; + if(!S3fsCurl::SetSseCKeys(ssecfile)){ + S3FS_PRN_EXIT("failed to load use_sse custom key file(%s).", ssecfile); + return -1; + } + return 0; + } + if(0 == STR2NCMP(arg, "ssl_verify_hostname=")){ + long sslvh = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); + if(-1 == S3fsCurl::SetSslVerifyHostname(sslvh)){ + S3FS_PRN_EXIT("poorly formed argument to option: ssl_verify_hostname."); + return -1; + } + return 0; + } + if(0 == STR2NCMP(arg, "passwd_file=")){ + passwd_file = strchr(arg, '=') + sizeof(char); + return 0; + } + if(0 == strcmp(arg, "ibm_iam_auth")){ + S3fsCurl::SetIsIBMIAMAuth(true); + S3fsCurl::SetIAMCredentialsURL("https://iam.bluemix.net/oidc/token"); + S3fsCurl::SetIAMTokenField("access_token"); + S3fsCurl::SetIAMExpiryField("expiration"); + S3fsCurl::SetIAMFieldCount(2); + is_ibm_iam_auth = true; + return 0; + } + if (0 == STR2NCMP(arg, "use_session_token")) { + is_use_session_token = true; + } + if(0 == STR2NCMP(arg, "ibm_iam_endpoint=")){ + std::string endpoint_url; + std::string iam_endpoint = strchr(arg, '=') + sizeof(char); + // Check url for http / https protocol string + if((iam_endpoint.compare(0, 8, "https://") != 0) && (iam_endpoint.compare(0, 7, "http://") != 0)) { + S3FS_PRN_EXIT("option ibm_iam_endpoint has invalid format, missing http / https protocol"); + return -1; + } + endpoint_url = iam_endpoint + "/oidc/token"; + S3fsCurl::SetIAMCredentialsURL(endpoint_url.c_str()); + return 0; + } + if(0 == strcmp(arg, "ecs")){ + if (is_ibm_iam_auth) { + S3FS_PRN_EXIT("option ecs cannot be used in conjunction with ibm"); + return -1; + } + S3fsCurl::SetIsECS(true); + S3fsCurl::SetIAMCredentialsURL("http://169.254.170.2"); + S3fsCurl::SetIAMFieldCount(5); + is_ecs = true; + return 0; + } + if(0 == STR2NCMP(arg, "iam_role")){ + if (is_ecs || is_ibm_iam_auth) { + S3FS_PRN_EXIT("option iam_role cannot be used in conjunction with ecs or ibm"); + 
return -1; + } + if(0 == strcmp(arg, "iam_role") || 0 == strcmp(arg, "iam_role=auto")){ + // loading IAM role name in s3fs_init(), because we need to wait initializing curl. + // + load_iamrole = true; + return 0; + + }else if(0 == STR2NCMP(arg, "iam_role=")){ + const char* role = strchr(arg, '=') + sizeof(char); + S3fsCurl::SetIAMRole(role); + load_iamrole = false; + return 0; + } + } + if(0 == STR2NCMP(arg, "profile=")){ + aws_profile = strchr(arg, '=') + sizeof(char); + return 0; + } + if(0 == STR2NCMP(arg, "public_bucket=")){ + off_t pubbucket = cvt_strtoofft(strchr(arg, '=') + sizeof(char)); + if(1 == pubbucket){ + S3fsCurl::SetPublicBucket(true); + // [NOTE] + // if bucket is public(without credential), s3 do not allow copy api. + // so s3fs sets nocopyapi mode. + // + nocopyapi = true; + }else if(0 == pubbucket){ + S3fsCurl::SetPublicBucket(false); + }else{ + S3FS_PRN_EXIT("poorly formed argument to option: public_bucket."); + return -1; + } + return 0; + } + if(0 == STR2NCMP(arg, "bucket=")){ + std::string bname = strchr(arg, '=') + sizeof(char); + if ((ret = set_bucket(bname.c_str()))){ + return ret; + } + return 0; + } + if(0 == strcmp(arg, "no_check_certificate")){ + S3fsCurl::SetCheckCertificate(false); + return 0; + } + if(0 == STR2NCMP(arg, "connect_timeout=")){ + long contimeout = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); + S3fsCurl::SetConnectTimeout(contimeout); + return 0; + } + if(0 == STR2NCMP(arg, "readwrite_timeout=")){ + time_t rwtimeout = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); + S3fsCurl::SetReadwriteTimeout(rwtimeout); + return 0; + } + if(0 == STR2NCMP(arg, "list_object_max_keys=")){ + int max_keys = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); + if(max_keys < 1000){ + S3FS_PRN_EXIT("argument should be over 1000: list_object_max_keys"); + return -1; + } + max_keys_list_object = max_keys; + return 0; + } + if(0 == STR2NCMP(arg, "max_stat_cache_size=")){ + unsigned long cache_size = 
static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); + StatCache::getStatCacheData()->SetCacheSize(cache_size); + return 0; + } + if(0 == STR2NCMP(arg, "stat_cache_expire=")){ + time_t expr_time = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); + StatCache::getStatCacheData()->SetExpireTime(expr_time); + return 0; + } + // [NOTE] + // This option is for compatibility old version. + if(0 == STR2NCMP(arg, "stat_cache_interval_expire=")){ + time_t expr_time = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); + StatCache::getStatCacheData()->SetExpireTime(expr_time, true); + return 0; + } + if(0 == strcmp(arg, "enable_noobj_cache")){ + StatCache::getStatCacheData()->EnableCacheNoObject(); + return 0; + } + if(0 == strcmp(arg, "nodnscache")){ + S3fsCurl::SetDnsCache(false); + return 0; + } + if(0 == strcmp(arg, "nosscache")){ + S3fsCurl::SetSslSessionCache(false); + return 0; + } + if(0 == STR2NCMP(arg, "parallel_count=") || 0 == STR2NCMP(arg, "parallel_upload=")){ + int maxpara = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); + if(0 >= maxpara){ + S3FS_PRN_EXIT("argument should be over 1: parallel_count"); + return -1; + } + S3fsCurl::SetMaxParallelCount(maxpara); + return 0; + } + if(0 == STR2NCMP(arg, "fd_page_size=")){ + S3FS_PRN_ERR("option fd_page_size is no longer supported, so skip this option."); + return 0; + } + if(0 == STR2NCMP(arg, "multipart_size=")){ + off_t size = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); + if(!S3fsCurl::SetMultipartSize(size)){ + S3FS_PRN_EXIT("multipart_size option must be at least 5 MB."); + return -1; + } + return 0; + } + if(0 == STR2NCMP(arg, "ensure_diskfree=")){ + off_t dfsize = cvt_strtoofft(strchr(arg, '=') + sizeof(char)) * 1024 * 1024; + if(dfsize < S3fsCurl::GetMultipartSize()){ + S3FS_PRN_WARN("specified size to ensure disk free space is smaller than multipart size, so set multipart size to it."); + dfsize = S3fsCurl::GetMultipartSize(); + } + 
FdManager::SetEnsureFreeDiskSpace(dfsize); + return 0; + } + if(0 == STR2NCMP(arg, "singlepart_copy_limit=")){ + singlepart_copy_limit = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))) * 1024; + return 0; + } + if(0 == STR2NCMP(arg, "ahbe_conf=")){ + string ahbe_conf = strchr(arg, '=') + sizeof(char); + if(!AdditionalHeader::get()->Load(ahbe_conf.c_str())){ + S3FS_PRN_EXIT("failed to load ahbe_conf file(%s).", ahbe_conf.c_str()); + return -1; + } + AdditionalHeader::get()->Dump(); + return 0; + } + if(0 == strcmp(arg, "noxmlns")){ + noxmlns = true; + return 0; + } + if(0 == strcmp(arg, "nomixupload")){ + FdEntity::SetNoMixMultipart(); + return 0; + } + if(0 == strcmp(arg, "nocopyapi")){ + nocopyapi = true; + return 0; + } + if(0 == strcmp(arg, "norenameapi")){ + norenameapi = true; + return 0; + } + if(0 == strcmp(arg, "complement_stat")){ + complement_stat = true; + return 0; + } + if(0 == strcmp(arg, "notsup_compat_dir")){ + support_compat_dir = false; + return 0; + } + if(0 == strcmp(arg, "enable_content_md5")){ + S3fsCurl::SetContentMd5(true); + return 0; + } + if(0 == STR2NCMP(arg, "host=")){ + s3host = strchr(arg, '=') + sizeof(char); + return 0; + } + if(0 == STR2NCMP(arg, "servicepath=")){ + service_path = strchr(arg, '=') + sizeof(char); + return 0; + } + if(0 == STR2NCMP(arg, "url=")){ + s3host = strchr(arg, '=') + sizeof(char); + // strip the trailing '/', if any, off the end of the host + // string + size_t found, length; + found = s3host.find_last_of('/'); + length = s3host.length(); + while(found == (length - 1) && length > 0){ + s3host.erase(found); + found = s3host.find_last_of('/'); + length = s3host.length(); + } + // Check url for http / https protocol string + if((s3host.compare(0, 8, "https://") != 0) && (s3host.compare(0, 7, "http://") != 0)) { + S3FS_PRN_EXIT("option url has invalid format, missing http / https protocol"); + return -1; + } + return 0; + } + if(0 == strcmp(arg, "sigv2")){ + S3fsCurl::SetSignatureV4(false); + return 
0; + } + if(0 == strcmp(arg, "createbucket")){ + create_bucket = true; + return 0; + } + if(0 == STR2NCMP(arg, "endpoint=")){ + endpoint = strchr(arg, '=') + sizeof(char); + is_specified_endpoint = true; + return 0; + } + if(0 == strcmp(arg, "use_path_request_style")){ + pathrequeststyle = true; + return 0; + } + if(0 == STR2NCMP(arg, "noua")){ + S3fsCurl::SetUserAgentFlag(false); + return 0; + } + if(0 == strcmp(arg, "use_xattr")){ + is_use_xattr = true; + return 0; + }else if(0 == STR2NCMP(arg, "use_xattr=")){ + const char* strflag = strchr(arg, '=') + sizeof(char); + if(0 == strcmp(strflag, "1")){ + is_use_xattr = true; + }else if(0 == strcmp(strflag, "0")){ + is_use_xattr = false; + }else{ + S3FS_PRN_EXIT("option use_xattr has unknown parameter(%s).", strflag); + return -1; + } + return 0; + } + if(0 == STR2NCMP(arg, "cipher_suites=")){ + cipher_suites = strchr(arg, '=') + sizeof(char); + return 0; + } + if(0 == STR2NCMP(arg, "instance_name=")){ + instance_name = strchr(arg, '=') + sizeof(char); + instance_name = "[" + instance_name + "]"; + return 0; + } + if(0 == STR2NCMP(arg, "mime=")){ + mimetype_file = strchr(arg, '=') + sizeof(char); + return 0; + } + // + // debug option for s3fs + // + if(0 == STR2NCMP(arg, "dbglevel=")){ + const char* strlevel = strchr(arg, '=') + sizeof(char); + if(0 == strcasecmp(strlevel, "silent") || 0 == strcasecmp(strlevel, "critical") || 0 == strcasecmp(strlevel, "crit")){ + S3fsSignals::SetLogLevel(S3FS_LOG_CRIT); + }else if(0 == strcasecmp(strlevel, "error") || 0 == strcasecmp(strlevel, "err")){ + S3fsSignals::SetLogLevel(S3FS_LOG_ERR); + }else if(0 == strcasecmp(strlevel, "wan") || 0 == strcasecmp(strlevel, "warn") || 0 == strcasecmp(strlevel, "warning")){ + S3fsSignals::SetLogLevel(S3FS_LOG_WARN); + }else if(0 == strcasecmp(strlevel, "inf") || 0 == strcasecmp(strlevel, "info") || 0 == strcasecmp(strlevel, "information")){ + S3fsSignals::SetLogLevel(S3FS_LOG_INFO); + }else if(0 == strcasecmp(strlevel, "dbg") || 0 == 
strcasecmp(strlevel, "debug")){ + S3fsSignals::SetLogLevel(S3FS_LOG_DBG); + }else{ + S3FS_PRN_EXIT("option dbglevel has unknown parameter(%s).", strlevel); + return -1; + } + return 0; + } + // + // debug option + // + // debug_level is S3FS_LOG_INFO, after second -d is passed to fuse. + // + if(0 == strcmp(arg, "-d") || 0 == strcmp(arg, "--debug")){ + if(!IS_S3FS_LOG_INFO() && !IS_S3FS_LOG_DBG()){ + S3fsSignals::SetLogLevel(S3FS_LOG_INFO); + return 0; + } + if(0 == strcmp(arg, "--debug")){ + // fuse doesn't understand "--debug", but it understands -d. + // but we can't pass -d back to fuse. + return 0; + } + } + // "f2" is not used no more. + // (set S3FS_LOG_DBG) + if(0 == strcmp(arg, "f2")){ + S3fsSignals::SetLogLevel(S3FS_LOG_DBG); + return 0; + } + if(0 == strcmp(arg, "curldbg")){ + S3fsCurl::SetVerbose(true); + return 0; + }else if(0 == STR2NCMP(arg, "curldbg=")){ + const char* strlevel = strchr(arg, '=') + sizeof(char); + if(0 == strcasecmp(strlevel, "normal")){ + S3fsCurl::SetVerbose(true); + }else if(0 == strcasecmp(strlevel, "body")){ + S3fsCurl::SetVerbose(true); + S3fsCurl::SetDumpBody(true); + }else{ + S3FS_PRN_EXIT("option curldbg has unknown parameter(%s).", strlevel); + return -1; + } + return 0; + } + // + // Check cache file, using SIGUSR1 + // + if(0 == strcmp(arg, "set_check_cache_sigusr1")){ + if(!S3fsSignals::SetUsr1Handler(NULL)){ + S3FS_PRN_EXIT("could not set sigusr1 for checking cache."); + return -1; + } + return 0; + }else if(0 == STR2NCMP(arg, "set_check_cache_sigusr1=")){ + const char* strfilepath = strchr(arg, '=') + sizeof(char); + if(!S3fsSignals::SetUsr1Handler(strfilepath)){ + S3FS_PRN_EXIT("could not set sigusr1 for checking cache and output file(%s).", strfilepath); + return -1; + } + return 0; + } + if(0 == STR2NCMP(arg, "accessKeyId=")){ + S3FS_PRN_EXIT("option accessKeyId is no longer supported."); return -1; - } } - closedir(dp); - } - return 1; - } - - // Unknown option - if(NO_UTILITY_MODE == utility_mode){ - 
S3FS_PRN_EXIT("specified unknown third option(%s).", arg); - }else{ - S3FS_PRN_EXIT("specified unknown second option(%s). you don't need to specify second option(mountpoint) for utility mode(-u).", arg); - } - return -1; - - }else if(key == FUSE_OPT_KEY_OPT){ - if(0 == STR2NCMP(arg, "uid=")){ - s3fs_uid = get_uid(strchr(arg, '=') + sizeof(char)); - if(0 != geteuid() && 0 == s3fs_uid){ - S3FS_PRN_EXIT("root user can only specify uid=0."); - return -1; - } - is_s3fs_uid = true; - return 1; // continue for fuse option - } - if(0 == STR2NCMP(arg, "gid=")){ - s3fs_gid = get_gid(strchr(arg, '=') + sizeof(char)); - if(0 != getegid() && 0 == s3fs_gid){ - S3FS_PRN_EXIT("root user can only specify gid=0."); - return -1; - } - is_s3fs_gid = true; - return 1; // continue for fuse option - } - if(0 == STR2NCMP(arg, "umask=")){ - s3fs_umask = cvt_strtoofft(strchr(arg, '=') + sizeof(char), /*base=*/ 8); - s3fs_umask &= (S_IRWXU | S_IRWXG | S_IRWXO); - is_s3fs_umask = true; - return 1; // continue for fuse option - } - if(0 == strcmp(arg, "allow_other")){ - allow_other = true; - return 1; // continue for fuse option - } - if(0 == STR2NCMP(arg, "mp_umask=")){ - mp_umask = cvt_strtoofft(strchr(arg, '=') + sizeof(char), /*base=*/ 8); - mp_umask &= (S_IRWXU | S_IRWXG | S_IRWXO); - is_mp_umask = true; - return 0; - } - if(0 == STR2NCMP(arg, "default_acl=")){ - const char* acl_string = strchr(arg, '=') + sizeof(char); - acl_t acl = acl_t::from_str(acl_string); - if(acl == acl_t::UNKNOWN){ - S3FS_PRN_EXIT("unknown value for default_acl: %s", acl_string); - return -1; - } - S3fsCurl::SetDefaultAcl(acl); - return 0; - } - if(0 == STR2NCMP(arg, "retries=")){ - off_t retries = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); - if(retries == 0){ - S3FS_PRN_EXIT("retries must be greater than zero"); - return -1; - } - S3fsCurl::SetRetries(retries); - return 0; - } - if(0 == STR2NCMP(arg, "use_cache=")){ - FdManager::SetCacheDir(strchr(arg, '=') + sizeof(char)); - return 0; - } - 
if(0 == STR2NCMP(arg, "check_cache_dir_exist")){ - FdManager::SetCheckCacheDirExist(true); - return 0; - } - if(0 == strcmp(arg, "del_cache")){ - is_remove_cache = true; - return 0; - } - if(0 == STR2NCMP(arg, "multireq_max=")){ - int maxreq = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); - S3fsCurl::SetMaxMultiRequest(maxreq); - return 0; - } - if(0 == strcmp(arg, "nonempty")){ - nonempty = true; - return 1; // need to continue for fuse. - } - if(0 == strcmp(arg, "nomultipart")){ - nomultipart = true; - return 0; - } - // old format for storage_class - if(0 == strcmp(arg, "use_rrs") || 0 == STR2NCMP(arg, "use_rrs=")){ - off_t rrs = 1; - // for an old format. - if(0 == STR2NCMP(arg, "use_rrs=")){ - rrs = cvt_strtoofft(strchr(arg, '=') + sizeof(char)); - } - if(0 == rrs){ - S3fsCurl::SetStorageClass(storage_class_t::STANDARD); - }else if(1 == rrs){ - S3fsCurl::SetStorageClass(storage_class_t::REDUCED_REDUNDANCY); - }else{ - S3FS_PRN_EXIT("poorly formed argument to option: use_rrs"); - return -1; - } - return 0; - } - if(0 == STR2NCMP(arg, "storage_class=")){ - const char *storage_class_str = strchr(arg, '=') + sizeof(char); - storage_class_t storage_class = storage_class_t::from_str(storage_class_str); - if(storage_class == storage_class_t::UNKNOWN){ - S3FS_PRN_EXIT("unknown value for storage_class: %s", storage_class_str); - return -1; - } - S3fsCurl::SetStorageClass(storage_class); - return 0; - } - // - // [NOTE] - // use_sse Set Server Side Encrypting type to SSE-S3 - // use_sse=1 - // use_sse=file Set Server Side Encrypting type to Custom key(SSE-C) and load custom keys - // use_sse=custom(c):file - // use_sse=custom(c) Set Server Side Encrypting type to Custom key(SSE-C) - // use_sse=kmsid(k):kms-key-id Set Server Side Encrypting type to AWS Key Management key id(SSE-KMS) and load KMS id - // use_sse=kmsid(k) Set Server Side Encrypting type to AWS Key Management key id(SSE-KMS) - // - // load_sse_c=file Load Server Side Encrypting custom keys - 
// - // AWSSSECKEYS Loading Environment for Server Side Encrypting custom keys - // AWSSSEKMSID Loading Environment for Server Side Encrypting Key id - // - if(0 == STR2NCMP(arg, "use_sse")){ - if(0 == strcmp(arg, "use_sse") || 0 == strcmp(arg, "use_sse=1")){ // use_sse=1 is old type parameter - // sse type is SSE_S3 - if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseS3Type()){ - S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment."); - return -1; + if(0 == STR2NCMP(arg, "secretAccessKey=")){ + S3FS_PRN_EXIT("option secretAccessKey is no longer supported."); + return -1; } - S3fsCurl::SetSseType(sse_type_t::SSE_S3); - - }else if(0 == strcmp(arg, "use_sse=kmsid") || 0 == strcmp(arg, "use_sse=k")){ - // sse type is SSE_KMS with out kmsid(expecting id is loaded by environment) - if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseKmsType()){ - S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment."); - return -1; + if(0 == strcmp(arg, "use_wtf8")){ + use_wtf8 = true; + return 0; } - if(!S3fsCurl::IsSetSseKmsId()){ - S3FS_PRN_EXIT("use_sse=kms but not loaded kms id by environment."); - return -1; - } - S3fsCurl::SetSseType(sse_type_t::SSE_KMS); - - }else if(0 == STR2NCMP(arg, "use_sse=kmsid:") || 0 == STR2NCMP(arg, "use_sse=k:")){ - // sse type is SSE_KMS with kmsid - if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseKmsType()){ - S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment."); - return -1; - } - const char* kmsid; - if(0 == STR2NCMP(arg, "use_sse=kmsid:")){ - kmsid = &arg[strlen("use_sse=kmsid:")]; - }else{ - kmsid = &arg[strlen("use_sse=k:")]; - } - if(!S3fsCurl::SetSseKmsid(kmsid)){ - S3FS_PRN_EXIT("failed to load use_sse kms id."); - return -1; - } - S3fsCurl::SetSseType(sse_type_t::SSE_KMS); - - }else if(0 == strcmp(arg, "use_sse=custom") || 0 == strcmp(arg, "use_sse=c")){ - // sse type is SSE_C with out custom keys(expecting keys are loaded by environment or 
load_sse_c option) - if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseCType()){ - S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment."); - return -1; + if(0 == strcmp(arg, "requester_pays")){ + S3fsCurl::SetRequesterPays(true); + return 0; } // [NOTE] - // do not check ckeys exists here. + // following option will be discarding, because these are not for fuse. + // (Referenced sshfs.c) // - S3fsCurl::SetSseType(sse_type_t::SSE_C); - - }else if(0 == STR2NCMP(arg, "use_sse=custom:") || 0 == STR2NCMP(arg, "use_sse=c:")){ - // sse type is SSE_C with custom keys - if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseCType()){ - S3FS_PRN_EXIT("already set SSE another type, so conflict use_sse option or environment."); - return -1; + if(0 == strcmp(arg, "auto") || + 0 == strcmp(arg, "noauto") || + 0 == strcmp(arg, "user") || + 0 == strcmp(arg, "nouser") || + 0 == strcmp(arg, "users") || + 0 == strcmp(arg, "_netdev")) + { + return 0; } - const char* ssecfile; - if(0 == STR2NCMP(arg, "use_sse=custom:")){ - ssecfile = &arg[strlen("use_sse=custom:")]; - }else{ - ssecfile = &arg[strlen("use_sse=c:")]; - } - if(!S3fsCurl::SetSseCKeys(ssecfile)){ - S3FS_PRN_EXIT("failed to load use_sse custom key file(%s).", ssecfile); - return -1; - } - S3fsCurl::SetSseType(sse_type_t::SSE_C); - - }else if(0 == strcmp(arg, "use_sse=")){ // this type is old style(parameter is custom key file path) - // SSE_C with custom keys. - const char* ssecfile = &arg[strlen("use_sse=")]; - if(!S3fsCurl::SetSseCKeys(ssecfile)){ - S3FS_PRN_EXIT("failed to load use_sse custom key file(%s).", ssecfile); - return -1; - } - S3fsCurl::SetSseType(sse_type_t::SSE_C); - - }else{ - // never come here. - S3FS_PRN_EXIT("something wrong use_sse option."); - return -1; - } - return 0; } - // [NOTE] - // Do only load SSE custom keys, care for set without set sse type. 
- if(0 == STR2NCMP(arg, "load_sse_c=")){ - const char* ssecfile = &arg[strlen("load_sse_c=")]; - if(!S3fsCurl::SetSseCKeys(ssecfile)){ - S3FS_PRN_EXIT("failed to load use_sse custom key file(%s).", ssecfile); - return -1; - } - return 0; - } - if(0 == STR2NCMP(arg, "ssl_verify_hostname=")){ - long sslvh = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); - if(-1 == S3fsCurl::SetSslVerifyHostname(sslvh)){ - S3FS_PRN_EXIT("poorly formed argument to option: ssl_verify_hostname."); - return -1; - } - return 0; - } - if(0 == STR2NCMP(arg, "passwd_file=")){ - passwd_file = strchr(arg, '=') + sizeof(char); - return 0; - } - if(0 == strcmp(arg, "ibm_iam_auth")){ - S3fsCurl::SetIsIBMIAMAuth(true); - S3fsCurl::SetIAMCredentialsURL("https://iam.bluemix.net/oidc/token"); - S3fsCurl::SetIAMTokenField("access_token"); - S3fsCurl::SetIAMExpiryField("expiration"); - S3fsCurl::SetIAMFieldCount(2); - is_ibm_iam_auth = true; - return 0; - } - if (0 == STR2NCMP(arg, "use_session_token")) { - is_use_session_token = true; - } - if(0 == STR2NCMP(arg, "ibm_iam_endpoint=")){ - std::string endpoint_url; - std::string iam_endpoint = strchr(arg, '=') + sizeof(char); - // Check url for http / https protocol string - if((iam_endpoint.compare(0, 8, "https://") != 0) && (iam_endpoint.compare(0, 7, "http://") != 0)) { - S3FS_PRN_EXIT("option ibm_iam_endpoint has invalid format, missing http / https protocol"); - return -1; - } - endpoint_url = iam_endpoint + "/oidc/token"; - S3fsCurl::SetIAMCredentialsURL(endpoint_url.c_str()); - return 0; - } - if(0 == strcmp(arg, "ecs")){ - if (is_ibm_iam_auth) { - S3FS_PRN_EXIT("option ecs cannot be used in conjunction with ibm"); - return -1; - } - S3fsCurl::SetIsECS(true); - S3fsCurl::SetIAMCredentialsURL("http://169.254.170.2"); - S3fsCurl::SetIAMFieldCount(5); - is_ecs = true; - return 0; - } - if(0 == STR2NCMP(arg, "iam_role")){ - if (is_ecs || is_ibm_iam_auth) { - S3FS_PRN_EXIT("option iam_role cannot be used in conjunction with ecs or ibm"); - 
return -1; - } - if(0 == strcmp(arg, "iam_role") || 0 == strcmp(arg, "iam_role=auto")){ - // loading IAM role name in s3fs_init(), because we need to wait initializing curl. - // - load_iamrole = true; - return 0; - - }else if(0 == STR2NCMP(arg, "iam_role=")){ - const char* role = strchr(arg, '=') + sizeof(char); - S3fsCurl::SetIAMRole(role); - load_iamrole = false; - return 0; - } - } - if(0 == STR2NCMP(arg, "profile=")){ - aws_profile = strchr(arg, '=') + sizeof(char); - return 0; - } - if(0 == STR2NCMP(arg, "public_bucket=")){ - off_t pubbucket = cvt_strtoofft(strchr(arg, '=') + sizeof(char)); - if(1 == pubbucket){ - S3fsCurl::SetPublicBucket(true); - // [NOTE] - // if bucket is public(without credential), s3 do not allow copy api. - // so s3fs sets nocopyapi mode. - // - nocopyapi = true; - }else if(0 == pubbucket){ - S3fsCurl::SetPublicBucket(false); - }else{ - S3FS_PRN_EXIT("poorly formed argument to option: public_bucket."); - return -1; - } - return 0; - } - if(0 == STR2NCMP(arg, "bucket=")){ - std::string bname = strchr(arg, '=') + sizeof(char); - if ((ret = set_bucket(bname.c_str()))){ - return ret; - } - return 0; - } - if(0 == strcmp(arg, "no_check_certificate")){ - S3fsCurl::SetCheckCertificate(false); - return 0; - } - if(0 == STR2NCMP(arg, "connect_timeout=")){ - long contimeout = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); - S3fsCurl::SetConnectTimeout(contimeout); - return 0; - } - if(0 == STR2NCMP(arg, "readwrite_timeout=")){ - time_t rwtimeout = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); - S3fsCurl::SetReadwriteTimeout(rwtimeout); - return 0; - } - if(0 == STR2NCMP(arg, "list_object_max_keys=")){ - int max_keys = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); - if(max_keys < 1000){ - S3FS_PRN_EXIT("argument should be over 1000: list_object_max_keys"); - return -1; - } - max_keys_list_object = max_keys; - return 0; - } - if(0 == STR2NCMP(arg, "max_stat_cache_size=")){ - unsigned long cache_size = 
static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); - StatCache::getStatCacheData()->SetCacheSize(cache_size); - return 0; - } - if(0 == STR2NCMP(arg, "stat_cache_expire=")){ - time_t expr_time = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); - StatCache::getStatCacheData()->SetExpireTime(expr_time); - return 0; - } - // [NOTE] - // This option is for compatibility old version. - if(0 == STR2NCMP(arg, "stat_cache_interval_expire=")){ - time_t expr_time = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); - StatCache::getStatCacheData()->SetExpireTime(expr_time, true); - return 0; - } - if(0 == strcmp(arg, "enable_noobj_cache")){ - StatCache::getStatCacheData()->EnableCacheNoObject(); - return 0; - } - if(0 == strcmp(arg, "nodnscache")){ - S3fsCurl::SetDnsCache(false); - return 0; - } - if(0 == strcmp(arg, "nosscache")){ - S3fsCurl::SetSslSessionCache(false); - return 0; - } - if(0 == STR2NCMP(arg, "parallel_count=") || 0 == STR2NCMP(arg, "parallel_upload=")){ - int maxpara = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); - if(0 >= maxpara){ - S3FS_PRN_EXIT("argument should be over 1: parallel_count"); - return -1; - } - S3fsCurl::SetMaxParallelCount(maxpara); - return 0; - } - if(0 == STR2NCMP(arg, "fd_page_size=")){ - S3FS_PRN_ERR("option fd_page_size is no longer supported, so skip this option."); - return 0; - } - if(0 == STR2NCMP(arg, "multipart_size=")){ - off_t size = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))); - if(!S3fsCurl::SetMultipartSize(size)){ - S3FS_PRN_EXIT("multipart_size option must be at least 5 MB."); - return -1; - } - return 0; - } - if(0 == STR2NCMP(arg, "ensure_diskfree=")){ - off_t dfsize = cvt_strtoofft(strchr(arg, '=') + sizeof(char)) * 1024 * 1024; - if(dfsize < S3fsCurl::GetMultipartSize()){ - S3FS_PRN_WARN("specified size to ensure disk free space is smaller than multipart size, so set multipart size to it."); - dfsize = S3fsCurl::GetMultipartSize(); - } - 
FdManager::SetEnsureFreeDiskSpace(dfsize); - return 0; - } - if(0 == STR2NCMP(arg, "singlepart_copy_limit=")){ - singlepart_copy_limit = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char))) * 1024; - return 0; - } - if(0 == STR2NCMP(arg, "ahbe_conf=")){ - string ahbe_conf = strchr(arg, '=') + sizeof(char); - if(!AdditionalHeader::get()->Load(ahbe_conf.c_str())){ - S3FS_PRN_EXIT("failed to load ahbe_conf file(%s).", ahbe_conf.c_str()); - return -1; - } - AdditionalHeader::get()->Dump(); - return 0; - } - if(0 == strcmp(arg, "noxmlns")){ - noxmlns = true; - return 0; - } - if(0 == strcmp(arg, "nomixupload")){ - FdEntity::SetNoMixMultipart(); - return 0; - } - if(0 == strcmp(arg, "nocopyapi")){ - nocopyapi = true; - return 0; - } - if(0 == strcmp(arg, "norenameapi")){ - norenameapi = true; - return 0; - } - if(0 == strcmp(arg, "complement_stat")){ - complement_stat = true; - return 0; - } - if(0 == strcmp(arg, "notsup_compat_dir")){ - support_compat_dir = false; - return 0; - } - if(0 == strcmp(arg, "enable_content_md5")){ - S3fsCurl::SetContentMd5(true); - return 0; - } - if(0 == STR2NCMP(arg, "host=")){ - host = strchr(arg, '=') + sizeof(char); - return 0; - } - if(0 == STR2NCMP(arg, "servicepath=")){ - service_path = strchr(arg, '=') + sizeof(char); - return 0; - } - if(0 == STR2NCMP(arg, "url=")){ - host = strchr(arg, '=') + sizeof(char); - // strip the trailing '/', if any, off the end of the host - // string - size_t found, length; - found = host.find_last_of('/'); - length = host.length(); - while(found == (length - 1) && length > 0){ - host.erase(found); - found = host.find_last_of('/'); - length = host.length(); - } - // Check url for http / https protocol string - if((host.compare(0, 8, "https://") != 0) && (host.compare(0, 7, "http://") != 0)) { - S3FS_PRN_EXIT("option url has invalid format, missing http / https protocol"); - return -1; - } - return 0; - } - if(0 == strcmp(arg, "sigv2")){ - S3fsCurl::SetSignatureV4(false); - return 0; - } - if(0 == 
strcmp(arg, "createbucket")){ - create_bucket = true; - return 0; - } - if(0 == STR2NCMP(arg, "endpoint=")){ - endpoint = strchr(arg, '=') + sizeof(char); - is_specified_endpoint = true; - return 0; - } - if(0 == strcmp(arg, "use_path_request_style")){ - pathrequeststyle = true; - return 0; - } - if(0 == STR2NCMP(arg, "noua")){ - S3fsCurl::SetUserAgentFlag(false); - return 0; - } - if(0 == strcmp(arg, "use_xattr")){ - is_use_xattr = true; - return 0; - }else if(0 == STR2NCMP(arg, "use_xattr=")){ - const char* strflag = strchr(arg, '=') + sizeof(char); - if(0 == strcmp(strflag, "1")){ - is_use_xattr = true; - }else if(0 == strcmp(strflag, "0")){ - is_use_xattr = false; - }else{ - S3FS_PRN_EXIT("option use_xattr has unknown parameter(%s).", strflag); - return -1; - } - return 0; - } - if(0 == STR2NCMP(arg, "cipher_suites=")){ - cipher_suites = strchr(arg, '=') + sizeof(char); - return 0; - } - if(0 == STR2NCMP(arg, "instance_name=")){ - instance_name = strchr(arg, '=') + sizeof(char); - instance_name = "[" + instance_name + "]"; - return 0; - } - if(0 == STR2NCMP(arg, "mime=")){ - mimetype_file = strchr(arg, '=') + sizeof(char); - return 0; - } - // - // debug option for s3fs - // - if(0 == STR2NCMP(arg, "dbglevel=")){ - const char* strlevel = strchr(arg, '=') + sizeof(char); - if(0 == strcasecmp(strlevel, "silent") || 0 == strcasecmp(strlevel, "critical") || 0 == strcasecmp(strlevel, "crit")){ - S3fsSignals::SetLogLevel(S3FS_LOG_CRIT); - }else if(0 == strcasecmp(strlevel, "error") || 0 == strcasecmp(strlevel, "err")){ - S3fsSignals::SetLogLevel(S3FS_LOG_ERR); - }else if(0 == strcasecmp(strlevel, "wan") || 0 == strcasecmp(strlevel, "warn") || 0 == strcasecmp(strlevel, "warning")){ - S3fsSignals::SetLogLevel(S3FS_LOG_WARN); - }else if(0 == strcasecmp(strlevel, "inf") || 0 == strcasecmp(strlevel, "info") || 0 == strcasecmp(strlevel, "information")){ - S3fsSignals::SetLogLevel(S3FS_LOG_INFO); - }else if(0 == strcasecmp(strlevel, "dbg") || 0 == strcasecmp(strlevel, 
"debug")){ - S3fsSignals::SetLogLevel(S3FS_LOG_DBG); - }else{ - S3FS_PRN_EXIT("option dbglevel has unknown parameter(%s).", strlevel); - return -1; - } - return 0; - } - // - // debug option - // - // debug_level is S3FS_LOG_INFO, after second -d is passed to fuse. - // - if(0 == strcmp(arg, "-d") || 0 == strcmp(arg, "--debug")){ - if(!IS_S3FS_LOG_INFO() && !IS_S3FS_LOG_DBG()){ - S3fsSignals::SetLogLevel(S3FS_LOG_INFO); - return 0; - } - if(0 == strcmp(arg, "--debug")){ - // fuse doesn't understand "--debug", but it understands -d. - // but we can't pass -d back to fuse. - return 0; - } - } - // "f2" is not used no more. - // (set S3FS_LOG_DBG) - if(0 == strcmp(arg, "f2")){ - S3fsSignals::SetLogLevel(S3FS_LOG_DBG); - return 0; - } - if(0 == strcmp(arg, "curldbg")){ - S3fsCurl::SetVerbose(true); - return 0; - }else if(0 == STR2NCMP(arg, "curldbg=")){ - const char* strlevel = strchr(arg, '=') + sizeof(char); - if(0 == strcasecmp(strlevel, "normal")){ - S3fsCurl::SetVerbose(true); - }else if(0 == strcasecmp(strlevel, "body")){ - S3fsCurl::SetVerbose(true); - S3fsCurl::SetDumpBody(true); - }else{ - S3FS_PRN_EXIT("option curldbg has unknown parameter(%s).", strlevel); - return -1; - } - return 0; - } - // - // Check cache file, using SIGUSR1 - // - if(0 == strcmp(arg, "set_check_cache_sigusr1")){ - if(!S3fsSignals::SetUsr1Handler(NULL)){ - S3FS_PRN_EXIT("could not set sigusr1 for checking cache."); - return -1; - } - return 0; - }else if(0 == STR2NCMP(arg, "set_check_cache_sigusr1=")){ - const char* strfilepath = strchr(arg, '=') + sizeof(char); - if(!S3fsSignals::SetUsr1Handler(strfilepath)){ - S3FS_PRN_EXIT("could not set sigusr1 for checking cache and output file(%s).", strfilepath); - return -1; - } - return 0; - } - - if(0 == STR2NCMP(arg, "accessKeyId=")){ - S3FS_PRN_EXIT("option accessKeyId is no longer supported."); - return -1; - } - if(0 == STR2NCMP(arg, "secretAccessKey=")){ - S3FS_PRN_EXIT("option secretAccessKey is no longer supported."); - return -1; - } - 
if(0 == strcmp(arg, "use_wtf8")){ - use_wtf8 = true; - return 0; - } - if(0 == strcmp(arg, "requester_pays")){ - S3fsCurl::SetRequesterPays(true); - return 0; - } - - // [NOTE] - // following option will be discarding, because these are not for fuse. - // (Referenced sshfs.c) - // - if(0 == strcmp(arg, "auto") || - 0 == strcmp(arg, "noauto") || - 0 == strcmp(arg, "user") || - 0 == strcmp(arg, "nouser") || - 0 == strcmp(arg, "users") || - 0 == strcmp(arg, "_netdev")) - { - return 0; - } - } - return 1; + return 1; } int main(int argc, char* argv[]) { - int ch; - int fuse_res; - int option_index = 0; - struct fuse_operations s3fs_oper; - time_t incomp_abort_time = (24 * 60 * 60); + int ch; + int fuse_res; + int option_index = 0; + struct fuse_operations s3fs_oper; + time_t incomp_abort_time = (24 * 60 * 60); - static const struct option long_opts[] = { - {"help", no_argument, NULL, 'h'}, - {"version", no_argument, 0, 0}, - {"debug", no_argument, NULL, 'd'}, - {"incomplete-mpu-list", no_argument, NULL, 'u'}, - {"incomplete-mpu-abort", optional_argument, NULL, 'a'}, // 'a' is only identifier and is not option. - {NULL, 0, NULL, 0} - }; + static const struct option long_opts[] = { + {"help", no_argument, NULL, 'h'}, + {"version", no_argument, 0, 0}, + {"debug", no_argument, NULL, 'd'}, + {"incomplete-mpu-list", no_argument, NULL, 'u'}, + {"incomplete-mpu-abort", optional_argument, NULL, 'a'}, // 'a' is only identifier and is not option. 
+ {NULL, 0, NULL, 0} + }; - // init syslog(default CRIT) - openlog("s3fs", LOG_PID | LOG_ODELAY | LOG_NOWAIT, LOG_USER); - S3fsSignals::SetLogLevel(debug_level); + // init syslog(default CRIT) + openlog("s3fs", LOG_PID | LOG_ODELAY | LOG_NOWAIT, LOG_USER); + S3fsSignals::SetLogLevel(debug_level); - // init xml2 - xmlInitParser(); - LIBXML_TEST_VERSION + // init xml2 + xmlInitParser(); + LIBXML_TEST_VERSION - init_sysconf_vars(); + init_sysconf_vars(); - // get program name - emulate basename - program_name.assign(argv[0]); - size_t found = program_name.find_last_of('/'); - if(found != string::npos){ - program_name.replace(0, found+1, ""); - } - - while((ch = getopt_long(argc, argv, "dho:fsu", long_opts, &option_index)) != -1){ - switch(ch){ - case 0: - if(strcmp(long_opts[option_index].name, "version") == 0){ - show_version(); - exit(EXIT_SUCCESS); - } - break; - case 'h': - show_help(); - exit(EXIT_SUCCESS); - case 'o': - break; - case 'd': - break; - case 'f': - foreground = true; - break; - case 's': - break; - case 'u': // --incomplete-mpu-list - if(NO_UTILITY_MODE != utility_mode){ - S3FS_PRN_EXIT("already utility mode option is specified."); - exit(EXIT_FAILURE); - } - utility_mode = INCOMP_TYPE_LIST; - break; - case 'a': // --incomplete-mpu-abort - if(NO_UTILITY_MODE != utility_mode){ - S3FS_PRN_EXIT("already utility mode option is specified."); - exit(EXIT_FAILURE); - } - utility_mode = INCOMP_TYPE_ABORT; - - // check expire argument - if(NULL != optarg && 0 == strcasecmp(optarg, "all")){ // all is 0s - incomp_abort_time = 0; - }else if(NULL != optarg){ - if(!convert_unixtime_from_option_arg(optarg, incomp_abort_time)){ - S3FS_PRN_EXIT("--incomplete-mpu-abort option argument is wrong."); - exit(EXIT_FAILURE); - } - } - // if optarg is null, incomp_abort_time is 24H(default) - break; - default: - exit(EXIT_FAILURE); - } - } - - // Load SSE environment - if(!S3fsCurl::LoadEnvSse()){ - S3FS_PRN_EXIT("something wrong about SSE environment."); - 
exit(EXIT_FAILURE); - } - - // ssl init - if(!s3fs_init_global_ssl()){ - S3FS_PRN_EXIT("could not initialize for ssl libraries."); - exit(EXIT_FAILURE); - } - - // init curl (without mime types) - // - // [NOTE] - // The curl initialization here does not load mime types. - // The mime types file parameter are dynamic values according - // to the user's environment, and are analyzed by the my_fuse_opt_proc - // function. - // The my_fuse_opt_proc function is executed after this curl - // initialization. Because the curl method is used in the - // my_fuse_opt_proc function, then it must be called here to - // initialize. Fortunately, the processing using mime types - // is only PUT/POST processing, and it is not used until the - // call of my_fuse_opt_proc function is completed. Therefore, - // the mime type is loaded just after calling the my_fuse_opt_proc - // function. - // - if(!S3fsCurl::InitS3fsCurl()){ - S3FS_PRN_EXIT("Could not initiate curl library."); - s3fs_destroy_global_ssl(); - exit(EXIT_FAILURE); - } - - // clear this structure - memset(&s3fs_oper, 0, sizeof(s3fs_oper)); - - // This is the fuse-style parser for the arguments - // after which the bucket name and mountpoint names - // should have been set - struct fuse_args custom_args = FUSE_ARGS_INIT(argc, argv); - if(0 != fuse_opt_parse(&custom_args, NULL, NULL, my_fuse_opt_proc)){ - S3fsCurl::DestroyS3fsCurl(); - s3fs_destroy_global_ssl(); - exit(EXIT_FAILURE); - } - - // init mime types for curl - if(!S3fsCurl::InitMimeType(mimetype_file)){ - S3FS_PRN_WARN("Missing MIME types prevents setting Content-Type on uploaded objects."); - } - - // [NOTE] - // exclusive option check here. 
- // - if(storage_class_t::REDUCED_REDUNDANCY == S3fsCurl::GetStorageClass() && !S3fsCurl::IsSseDisable()){ - S3FS_PRN_EXIT("use_sse option could not be specified with storage class reduced_redundancy."); - S3fsCurl::DestroyS3fsCurl(); - s3fs_destroy_global_ssl(); - exit(EXIT_FAILURE); - } - if(!S3fsCurl::FinalCheckSse()){ - S3FS_PRN_EXIT("something wrong about SSE options."); - S3fsCurl::DestroyS3fsCurl(); - s3fs_destroy_global_ssl(); - exit(EXIT_FAILURE); - } - - // The first plain argument is the bucket - if(bucket.empty()){ - S3FS_PRN_EXIT("missing BUCKET argument."); - show_usage(); - S3fsCurl::DestroyS3fsCurl(); - s3fs_destroy_global_ssl(); - exit(EXIT_FAILURE); - } - - // bucket names cannot contain upper case characters in virtual-hosted style - if((!pathrequeststyle) && (lower(bucket) != bucket)){ - S3FS_PRN_EXIT("BUCKET %s, name not compatible with virtual-hosted style.", bucket.c_str()); - S3fsCurl::DestroyS3fsCurl(); - s3fs_destroy_global_ssl(); - exit(EXIT_FAILURE); - } - - // check bucket name for illegal characters - found = bucket.find_first_of("/:\\;!@#$%^&*?|+="); - if(found != string::npos){ - S3FS_PRN_EXIT("BUCKET %s -- bucket name contains an illegal character.", bucket.c_str()); - S3fsCurl::DestroyS3fsCurl(); - s3fs_destroy_global_ssl(); - exit(EXIT_FAILURE); - } - - if(!pathrequeststyle && STR2NCMP(host.c_str(), "https://") == 0 && bucket.find_first_of('.') != string::npos) { - S3FS_PRN_EXIT("BUCKET %s -- cannot mount bucket with . 
while using HTTPS without use_path_request_style", bucket.c_str()); - S3fsCurl::DestroyS3fsCurl(); - s3fs_destroy_global_ssl(); - exit(EXIT_FAILURE); - } - - // The second plain argument is the mountpoint - // if the option was given, we all ready checked for a - // readable, non-empty directory, this checks determines - // if the mountpoint option was ever supplied - if(NO_UTILITY_MODE == utility_mode){ - if(mountpoint.empty()){ - S3FS_PRN_EXIT("missing MOUNTPOINT argument."); - show_usage(); - S3fsCurl::DestroyS3fsCurl(); - s3fs_destroy_global_ssl(); - exit(EXIT_FAILURE); - } - } - - // error checking of command line arguments for compatibility - if(S3fsCurl::IsPublicBucket() && S3fsCurl::IsSetAccessKeys()){ - S3FS_PRN_EXIT("specifying both public_bucket and the access keys options is invalid."); - S3fsCurl::DestroyS3fsCurl(); - s3fs_destroy_global_ssl(); - exit(EXIT_FAILURE); - } - if(!passwd_file.empty() && S3fsCurl::IsSetAccessKeys()){ - S3FS_PRN_EXIT("specifying both passwd_file and the access keys options is invalid."); - S3fsCurl::DestroyS3fsCurl(); - s3fs_destroy_global_ssl(); - exit(EXIT_FAILURE); - } - if(!S3fsCurl::IsPublicBucket() && !load_iamrole && !is_ecs){ - if(EXIT_SUCCESS != get_access_keys()){ - S3fsCurl::DestroyS3fsCurl(); - s3fs_destroy_global_ssl(); - exit(EXIT_FAILURE); - } - if(!S3fsCurl::IsSetAccessKeys()){ - S3FS_PRN_EXIT("could not establish security credentials, check documentation."); - S3fsCurl::DestroyS3fsCurl(); - s3fs_destroy_global_ssl(); - exit(EXIT_FAILURE); - } - // More error checking on the access key pair can be done - // like checking for appropriate lengths and characters - } - - // check cache dir permission - if(!FdManager::CheckCacheDirExist() || !FdManager::CheckCacheTopDir() || !CacheFileStat::CheckCacheFileStatTopDir()){ - S3FS_PRN_EXIT("could not allow cache directory permission, check permission of cache directories."); - S3fsCurl::DestroyS3fsCurl(); - s3fs_destroy_global_ssl(); - exit(EXIT_FAILURE); - } - - // 
check IBM IAM requirements - if(is_ibm_iam_auth){ - - // check that default ACL is either public-read or private - acl_t defaultACL = S3fsCurl::GetDefaultAcl(); - if(defaultACL != acl_t::PRIVATE && defaultACL != acl_t::PUBLIC_READ){ - S3FS_PRN_EXIT("can only use 'public-read' or 'private' ACL while using ibm_iam_auth"); - S3fsCurl::DestroyS3fsCurl(); - s3fs_destroy_global_ssl(); - exit(EXIT_FAILURE); - } - - if(create_bucket && !S3fsCurl::IsSetAccessKeyID()){ - S3FS_PRN_EXIT("missing service instance ID for bucket creation"); - S3fsCurl::DestroyS3fsCurl(); - s3fs_destroy_global_ssl(); - exit(EXIT_FAILURE); - } - } - - // set user agent - S3fsCurl::InitUserAgent(); - - // There's room for more command line error checking - - // Check to see if the bucket name contains periods and https (SSL) is - // being used. This is a known limitation: - // https://docs.amazonwebservices.com/AmazonS3/latest/dev/ - // The Developers Guide suggests that either use HTTP of for us to write - // our own certificate verification logic. - // For now, this will be unsupported unless we get a request for it to - // be supported. 
In that case, we have a couple of options: - // - implement a command line option that bypasses the verify host - // but doesn't bypass verifying the certificate - // - write our own host verification (this might be complex) - // See issue #128strncasecmp - /* - if(1 == S3fsCurl::GetSslVerifyHostname()){ - found = bucket.find_first_of("."); + // get program name - emulate basename + program_name.assign(argv[0]); + size_t found = program_name.find_last_of('/'); if(found != string::npos){ - found = host.find("https:"); - if(found != string::npos){ - S3FS_PRN_EXIT("Using https and a bucket name with periods is unsupported."); - exit(1); - } + program_name.replace(0, found+1, ""); } - } - */ - if(NO_UTILITY_MODE != utility_mode){ - int exitcode = s3fs_utility_processing(incomp_abort_time); + while((ch = getopt_long(argc, argv, "dho:fsu", long_opts, &option_index)) != -1){ + switch(ch){ + case 0: + if(strcmp(long_opts[option_index].name, "version") == 0){ + show_version(); + exit(EXIT_SUCCESS); + } + break; + case 'h': + show_help(); + exit(EXIT_SUCCESS); + case 'o': + break; + case 'd': + break; + case 'f': + foreground = true; + break; + case 's': + break; + case 'u': // --incomplete-mpu-list + if(NO_UTILITY_MODE != utility_mode){ + S3FS_PRN_EXIT("already utility mode option is specified."); + exit(EXIT_FAILURE); + } + utility_mode = INCOMP_TYPE_LIST; + break; + case 'a': // --incomplete-mpu-abort + if(NO_UTILITY_MODE != utility_mode){ + S3FS_PRN_EXIT("already utility mode option is specified."); + exit(EXIT_FAILURE); + } + utility_mode = INCOMP_TYPE_ABORT; - S3fsCurl::DestroyS3fsCurl(); + // check expire argument + if(NULL != optarg && 0 == strcasecmp(optarg, "all")){ // all is 0s + incomp_abort_time = 0; + }else if(NULL != optarg){ + if(!convert_unixtime_from_option_arg(optarg, incomp_abort_time)){ + S3FS_PRN_EXIT("--incomplete-mpu-abort option argument is wrong."); + exit(EXIT_FAILURE); + } + } + // if optarg is null, incomp_abort_time is 24H(default) + break; + 
default: + exit(EXIT_FAILURE); + } + } + + // Load SSE environment + if(!S3fsCurl::LoadEnvSse()){ + S3FS_PRN_EXIT("something wrong about SSE environment."); + exit(EXIT_FAILURE); + } + + // ssl init + if(!s3fs_init_global_ssl()){ + S3FS_PRN_EXIT("could not initialize for ssl libraries."); + exit(EXIT_FAILURE); + } + + // init curl (without mime types) + // + // [NOTE] + // The curl initialization here does not load mime types. + // The mime types file parameter are dynamic values according + // to the user's environment, and are analyzed by the my_fuse_opt_proc + // function. + // The my_fuse_opt_proc function is executed after this curl + // initialization. Because the curl method is used in the + // my_fuse_opt_proc function, then it must be called here to + // initialize. Fortunately, the processing using mime types + // is only PUT/POST processing, and it is not used until the + // call of my_fuse_opt_proc function is completed. Therefore, + // the mime type is loaded just after calling the my_fuse_opt_proc + // function. + // + if(!S3fsCurl::InitS3fsCurl()){ + S3FS_PRN_EXIT("Could not initiate curl library."); + s3fs_destroy_global_ssl(); + exit(EXIT_FAILURE); + } + + // clear this structure + memset(&s3fs_oper, 0, sizeof(s3fs_oper)); + + // This is the fuse-style parser for the arguments + // after which the bucket name and mountpoint names + // should have been set + struct fuse_args custom_args = FUSE_ARGS_INIT(argc, argv); + if(0 != fuse_opt_parse(&custom_args, NULL, NULL, my_fuse_opt_proc)){ + S3fsCurl::DestroyS3fsCurl(); + s3fs_destroy_global_ssl(); + exit(EXIT_FAILURE); + } + + // init mime types for curl + if(!S3fsCurl::InitMimeType(mimetype_file)){ + S3FS_PRN_WARN("Missing MIME types prevents setting Content-Type on uploaded objects."); + } + + // [NOTE] + // exclusive option check here. 
+ // + if(storage_class_t::REDUCED_REDUNDANCY == S3fsCurl::GetStorageClass() && !S3fsCurl::IsSseDisable()){ + S3FS_PRN_EXIT("use_sse option could not be specified with storage class reduced_redundancy."); + S3fsCurl::DestroyS3fsCurl(); + s3fs_destroy_global_ssl(); + exit(EXIT_FAILURE); + } + if(!S3fsCurl::FinalCheckSse()){ + S3FS_PRN_EXIT("something wrong about SSE options."); + S3fsCurl::DestroyS3fsCurl(); + s3fs_destroy_global_ssl(); + exit(EXIT_FAILURE); + } + + // The first plain argument is the bucket + if(bucket.empty()){ + S3FS_PRN_EXIT("missing BUCKET argument."); + show_usage(); + S3fsCurl::DestroyS3fsCurl(); + s3fs_destroy_global_ssl(); + exit(EXIT_FAILURE); + } + + // bucket names cannot contain upper case characters in virtual-hosted style + if((!pathrequeststyle) && (lower(bucket) != bucket)){ + S3FS_PRN_EXIT("BUCKET %s, name not compatible with virtual-hosted style.", bucket.c_str()); + S3fsCurl::DestroyS3fsCurl(); + s3fs_destroy_global_ssl(); + exit(EXIT_FAILURE); + } + + // check bucket name for illegal characters + found = bucket.find_first_of("/:\\;!@#$%^&*?|+="); + if(found != string::npos){ + S3FS_PRN_EXIT("BUCKET %s -- bucket name contains an illegal character.", bucket.c_str()); + S3fsCurl::DestroyS3fsCurl(); + s3fs_destroy_global_ssl(); + exit(EXIT_FAILURE); + } + + if(!pathrequeststyle && STR2NCMP(s3host.c_str(), "https://") == 0 && bucket.find_first_of('.') != string::npos) { + S3FS_PRN_EXIT("BUCKET %s -- cannot mount bucket with . 
while using HTTPS without use_path_request_style", bucket.c_str()); + S3fsCurl::DestroyS3fsCurl(); + s3fs_destroy_global_ssl(); + exit(EXIT_FAILURE); + } + + // The second plain argument is the mountpoint + // if the option was given, we all ready checked for a + // readable, non-empty directory, this checks determines + // if the mountpoint option was ever supplied + if(NO_UTILITY_MODE == utility_mode){ + if(mountpoint.empty()){ + S3FS_PRN_EXIT("missing MOUNTPOINT argument."); + show_usage(); + S3fsCurl::DestroyS3fsCurl(); + s3fs_destroy_global_ssl(); + exit(EXIT_FAILURE); + } + } + + // error checking of command line arguments for compatibility + if(S3fsCurl::IsPublicBucket() && S3fsCurl::IsSetAccessKeys()){ + S3FS_PRN_EXIT("specifying both public_bucket and the access keys options is invalid."); + S3fsCurl::DestroyS3fsCurl(); + s3fs_destroy_global_ssl(); + exit(EXIT_FAILURE); + } + if(!passwd_file.empty() && S3fsCurl::IsSetAccessKeys()){ + S3FS_PRN_EXIT("specifying both passwd_file and the access keys options is invalid."); + S3fsCurl::DestroyS3fsCurl(); + s3fs_destroy_global_ssl(); + exit(EXIT_FAILURE); + } + if(!S3fsCurl::IsPublicBucket() && !load_iamrole && !is_ecs){ + if(EXIT_SUCCESS != get_access_keys()){ + S3fsCurl::DestroyS3fsCurl(); + s3fs_destroy_global_ssl(); + exit(EXIT_FAILURE); + } + if(!S3fsCurl::IsSetAccessKeys()){ + S3FS_PRN_EXIT("could not establish security credentials, check documentation."); + S3fsCurl::DestroyS3fsCurl(); + s3fs_destroy_global_ssl(); + exit(EXIT_FAILURE); + } + // More error checking on the access key pair can be done + // like checking for appropriate lengths and characters + } + + // check cache dir permission + if(!FdManager::CheckCacheDirExist() || !FdManager::CheckCacheTopDir() || !CacheFileStat::CheckCacheFileStatTopDir()){ + S3FS_PRN_EXIT("could not allow cache directory permission, check permission of cache directories."); + S3fsCurl::DestroyS3fsCurl(); + s3fs_destroy_global_ssl(); + exit(EXIT_FAILURE); + } + + // 
check IBM IAM requirements + if(is_ibm_iam_auth){ + // check that default ACL is either public-read or private + acl_t defaultACL = S3fsCurl::GetDefaultAcl(); + if(defaultACL != acl_t::PRIVATE && defaultACL != acl_t::PUBLIC_READ){ + S3FS_PRN_EXIT("can only use 'public-read' or 'private' ACL while using ibm_iam_auth"); + S3fsCurl::DestroyS3fsCurl(); + s3fs_destroy_global_ssl(); + exit(EXIT_FAILURE); + } + + if(create_bucket && !S3fsCurl::IsSetAccessKeyID()){ + S3FS_PRN_EXIT("missing service instance ID for bucket creation"); + S3fsCurl::DestroyS3fsCurl(); + s3fs_destroy_global_ssl(); + exit(EXIT_FAILURE); + } + } + + // set user agent + S3fsCurl::InitUserAgent(); + + // There's room for more command line error checking + + // Check to see if the bucket name contains periods and https (SSL) is + // being used. This is a known limitation: + // https://docs.amazonwebservices.com/AmazonS3/latest/dev/ + // The Developers Guide suggests that either use HTTP of for us to write + // our own certificate verification logic. + // For now, this will be unsupported unless we get a request for it to + // be supported. 
In that case, we have a couple of options: + // - implement a command line option that bypasses the verify host + // but doesn't bypass verifying the certificate + // - write our own host verification (this might be complex) + // See issue #128strncasecmp + /* + if(1 == S3fsCurl::GetSslVerifyHostname()){ + found = bucket.find_first_of("."); + if(found != string::npos){ + found = s3host.find("https:"); + if(found != string::npos){ + S3FS_PRN_EXIT("Using https and a bucket name with periods is unsupported."); + exit(1); + } + } + } + */ + + if(NO_UTILITY_MODE != utility_mode){ + int exitcode = s3fs_utility_processing(incomp_abort_time); + + S3fsCurl::DestroyS3fsCurl(); + s3fs_destroy_global_ssl(); + exit(exitcode); + } + + // Check multipart / copy api for mix multipart uploading + if(nomultipart || nocopyapi || norenameapi){ + FdEntity::SetNoMixMultipart(); + } + + // check free disk space + if(!FdManager::IsSafeDiskSpace(NULL, S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount())){ + S3FS_PRN_EXIT("There is no enough disk space for used as cache(or temporary) directory by s3fs."); + S3fsCurl::DestroyS3fsCurl(); + s3fs_destroy_global_ssl(); + exit(EXIT_FAILURE); + } + + s3fs_oper.getattr = s3fs_getattr; + s3fs_oper.readlink = s3fs_readlink; + s3fs_oper.mknod = s3fs_mknod; + s3fs_oper.mkdir = s3fs_mkdir; + s3fs_oper.unlink = s3fs_unlink; + s3fs_oper.rmdir = s3fs_rmdir; + s3fs_oper.symlink = s3fs_symlink; + s3fs_oper.rename = s3fs_rename; + s3fs_oper.link = s3fs_link; + if(!nocopyapi){ + s3fs_oper.chmod = s3fs_chmod; + s3fs_oper.chown = s3fs_chown; + s3fs_oper.utimens = s3fs_utimens; + }else{ + s3fs_oper.chmod = s3fs_chmod_nocopy; + s3fs_oper.chown = s3fs_chown_nocopy; + s3fs_oper.utimens = s3fs_utimens_nocopy; + } + s3fs_oper.truncate = s3fs_truncate; + s3fs_oper.open = s3fs_open; + s3fs_oper.read = s3fs_read; + s3fs_oper.write = s3fs_write; + s3fs_oper.statfs = s3fs_statfs; + s3fs_oper.flush = s3fs_flush; + s3fs_oper.fsync = s3fs_fsync; + s3fs_oper.release 
= s3fs_release; + s3fs_oper.opendir = s3fs_opendir; + s3fs_oper.readdir = s3fs_readdir; + s3fs_oper.init = s3fs_init; + s3fs_oper.destroy = s3fs_destroy; + s3fs_oper.access = s3fs_access; + s3fs_oper.create = s3fs_create; + // extended attributes + if(is_use_xattr){ + s3fs_oper.setxattr = s3fs_setxattr; + s3fs_oper.getxattr = s3fs_getxattr; + s3fs_oper.listxattr = s3fs_listxattr; + s3fs_oper.removexattr = s3fs_removexattr; + } + + // now passing things off to fuse, fuse will finish evaluating the command line args + fuse_res = fuse_main(custom_args.argc, custom_args.argv, &s3fs_oper, NULL); + fuse_opt_free_args(&custom_args); + + // Destroy curl + if(!S3fsCurl::DestroyS3fsCurl()){ + S3FS_PRN_WARN("Could not release curl library."); + } s3fs_destroy_global_ssl(); - exit(exitcode); - } - // Check multipart / copy api for mix multipart uploading - if(nomultipart || nocopyapi || norenameapi){ - FdEntity::SetNoMixMultipart(); - } + // cleanup xml2 + xmlCleanupParser(); + S3FS_MALLOCTRIM(0); - // check free disk space - if(!FdManager::IsSafeDiskSpace(NULL, S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount())){ - S3FS_PRN_EXIT("There is no enough disk space for used as cache(or temporary) directory by s3fs."); - S3fsCurl::DestroyS3fsCurl(); - s3fs_destroy_global_ssl(); - exit(EXIT_FAILURE); - } - - s3fs_oper.getattr = s3fs_getattr; - s3fs_oper.readlink = s3fs_readlink; - s3fs_oper.mknod = s3fs_mknod; - s3fs_oper.mkdir = s3fs_mkdir; - s3fs_oper.unlink = s3fs_unlink; - s3fs_oper.rmdir = s3fs_rmdir; - s3fs_oper.symlink = s3fs_symlink; - s3fs_oper.rename = s3fs_rename; - s3fs_oper.link = s3fs_link; - if(!nocopyapi){ - s3fs_oper.chmod = s3fs_chmod; - s3fs_oper.chown = s3fs_chown; - s3fs_oper.utimens = s3fs_utimens; - }else{ - s3fs_oper.chmod = s3fs_chmod_nocopy; - s3fs_oper.chown = s3fs_chown_nocopy; - s3fs_oper.utimens = s3fs_utimens_nocopy; - } - s3fs_oper.truncate = s3fs_truncate; - s3fs_oper.open = s3fs_open; - s3fs_oper.read = s3fs_read; - s3fs_oper.write = 
s3fs_write; - s3fs_oper.statfs = s3fs_statfs; - s3fs_oper.flush = s3fs_flush; - s3fs_oper.fsync = s3fs_fsync; - s3fs_oper.release = s3fs_release; - s3fs_oper.opendir = s3fs_opendir; - s3fs_oper.readdir = s3fs_readdir; - s3fs_oper.init = s3fs_init; - s3fs_oper.destroy = s3fs_destroy; - s3fs_oper.access = s3fs_access; - s3fs_oper.create = s3fs_create; - // extended attributes - if(is_use_xattr){ - s3fs_oper.setxattr = s3fs_setxattr; - s3fs_oper.getxattr = s3fs_getxattr; - s3fs_oper.listxattr = s3fs_listxattr; - s3fs_oper.removexattr = s3fs_removexattr; - } - - // now passing things off to fuse, fuse will finish evaluating the command line args - fuse_res = fuse_main(custom_args.argc, custom_args.argv, &s3fs_oper, NULL); - fuse_opt_free_args(&custom_args); - - // Destroy curl - if(!S3fsCurl::DestroyS3fsCurl()){ - S3FS_PRN_WARN("Could not release curl library."); - } - s3fs_destroy_global_ssl(); - - // cleanup xml2 - xmlCleanupParser(); - S3FS_MALLOCTRIM(0); - - exit(fuse_res); + exit(fuse_res); } /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/s3fs.h b/src/s3fs.h index 2bff010..29c84f4 100644 --- a/src/s3fs.h +++ b/src/s3fs.h @@ -17,22 +17,21 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ -#ifndef S3FS_S3_H_ -#define S3FS_S3_H_ + +#ifndef S3FS_S3FS_H_ +#define S3FS_S3FS_H_ #define FUSE_USE_VERSION 26 -static const int64_t FIVE_GB = 5LL * 1024LL * 1024LL * 1024LL; - #include #define S3FS_FUSE_EXIT() \ -do{ \ - struct fuse_context* pcxt = fuse_get_context(); \ - if(pcxt){ \ - fuse_exit(pcxt->fuse); \ - } \ -}while(0) + do{ \ + struct fuse_context* pcxt = fuse_get_context(); \ + if(pcxt){ \ + fuse_exit(pcxt->fuse); \ + } \ + }while(0) // [NOTE] // s3fs use many small allocated chunk in heap area for stats @@ -81,13 +80,13 @@ do{ \ S3FS_MALLOCTRIM(0); \ }while(0) -#endif // S3FS_S3_H_ +#endif // S3FS_S3FS_H_ /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/s3fs_auth.h b/src/s3fs_auth.h index bd5af01..a798a0f 100644 --- a/src/s3fs_auth.h +++ b/src/s3fs_auth.h @@ -17,6 +17,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ + #ifndef S3FS_AUTH_H_ #define S3FS_AUTH_H_ @@ -53,9 +54,9 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size); /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/s3fs_global.cpp b/src/s3fs_global.cpp new file mode 100644 index 0000000..7fdbe80 --- /dev/null +++ b/src/s3fs_global.cpp @@ -0,0 +1,51 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Takeshi Nakatani + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include +#include + +//------------------------------------------------------------------- +// Global variables +//------------------------------------------------------------------- +int64_t FIVE_GB = 5LL * 1024LL * 1024LL * 1024LL; +off_t MIN_MULTIPART_SIZE = 5 * 1024 * 1024; + +bool foreground = false; +bool nomultipart = false; +bool pathrequeststyle = false; +bool complement_stat = false; +bool noxmlns = false; +std::string program_name; +std::string service_path = "/"; +std::string s3host = "https://s3.amazonaws.com"; +std::string bucket; +std::string endpoint = "us-east-1"; +std::string cipher_suites; +std::string instance_name; +std::string aws_profile = "default"; + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/s3fs_help.cpp b/src/s3fs_help.cpp new file mode 100644 index 0000000..2c4c3eb --- /dev/null +++ b/src/s3fs_help.cpp @@ -0,0 +1,524 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Takeshi Nakatani + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include +#include + +#include + +#include "common.h" +#include "s3fs.h" +#include "s3fs_help.h" +#include "s3fs_auth.h" + +using namespace std; + +//------------------------------------------------------------------- +// Contents +//------------------------------------------------------------------- +static const char help_string[] = + "\n" + "Mount an Amazon S3 bucket as a file system.\n" + "\n" + "Usage:\n" + " mounting\n" + " s3fs bucket[:/path] mountpoint [options]\n" + " s3fs mountpoint [options (must specify bucket= option)]\n" + "\n" + " unmounting\n" + " umount mountpoint\n" + "\n" + " General forms for s3fs and FUSE/mount options:\n" + " -o opt[,opt...]\n" + " -o opt [-o opt] ...\n" + "\n" + " utility mode (remove interrupted multipart uploading objects)\n" + " s3fs --incomplete-mpu-list (-u) bucket\n" + " s3fs --incomplete-mpu-abort[=all | =] bucket\n" + "\n" + "s3fs Options:\n" + "\n" + " Most s3fs options are given in the form where \"opt\" is:\n" + "\n" + " =\n" + "\n" + " bucket\n" + " - if it is not specified bucket name (and path) in command line,\n" + " must specify this option after -o option for bucket name.\n" + "\n" + " default_acl (default=\"private\")\n" + " - the default canned acl to apply to all written s3 objects,\n" + " e.g., private, public-read. 
see\n" + " https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl\n" + " for the full list of canned acls\n" + "\n" + " retries (default=\"5\")\n" + " - number of times to retry a failed S3 transaction\n" + "\n" + " use_cache (default=\"\" which means disabled)\n" + " - local folder to use for local file cache\n" + "\n" + " check_cache_dir_exist (default is disable)\n" + " - if use_cache is set, check if the cache directory exists.\n" + " If this option is not specified, it will be created at runtime\n" + " when the cache directory does not exist.\n" + "\n" + " del_cache (delete local file cache)\n" + " - delete local file cache when s3fs starts and exits.\n" + "\n" + " storage_class (default=\"standard\")\n" + " - store object with specified storage class. Possible values:\n" + " standard, standard_ia, onezone_ia, reduced_redundancy,\n" + " intelligent_tiering, glacier, and deep_archive.\n" + "\n" + " use_rrs (default is disable)\n" + " - use Amazon's Reduced Redundancy Storage.\n" + " this option can not be specified with use_sse.\n" + " (can specify use_rrs=1 for old version)\n" + " this option has been replaced by new storage_class option.\n" + "\n" + " use_sse (default is disable)\n" + " - Specify three type Amazon's Server-Site Encryption: SSE-S3,\n" + " SSE-C or SSE-KMS. SSE-S3 uses Amazon S3-managed encryption\n" + " keys, SSE-C uses customer-provided encryption keys, and\n" + " SSE-KMS uses the master key which you manage in AWS KMS.\n" + " You can specify \"use_sse\" or \"use_sse=1\" enables SSE-S3\n" + " type (use_sse=1 is old type parameter).\n" + " Case of setting SSE-C, you can specify \"use_sse=custom\",\n" + " \"use_sse=custom:\" or\n" + " \"use_sse=\" (only \n" + " specified is old type parameter). You can use \"c\" for\n" + " short \"custom\".\n" + " The custom key file must be 600 permission. The file can\n" + " have some lines, each line is one SSE-C key. 
The first line\n" + " in file is used as Customer-Provided Encryption Keys for\n" + " uploading and changing headers etc. If there are some keys\n" + " after first line, those are used downloading object which\n" + " are encrypted by not first key. So that, you can keep all\n" + " SSE-C keys in file, that is SSE-C key history.\n" + " If you specify \"custom\" (\"c\") without file path, you\n" + " need to set custom key by load_sse_c option or AWSSSECKEYS\n" + " environment. (AWSSSECKEYS environment has some SSE-C keys\n" + " with \":\" separator.) This option is used to decide the\n" + " SSE type. So that if you do not want to encrypt a object\n" + " object at uploading, but you need to decrypt encrypted\n" + " object at downloading, you can use load_sse_c option instead\n" + " of this option.\n" + " For setting SSE-KMS, specify \"use_sse=kmsid\" or\n" + " \"use_sse=kmsid:\". You can use \"k\" for short \"kmsid\".\n" + " If you san specify SSE-KMS type with your in AWS\n" + " KMS, you can set it after \"kmsid:\" (or \"k:\"). If you\n" + " specify only \"kmsid\" (\"k\"), you need to set AWSSSEKMSID\n" + " environment which value is . You must be careful\n" + " about that you can not use the KMS id which is not same EC2\n" + " region.\n" + "\n" + " load_sse_c - specify SSE-C keys\n" + " Specify the custom-provided encryption keys file path for decrypting\n" + " at downloading.\n" + " If you use the custom-provided encryption key at uploading, you\n" + " specify with \"use_sse=custom\". The file has many lines, one line\n" + " means one custom key. So that you can keep all SSE-C keys in file,\n" + " that is SSE-C key history. 
AWSSSECKEYS environment is as same as this\n" + " file contents.\n" + "\n" + " public_bucket (default=\"\" which means disabled)\n" + " - anonymously mount a public bucket when set to 1, ignores the \n" + " $HOME/.passwd-s3fs and /etc/passwd-s3fs files.\n" + " S3 does not allow copy object api for anonymous users, then\n" + " s3fs sets nocopyapi option automatically when public_bucket=1\n" + " option is specified.\n" + "\n" + " passwd_file (default=\"\")\n" + " - specify which s3fs password file to use\n" + "\n" + " ahbe_conf (default=\"\" which means disabled)\n" + " - This option specifies the configuration file path which\n" + " file is the additional HTTP header by file (object) extension.\n" + " The configuration file format is below:\n" + " -----------\n" + " line = [file suffix or regex] HTTP-header [HTTP-values]\n" + " file suffix = file (object) suffix, if this field is empty,\n" + " it means \"reg:(.*)\".(=all object).\n" + " regex = regular expression to match the file (object) path.\n" + " this type starts with \"reg:\" prefix.\n" + " HTTP-header = additional HTTP header name\n" + " HTTP-values = additional HTTP header value\n" + " -----------\n" + " Sample:\n" + " -----------\n" + " .gz Content-Encoding gzip\n" + " .Z Content-Encoding compress\n" + " reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2\n" + " -----------\n" + " A sample configuration file is uploaded in \"test\" directory.\n" + " If you specify this option for set \"Content-Encoding\" HTTP \n" + " header, please take care for RFC 2616.\n" + "\n" + " profile (default=\"default\")\n" + " - Choose a profile from ${HOME}/.aws/credentials to authenticate\n" + " against S3. 
Note that this format matches the AWS CLI format and\n" + " differs from the s3fs passwd format.\n" + "\n" + " connect_timeout (default=\"300\" seconds)\n" + " - time to wait for connection before giving up\n" + "\n" + " readwrite_timeout (default=\"120\" seconds)\n" + " - time to wait between read/write activity before giving up\n" + "\n" + " list_object_max_keys (default=\"1000\")\n" + " - specify the maximum number of keys returned by S3 list object\n" + " API. The default is 1000. you can set this value to 1000 or more.\n" + "\n" + " max_stat_cache_size (default=\"100,000\" entries (about 40MB))\n" + " - maximum number of entries in the stat cache, and this maximum is\n" + " also treated as the number of symbolic link cache.\n" + "\n" + " stat_cache_expire (default is 900))\n" + " - specify expire time (seconds) for entries in the stat cache.\n" + " This expire time indicates the time since stat cached. and this\n" + " is also set to the expire time of the symbolic link cache.\n" + "\n" + " stat_cache_interval_expire (default is 900)\n" + " - specify expire time (seconds) for entries in the stat cache(and\n" + " symbolic link cache).\n" + " This expire time is based on the time from the last access time\n" + " of the stat cache. This option is exclusive with stat_cache_expire,\n" + " and is left for compatibility with older versions.\n" + "\n" + " enable_noobj_cache (default is disable)\n" + " - enable cache entries for the object which does not exist.\n" + " s3fs always has to check whether file (or sub directory) exists \n" + " under object (path) when s3fs does some command, since s3fs has \n" + " recognized a directory which does not exist and has files or \n" + " sub directories under itself. 
It increases ListBucket request \n" + " and makes performance bad.\n" + " You can specify this option for performance, s3fs memorizes \n" + " in stat cache that the object (file or directory) does not exist.\n" + "\n" + " no_check_certificate\n" + " - server certificate won't be checked against the available \n" + " certificate authorities.\n" + "\n" + " ssl_verify_hostname (default=\"2\")\n" + " - When 0, do not verify the SSL certificate against the hostname.\n" + "\n" + " nodnscache (disable DNS cache)\n" + " - s3fs is always using DNS cache, this option make DNS cache disable.\n" + "\n" + " nosscache (disable SSL session cache)\n" + " - s3fs is always using SSL session cache, this option make SSL \n" + " session cache disable.\n" + "\n" + " multireq_max (default=\"20\")\n" + " - maximum number of parallel request for listing objects.\n" + "\n" + " parallel_count (default=\"5\")\n" + " - number of parallel request for uploading big objects.\n" + " s3fs uploads large object (over 20MB) by multipart post request, \n" + " and sends parallel requests.\n" + " This option limits parallel request count which s3fs requests \n" + " at once. It is necessary to set this value depending on a CPU \n" + " and a network band.\n" + "\n" + " multipart_size (default=\"10\")\n" + " - part size, in MB, for each multipart request.\n" + " The minimum value is 5 MB and the maximum value is 5 GB.\n" + "\n" + " ensure_diskfree (default 0)\n" + " - sets MB to ensure disk free space. This option means the\n" + " threshold of free space size on disk which is used for the\n" + " cache file by s3fs. s3fs makes file for\n" + " downloading, uploading and caching files. 
If the disk free\n" + " space is smaller than this value, s3fs do not use diskspace\n" + " as possible in exchange for the performance.\n" + "\n" + " singlepart_copy_limit (default=\"512\")\n" + " - maximum size, in MB, of a single-part copy before trying \n" + " multipart copy.\n" + "\n" + " host (default=\"https://s3.amazonaws.com\")\n" + " - Set a non-Amazon host, e.g., https://example.com.\n" + "\n" + " servicepath (default=\"/\")\n" + " - Set a service path when the non-Amazon host requires a prefix.\n" + "\n" + " url (default=\"https://s3.amazonaws.com\")\n" + " - sets the url to use to access Amazon S3. If you want to use HTTP,\n" + " then you can set \"url=http://s3.amazonaws.com\".\n" + " If you do not use https, please specify the URL with the url\n" + " option.\n" + "\n" + " endpoint (default=\"us-east-1\")\n" + " - sets the endpoint to use on signature version 4\n" + " If this option is not specified, s3fs uses \"us-east-1\" region as\n" + " the default. If the s3fs could not connect to the region specified\n" + " by this option, s3fs could not run. But if you do not specify this\n" + " option, and if you can not connect with the default region, s3fs\n" + " will retry to automatically connect to the other region. So s3fs\n" + " can know the correct region name, because s3fs can find it in an\n" + " error from the S3 server.\n" + "\n" + " sigv2 (default is signature version 4)\n" + " - sets signing AWS requests by using Signature Version 2\n" + "\n" + " mp_umask (default is \"0000\")\n" + " - sets umask for the mount point directory.\n" + " If allow_other option is not set, s3fs allows access to the mount\n" + " point only to the owner. In the opposite case s3fs allows access\n" + " to all users as the default. But if you set the allow_other with\n" + " this option, you can control the permissions of the\n" + " mount point by this option like umask.\n" + "\n" + " umask (default is \"0000\")\n" + " - sets umask for files under the mountpoint. 
This can allow\n" + " users other than the mounting user to read and write to files\n" + " that they did not create.\n" + "\n" + " nomultipart (disable multipart uploads)\n" + "\n" + " enable_content_md5 (default is disable)\n" + " Allow S3 server to check data integrity of uploads via the\n" + " Content-MD5 header. This can add CPU overhead to transfers.\n" + "\n" + " ecs (default is disable)\n" + " - This option instructs s3fs to query the ECS container credential\n" + " metadata address instead of the instance metadata address.\n" + "\n" + " iam_role (default is no IAM role)\n" + " - This option requires the IAM role name or \"auto\". If you specify\n" + " \"auto\", s3fs will automatically use the IAM role names that are set\n" + " to an instance. If you specify this option without any argument, it\n" + " is the same as that you have specified the \"auto\".\n" + "\n" + " ibm_iam_auth (default is not using IBM IAM authentication)\n" + " - This option instructs s3fs to use IBM IAM authentication.\n" + " In this mode, the AWSAccessKey and AWSSecretKey will be used as\n" + " IBM's Service-Instance-ID and APIKey, respectively.\n" + "\n" + " ibm_iam_endpoint (default is https://iam.bluemix.net)\n" + " - sets the URL to use for IBM IAM authentication.\n" + "\n" + " use_xattr (default is not handling the extended attribute)\n" + " Enable to handle the extended attribute (xattrs).\n" + " If you set this option, you can use the extended attribute.\n" + " For example, encfs and ecryptfs need to support the extended attribute.\n" + " Notice: if s3fs handles the extended attribute, s3fs can not work to\n" + " copy command with preserve=mode.\n" + "\n" + " noxmlns (disable registering xml name space)\n" + " disable registering xml name space for response of \n" + " ListBucketResult and ListVersionsResult etc. 
Default name \n" + " space is looked up from \"http://s3.amazonaws.com/doc/2006-03-01\".\n" + " This option should not be specified now, because s3fs looks up\n" + " xmlns automatically after v1.66.\n" + "\n" + " nomixupload (disable copy in multipart uploads)\n" + " Disable to use PUT (copy api) when multipart uploading large size objects.\n" + " By default, when doing multipart upload, the range of unchanged data\n" + " will use PUT (copy api) whenever possible.\n" + " When nocopyapi or norenameapi is specified, use of PUT (copy api) is\n" + " invalidated even if this option is not specified.\n" + "\n" + " nocopyapi (for other incomplete compatibility object storage)\n" + " For a distributed object storage which is compatibility S3\n" + " API without PUT (copy api).\n" + " If you set this option, s3fs do not use PUT with \n" + " \"x-amz-copy-source\" (copy api). Because traffic is increased\n" + " 2-3 times by this option, we do not recommend this.\n" + "\n" + " norenameapi (for other incomplete compatibility object storage)\n" + " For a distributed object storage which is compatibility S3\n" + " API without PUT (copy api).\n" + " This option is a subset of nocopyapi option. The nocopyapi\n" + " option does not use copy-api for all command (ex. chmod, chown,\n" + " touch, mv, etc), but this option does not use copy-api for\n" + " only rename command (ex. mv). 
If this option is specified with\n" + " nocopyapi, then s3fs ignores it.\n" + "\n" + " use_path_request_style (use legacy API calling style)\n" + " Enable compatibility with S3-like APIs which do not support\n" + " the virtual-host request style, by using the older path request\n" + " style.\n" + "\n" + " noua (suppress User-Agent header)\n" + " Usually s3fs outputs of the User-Agent in \"s3fs/ (commit\n" + " hash ; )\" format.\n" + " If this option is specified, s3fs suppresses the output of the\n" + " User-Agent.\n" + "\n" + " cipher_suites\n" + " Customize the list of TLS cipher suites.\n" + " Expects a colon separated list of cipher suite names.\n" + " A list of available cipher suites, depending on your TLS engine,\n" + " can be found on the CURL library documentation:\n" + " https://curl.haxx.se/docs/ssl-ciphers.html\n" + "\n" + " instance_name - The instance name of the current s3fs mountpoint.\n" + " This name will be added to logging messages and user agent headers sent by s3fs.\n" + "\n" + " complement_stat (complement lack of file/directory mode)\n" + " s3fs complements lack of information about file/directory mode\n" + " if a file or a directory object does not have x-amz-meta-mode\n" + " header. As default, s3fs does not complements stat information\n" + " for a object, then the object will not be able to be allowed to\n" + " list/modify.\n" + "\n" + " notsup_compat_dir (not support compatibility directory types)\n" + " As a default, s3fs supports objects of the directory type as\n" + " much as possible and recognizes them as directories.\n" + " Objects that can be recognized as directory objects are \"dir/\",\n" + " \"dir\", \"dir_$folder$\", and there is a file object that does\n" + " not have a directory object but contains that directory path.\n" + " s3fs needs redundant communication to support all these\n" + " directory types. The object as the directory created by s3fs\n" + " is \"dir/\". 
By restricting s3fs to recognize only \"dir/\" as\n" + " a directory, communication traffic can be reduced. This option\n" + " is used to give this restriction to s3fs.\n" + " However, if there is a directory object other than \"dir/\" in\n" + " the bucket, specifying this option is not recommended. s3fs may\n" + " not be able to recognize the object correctly if an object\n" + " created by s3fs exists in the bucket.\n" + " Please use this option when the directory in the bucket is\n" + " only \"dir/\" object.\n" + "\n" + " use_wtf8 - support arbitrary file system encoding.\n" + " S3 requires all object names to be valid UTF-8. But some\n" + " clients, notably Windows NFS clients, use their own encoding.\n" + " This option re-encodes invalid UTF-8 object names into valid\n" + " UTF-8 by mapping offending codes into a 'private' codepage of the\n" + " Unicode set.\n" + " Useful on clients not using UTF-8 as their file system encoding.\n" + "\n" + " use_session_token - indicate that session token should be provided.\n" + " If credentials are provided by environment variables this switch\n" + " forces presence check of AWSSESSIONTOKEN variable.\n" + " Otherwise an error is returned.\n" + "\n" + " requester_pays (default is disable)\n" + " This option instructs s3fs to enable requests involving\n" + " Requester Pays buckets.\n" + " It includes the 'x-amz-request-payer=requester' entry in the\n" + " request header.\n" + "\n" + " mime (default is \"/etc/mime.types\")\n" + " Specify the path of the mime.types file.\n" + " If this option is not specified, the existence of \"/etc/mime.types\"\n" + " is checked, and that file is loaded as mime information.\n" + " If this file does not exist on macOS, then \"/etc/apache2/mime.types\"\n" + " is checked as well.\n" + "\n" + " dbglevel (default=\"crit\")\n" + " Set the debug message level. set value as crit (critical), err\n" + " (error), warn (warning), info (information) to debug level.\n" + " default debug level is critical. 
If s3fs run with \"-d\" option,\n" + " the debug level is set information. When s3fs catch the signal\n" + " SIGUSR2, the debug level is bumpup.\n" + "\n" + " curldbg - put curl debug message\n" + " Put the debug message from libcurl when this option is specified.\n" + " Specify \"normal\" or \"body\" for the parameter.\n" + " If the parameter is omitted, it is the same as \"normal\".\n" + " If \"body\" is specified, some API communication body data will be\n" + " output in addition to the debug message output as \"normal\".\n" + "\n" + " set_check_cache_sigusr1 (default is stdout)\n" + " If the cache is enabled, you can check the integrity of the\n" + " cache file and the cache file's stats info file.\n" + " This option is specified and when sending the SIGUSR1 signal\n" + " to the s3fs process checks the cache status at that time.\n" + " This option can take a file path as parameter to output the\n" + " check result to that file. The file path parameter can be omitted.\n" + " If omitted, the result will be output to stdout or syslog.\n" + "\n" + "FUSE/mount Options:\n" + "\n" + " Most of the generic mount options described in 'man mount' are\n" + " supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime,\n" + " noatime, sync async, dirsync). Filesystems are mounted with\n" + " '-onodev,nosuid' by default, which can only be overridden by a\n" + " privileged user.\n" + " \n" + " There are many FUSE specific mount options that can be specified.\n" + " e.g. allow_other See the FUSE's README for the full set.\n" + "\n" + "Utility mode Options:\n" + "\n" + " -u, --incomplete-mpu-list\n" + " Lists multipart incomplete objects uploaded to the specified\n" + " bucket.\n" + " --incomplete-mpu-abort (=all or =)\n" + " Delete the multipart incomplete object uploaded to the specified\n" + " bucket.\n" + " If \"all\" is specified for this option, all multipart incomplete\n" + " objects will be deleted. 
If you specify no argument as an option,\n" + " objects older than 24 hours (24H) will be deleted (This is the\n" + " default value). You can specify an optional date format. It can\n" + " be specified as year, month, day, hour, minute, second, and it is\n" + " expressed as \"Y\", \"M\", \"D\", \"h\", \"m\", \"s\" respectively.\n" + " For example, \"1Y6M10D12h30m30s\".\n" + "\n" + "Miscellaneous Options:\n" + "\n" + " -h, --help Output this help.\n" + " --version Output version info.\n" + " -d --debug Turn on DEBUG messages to syslog. Specifying -d\n" + " twice turns on FUSE debug messages to STDOUT.\n" + " -f FUSE foreground option - do not run as daemon.\n" + " -s FUSE single-threaded option\n" + " disable multi-threaded operation\n" + "\n" + "\n" + "s3fs home page: \n" + ; + +//------------------------------------------------------------------- +// Functions +//------------------------------------------------------------------- +void show_usage() +{ + printf("Usage: %s BUCKET:[PATH] MOUNTPOINT [OPTION]...\n", program_name.c_str()); +} + +void show_help() +{ + show_usage(); + printf(help_string); +} + +void show_version() +{ + printf( + "Amazon Simple Storage Service File System V%s (commit:%s) with %s\n" + "Copyright (C) 2010 Randy Rizun \n" + "License GPL2: GNU GPL version 2 \n" + "This is free software: you are free to change and redistribute it.\n" + "There is NO WARRANTY, to the extent permitted by law.\n", + VERSION, COMMIT_HASH_VAL, s3fs_crypt_lib_name()); +} + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/s3fs_help.h b/src/s3fs_help.h new file mode 100644 index 0000000..844603f --- /dev/null +++ b/src/s3fs_help.h @@ -0,0 +1,40 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Randy Rizun + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU 
General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef S3FS_S3FS_HELP_H_ +#define S3FS_S3FS_HELP_H_ + +//------------------------------------------------------------------- +// Functions +//------------------------------------------------------------------- +void show_usage(void); +void show_help(void); +void show_version(void); + +#endif // S3FS_S3FS_HELP_H_ + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/s3fs_logger.cpp b/src/s3fs_logger.cpp new file mode 100644 index 0000000..b520911 --- /dev/null +++ b/src/s3fs_logger.cpp @@ -0,0 +1,36 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Randy Rizun + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "s3fs_logger.h" + +//------------------------------------------------------------------- +// Global variables +//------------------------------------------------------------------- +s3fs_log_level debug_level = S3FS_LOG_CRIT; +const char* s3fs_log_nest[S3FS_LOG_NEST_MAX] = {"", " ", " ", " "}; + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/s3fs_logger.h b/src/s3fs_logger.h new file mode 100644 index 0000000..a887376 --- /dev/null +++ b/src/s3fs_logger.h @@ -0,0 +1,154 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Randy Rizun + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef S3FS_LOGGER_H_ +#define S3FS_LOGGER_H_ + +#include + +//------------------------------------------------------------------- +// Debug level +//------------------------------------------------------------------- +enum s3fs_log_level{ + S3FS_LOG_CRIT = 0, // LOG_CRIT + S3FS_LOG_ERR = 1, // LOG_ERR + S3FS_LOG_WARN = 3, // LOG_WARNING + S3FS_LOG_INFO = 7, // LOG_INFO + S3FS_LOG_DBG = 15 // LOG_DEBUG +}; + +//------------------------------------------------------------------- +// Debug macros +//------------------------------------------------------------------- +#define IS_S3FS_LOG_CRIT() (S3FS_LOG_CRIT == debug_level) +#define IS_S3FS_LOG_ERR() (S3FS_LOG_ERR == (debug_level & S3FS_LOG_DBG)) +#define IS_S3FS_LOG_WARN() (S3FS_LOG_WARN == (debug_level & S3FS_LOG_DBG)) +#define IS_S3FS_LOG_INFO() (S3FS_LOG_INFO == (debug_level & S3FS_LOG_DBG)) +#define IS_S3FS_LOG_DBG() (S3FS_LOG_DBG == (debug_level & S3FS_LOG_DBG)) + +#define S3FS_LOG_LEVEL_TO_SYSLOG(level) \ + ( S3FS_LOG_DBG == (level & S3FS_LOG_DBG) ? LOG_DEBUG : \ + S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? LOG_INFO : \ + S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? LOG_WARNING : \ + S3FS_LOG_ERR == (level & S3FS_LOG_DBG) ? LOG_ERR : LOG_CRIT ) + +#define S3FS_LOG_LEVEL_STRING(level) \ + ( S3FS_LOG_DBG == (level & S3FS_LOG_DBG) ? "[DBG] " : \ + S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? "[INF] " : \ + S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? "[WAN] " : \ + S3FS_LOG_ERR == (level & S3FS_LOG_DBG) ? "[ERR] " : "[CRT] " ) + +#define S3FS_LOG_NEST_MAX 4 +#define S3FS_LOG_NEST(nest) (nest < S3FS_LOG_NEST_MAX ? s3fs_log_nest[nest] : s3fs_log_nest[S3FS_LOG_NEST_MAX - 1]) + +#define S3FS_LOW_LOGPRN(level, fmt, ...) 
\ + do{ \ + if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \ + if(foreground){ \ + fprintf(stdout, "%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), __FILE__, __func__, __LINE__, __VA_ARGS__); \ + }else{ \ + syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s%s:%s(%d): " fmt "%s", instance_name.c_str(), __FILE__, __func__, __LINE__, __VA_ARGS__); \ + } \ + } \ + }while(0) + +#define S3FS_LOW_LOGPRN2(level, nest, fmt, ...) \ + do{ \ + if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \ + if(foreground){ \ + fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), S3FS_LOG_NEST(nest), __FILE__, __func__, __LINE__, __VA_ARGS__); \ + }else{ \ + syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s%s" fmt "%s", instance_name.c_str(), S3FS_LOG_NEST(nest), __VA_ARGS__); \ + } \ + } \ + }while(0) + +#define S3FS_LOW_CURLDBG(fmt, ...) \ + do{ \ + if(foreground){ \ + fprintf(stdout, "[CURL DBG] " fmt "%s\n", __VA_ARGS__); \ + }else{ \ + syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "%s" fmt "%s", instance_name.c_str(), __VA_ARGS__); \ + } \ + }while(0) + +#define S3FS_LOW_LOGPRN_EXIT(fmt, ...) \ + do{ \ + if(foreground){ \ + fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \ + }else{ \ + fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \ + syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "%ss3fs: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \ + } \ + }while(0) + +// Special macro for init message +#define S3FS_PRN_INIT_INFO(fmt, ...) \ + do{ \ + if(foreground){ \ + fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(S3FS_LOG_INFO), S3FS_LOG_NEST(0), __FILE__, __func__, __LINE__, __VA_ARGS__, ""); \ + }else{ \ + syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_INFO), "%s%s" fmt "%s", instance_name.c_str(), S3FS_LOG_NEST(0), __VA_ARGS__, ""); \ + } \ + }while(0) + +// Special macro for checking cache files +#define S3FS_LOW_CACHE(fp, fmt, ...) 
\ + do{ \ + if(foreground){ \ + fprintf(fp, fmt "%s\n", __VA_ARGS__); \ + }else{ \ + syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_INFO), "%s: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \ + } \ + }while(0) + +// [NOTE] +// small trick for VA_ARGS +// +#define S3FS_PRN_EXIT(fmt, ...) S3FS_LOW_LOGPRN_EXIT(fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_CRIT(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_CRIT, fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_ERR(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_ERR, fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_WARN(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_WARN, fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_DBG(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_DBG, fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_INFO(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 0, fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_INFO0(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_INFO, fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_INFO1(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 1, fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_INFO2(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 2, fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_INFO3(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 3, fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_CURL(fmt, ...) S3FS_LOW_CURLDBG(fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_CACHE(fp, ...) 
S3FS_LOW_CACHE(fp, ##__VA_ARGS__, "") + +//------------------------------------------------------------------- +// Global variables +//------------------------------------------------------------------- +// TODO: namespace these +extern s3fs_log_level debug_level; +extern const char* s3fs_log_nest[S3FS_LOG_NEST_MAX]; + +#endif // S3FS_LOGGER_H_ + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/s3fs_util.cpp b/src/s3fs_util.cpp index 2a281b2..7ccaa78 100644 --- a/src/s3fs_util.cpp +++ b/src/s3fs_util.cpp @@ -20,32 +20,22 @@ #include #include -#include #include #include -#include -#include -#include #include -#include -#include +#include +#include +#include #include #include -#include -#include -#include -#include #include #include -#include -#include #include "common.h" +#include "s3fs.h" #include "s3fs_util.h" #include "string_util.h" -#include "s3fs.h" -#include "s3fs_auth.h" using namespace std; @@ -58,442 +48,43 @@ static size_t max_password_size; static size_t max_group_name_length; //------------------------------------------------------------------- -// Utility +// Utilities //------------------------------------------------------------------- -string get_realpath(const char *path) { - string realpath = mount_prefix; - realpath += path; - - return realpath; -} - -//------------------------------------------------------------------- -// Class S3ObjList -//------------------------------------------------------------------- -// New class S3ObjList is base on old s3_object struct. -// This class is for S3 compatible clients. -// -// If name is terminated by "/", it is forced dir type. -// If name is terminated by "_$folder$", it is forced dir type. -// If is_dir is true and name is not terminated by "/", the name is added "/". 
-// -bool S3ObjList::insert(const char* name, const char* etag, bool is_dir) +string get_realpath(const char *path) { - if(!name || '\0' == name[0]){ - return false; - } + string realpath = mount_prefix; + realpath += path; - s3obj_t::iterator iter; - string newname; - string orgname = name; - - // Normalization - string::size_type pos = orgname.find("_$folder$"); - if(string::npos != pos){ - newname = orgname.substr(0, pos); - is_dir = true; - }else{ - newname = orgname; - } - if(is_dir){ - if('/' != newname[newname.length() - 1]){ - newname += "/"; - } - }else{ - if('/' == newname[newname.length() - 1]){ - is_dir = true; - } - } - - // Check derived name object. - if(is_dir){ - string chkname = newname.substr(0, newname.length() - 1); - if(objects.end() != (iter = objects.find(chkname))){ - // found "dir" object --> remove it. - objects.erase(iter); - } - }else{ - string chkname = newname + "/"; - if(objects.end() != (iter = objects.find(chkname))){ - // found "dir/" object --> not add new object. - // and add normalization - return insert_normalized(orgname.c_str(), chkname.c_str(), true); - } - } - - // Add object - if(objects.end() != (iter = objects.find(newname))){ - // Found same object --> update information. 
- (*iter).second.normalname.erase(); - (*iter).second.orgname = orgname; - (*iter).second.is_dir = is_dir; - if(etag){ - (*iter).second.etag = string(etag); // over write - } - }else{ - // add new object - s3obj_entry newobject; - newobject.orgname = orgname; - newobject.is_dir = is_dir; - if(etag){ - newobject.etag = etag; - } - objects[newname] = newobject; - } - - // add normalization - return insert_normalized(orgname.c_str(), newname.c_str(), is_dir); -} - -bool S3ObjList::insert_normalized(const char* name, const char* normalized, bool is_dir) -{ - if(!name || '\0' == name[0] || !normalized || '\0' == normalized[0]){ - return false; - } - if(0 == strcmp(name, normalized)){ - return true; - } - - s3obj_t::iterator iter; - if(objects.end() != (iter = objects.find(name))){ - // found name --> over write - iter->second.orgname.erase(); - iter->second.etag.erase(); - iter->second.normalname = normalized; - iter->second.is_dir = is_dir; - }else{ - // not found --> add new object - s3obj_entry newobject; - newobject.normalname = normalized; - newobject.is_dir = is_dir; - objects[name] = newobject; - } - return true; -} - -const s3obj_entry* S3ObjList::GetS3Obj(const char* name) const -{ - s3obj_t::const_iterator iter; - - if(!name || '\0' == name[0]){ - return NULL; - } - if(objects.end() == (iter = objects.find(name))){ - return NULL; - } - return &((*iter).second); -} - -string S3ObjList::GetOrgName(const char* name) const -{ - const s3obj_entry* ps3obj; - - if(!name || '\0' == name[0]){ - return string(""); - } - if(NULL == (ps3obj = GetS3Obj(name))){ - return string(""); - } - return ps3obj->orgname; -} - -string S3ObjList::GetNormalizedName(const char* name) const -{ - const s3obj_entry* ps3obj; - - if(!name || '\0' == name[0]){ - return string(""); - } - if(NULL == (ps3obj = GetS3Obj(name))){ - return string(""); - } - if(0 == (ps3obj->normalname).length()){ - return string(name); - } - return ps3obj->normalname; -} - -string S3ObjList::GetETag(const char* 
name) const -{ - const s3obj_entry* ps3obj; - - if(!name || '\0' == name[0]){ - return string(""); - } - if(NULL == (ps3obj = GetS3Obj(name))){ - return string(""); - } - return ps3obj->etag; -} - -bool S3ObjList::IsDir(const char* name) const -{ - const s3obj_entry* ps3obj; - - if(NULL == (ps3obj = GetS3Obj(name))){ - return false; - } - return ps3obj->is_dir; -} - -bool S3ObjList::GetLastName(std::string& lastname) const -{ - bool result = false; - lastname = ""; - for(s3obj_t::const_iterator iter = objects.begin(); iter != objects.end(); ++iter){ - if((*iter).second.orgname.length()){ - if(0 > strcmp(lastname.c_str(), (*iter).second.orgname.c_str())){ - lastname = (*iter).second.orgname; - result = true; - } - }else{ - if(0 > strcmp(lastname.c_str(), (*iter).second.normalname.c_str())){ - lastname = (*iter).second.normalname; - result = true; - } - } - } - return result; -} - -bool S3ObjList::GetNameList(s3obj_list_t& list, bool OnlyNormalized, bool CutSlash) const -{ - s3obj_t::const_iterator iter; - - for(iter = objects.begin(); objects.end() != iter; ++iter){ - if(OnlyNormalized && 0 != (*iter).second.normalname.length()){ - continue; - } - string name = (*iter).first; - if(CutSlash && 1 < name.length() && '/' == name[name.length() - 1]){ - // only "/" string is skipped this. 
- name = name.substr(0, name.length() - 1); - } - list.push_back(name); - } - return true; -} - -typedef std::map s3obj_h_t; - -bool S3ObjList::MakeHierarchizedList(s3obj_list_t& list, bool haveSlash) -{ - s3obj_h_t h_map; - s3obj_h_t::iterator hiter; - s3obj_list_t::const_iterator liter; - - for(liter = list.begin(); list.end() != liter; ++liter){ - string strtmp = (*liter); - if(1 < strtmp.length() && '/' == strtmp[strtmp.length() - 1]){ - strtmp = strtmp.substr(0, strtmp.length() - 1); - } - h_map[strtmp] = true; - - // check hierarchized directory - for(string::size_type pos = strtmp.find_last_of('/'); string::npos != pos; pos = strtmp.find_last_of('/')){ - strtmp = strtmp.substr(0, pos); - if(0 == strtmp.length() || "/" == strtmp){ - break; - } - if(h_map.end() == h_map.find(strtmp)){ - // not found - h_map[strtmp] = false; - } - } - } - - // check map and add lost hierarchized directory. - for(hiter = h_map.begin(); hiter != h_map.end(); ++hiter){ - if(false == (*hiter).second){ - // add hierarchized directory. 
- string strtmp = (*hiter).first; - if(haveSlash){ - strtmp += "/"; - } - list.push_back(strtmp); - } - } - return true; -} - -//------------------------------------------------------------------- -// Utility functions for moving objects -//------------------------------------------------------------------- -MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, bool normdir) -{ - MVNODE *p; - char *p_old_path; - char *p_new_path; - - p = new MVNODE(); - - if(NULL == (p_old_path = strdup(old_path))){ - delete p; - printf("create_mvnode: could not allocation memory for p_old_path\n"); - S3FS_FUSE_EXIT(); - return NULL; - } - - if(NULL == (p_new_path = strdup(new_path))){ - delete p; - free(p_old_path); - printf("create_mvnode: could not allocation memory for p_new_path\n"); - S3FS_FUSE_EXIT(); - return NULL; - } - - p->old_path = p_old_path; - p->new_path = p_new_path; - p->is_dir = is_dir; - p->is_normdir = normdir; - p->prev = NULL; - p->next = NULL; - return p; -} - -// -// Add sorted MVNODE data(Ascending order) -// -MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const char *new_path, bool is_dir, bool normdir) -{ - if(!head || !tail){ - return NULL; - } - - MVNODE* cur; - MVNODE* mvnew; - for(cur = *head; cur; cur = cur->next){ - if(cur->is_dir == is_dir){ - int nResult = strcmp(cur->old_path, old_path); - if(0 == nResult){ - // Found same old_path. - return cur; - - }else if(0 > nResult){ - // next check. - // ex: cur("abc"), mvnew("abcd") - // ex: cur("abc"), mvnew("abd") - continue; - - }else{ - // Add into before cur-pos. - // ex: cur("abc"), mvnew("ab") - // ex: cur("abc"), mvnew("abb") - if(NULL == (mvnew = create_mvnode(old_path, new_path, is_dir, normdir))){ - return NULL; - } - if(cur->prev){ - (cur->prev)->next = mvnew; - }else{ - *head = mvnew; - } - mvnew->prev = cur->prev; - mvnew->next = cur; - cur->prev = mvnew; - - return mvnew; - } - } - } - // Add into tail. 
- if(NULL == (mvnew = create_mvnode(old_path, new_path, is_dir, normdir))){ - return NULL; - } - mvnew->prev = (*tail); - if(*tail){ - (*tail)->next = mvnew; - } - (*tail) = mvnew; - if(!(*head)){ - (*head) = mvnew; - } - return mvnew; -} - -void free_mvnodes(MVNODE *head) -{ - MVNODE *my_head; - MVNODE *next; - - for(my_head = head, next = NULL; my_head; my_head = next){ - next = my_head->next; - free(my_head->old_path); - free(my_head->new_path); - delete my_head; - } -} - -//------------------------------------------------------------------- -// Class AutoLock -//------------------------------------------------------------------- -AutoLock::AutoLock(pthread_mutex_t* pmutex, Type type) : auto_mutex(pmutex) -{ - if (type == ALREADY_LOCKED) { - is_lock_acquired = false; - } else if (type == NO_WAIT) { - int res = pthread_mutex_trylock(auto_mutex); - if(res == 0){ - is_lock_acquired = true; - }else if(res == EBUSY){ - is_lock_acquired = false; - }else{ - S3FS_PRN_CRIT("pthread_mutex_trylock returned: %d", res); - abort(); - } - } else { - int res = pthread_mutex_lock(auto_mutex); - if(res == 0){ - is_lock_acquired = true; - }else{ - S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res); - abort(); - } - } -} - -bool AutoLock::isLockAcquired() const -{ - return is_lock_acquired; -} - -AutoLock::~AutoLock() -{ - if (is_lock_acquired) { - int res = pthread_mutex_unlock(auto_mutex); - if(res != 0){ - S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res); - abort(); - } - } + return realpath; } void init_sysconf_vars() { - // SUSv4tc1 says the following about _SC_GETGR_R_SIZE_MAX and - // _SC_GETPW_R_SIZE_MAX: - // Note that sysconf(_SC_GETGR_R_SIZE_MAX) may return -1 if - // there is no hard limit on the size of the buffer needed to - // store all the groups returned. 
+ // SUSv4tc1 says the following about _SC_GETGR_R_SIZE_MAX and + // _SC_GETPW_R_SIZE_MAX: + // Note that sysconf(_SC_GETGR_R_SIZE_MAX) may return -1 if + // there is no hard limit on the size of the buffer needed to + // store all the groups returned. - long res = sysconf(_SC_GETPW_R_SIZE_MAX); - if(0 > res){ - if (errno != 0){ - S3FS_PRN_WARN("could not get max pw length."); - abort(); + long res = sysconf(_SC_GETPW_R_SIZE_MAX); + if(0 > res){ + if (errno != 0){ + S3FS_PRN_WARN("could not get max pw length."); + abort(); + } + res = 1024; // default initial length } - res = 1024; // default initial length - } - max_password_size = res; + max_password_size = res; - res = sysconf(_SC_GETGR_R_SIZE_MAX); - if(0 > res) { - if (errno != 0) { - S3FS_PRN_ERR("could not get max name length."); - abort(); + res = sysconf(_SC_GETGR_R_SIZE_MAX); + if(0 > res) { + if (errno != 0) { + S3FS_PRN_ERR("could not get max name length."); + abort(); + } + res = 1024; // default initial length } - res = 1024; // default initial length - } - max_group_name_length = res; + max_group_name_length = res; } //------------------------------------------------------------------- @@ -502,251 +93,251 @@ void init_sysconf_vars() // get user name from uid string get_username(uid_t uid) { - size_t maxlen = max_password_size; - int result; - char* pbuf; - struct passwd pwinfo; - struct passwd* ppwinfo = NULL; + size_t maxlen = max_password_size; + int result; + char* pbuf; + struct passwd pwinfo; + struct passwd* ppwinfo = NULL; - // make buffer - pbuf = new char[maxlen]; - // get pw information - while(ERANGE == (result = getpwuid_r(uid, &pwinfo, pbuf, maxlen, &ppwinfo))){ - delete[] pbuf; - maxlen *= 2; + // make buffer pbuf = new char[maxlen]; - } + // get pw information + while(ERANGE == (result = getpwuid_r(uid, &pwinfo, pbuf, maxlen, &ppwinfo))){ + delete[] pbuf; + maxlen *= 2; + pbuf = new char[maxlen]; + } - if(0 != result){ - S3FS_PRN_ERR("could not get pw information(%d).", result); - 
delete[] pbuf; - return string(""); - } + if(0 != result){ + S3FS_PRN_ERR("could not get pw information(%d).", result); + delete[] pbuf; + return string(""); + } - // check pw - if(NULL == ppwinfo){ + // check pw + if(NULL == ppwinfo){ + delete[] pbuf; + return string(""); + } + string name = SAFESTRPTR(ppwinfo->pw_name); delete[] pbuf; - return string(""); - } - string name = SAFESTRPTR(ppwinfo->pw_name); - delete[] pbuf; - return name; + return name; } int is_uid_include_group(uid_t uid, gid_t gid) { - size_t maxlen = max_group_name_length; - int result; - char* pbuf; - struct group ginfo; - struct group* pginfo = NULL; + size_t maxlen = max_group_name_length; + int result; + char* pbuf; + struct group ginfo; + struct group* pginfo = NULL; - // make buffer - pbuf = new char[maxlen]; - // get group information - while(ERANGE == (result = getgrgid_r(gid, &ginfo, pbuf, maxlen, &pginfo))){ - delete[] pbuf; - maxlen *= 2; + // make buffer pbuf = new char[maxlen]; - } - - if(0 != result){ - S3FS_PRN_ERR("could not get group information(%d).", result); - delete[] pbuf; - return -result; - } - - // check group - if(NULL == pginfo){ - // there is not gid in group. - delete[] pbuf; - return -EINVAL; - } - - string username = get_username(uid); - - char** ppgr_mem; - for(ppgr_mem = pginfo->gr_mem; ppgr_mem && *ppgr_mem; ppgr_mem++){ - if(username == *ppgr_mem){ - // Found username in group. - delete[] pbuf; - return 1; + // get group information + while(ERANGE == (result = getgrgid_r(gid, &ginfo, pbuf, maxlen, &pginfo))){ + delete[] pbuf; + maxlen *= 2; + pbuf = new char[maxlen]; } - } - delete[] pbuf; - return 0; + + if(0 != result){ + S3FS_PRN_ERR("could not get group information(%d).", result); + delete[] pbuf; + return -result; + } + + // check group + if(NULL == pginfo){ + // there is not gid in group. 
+ delete[] pbuf; + return -EINVAL; + } + + string username = get_username(uid); + + char** ppgr_mem; + for(ppgr_mem = pginfo->gr_mem; ppgr_mem && *ppgr_mem; ppgr_mem++){ + if(username == *ppgr_mem){ + // Found username in group. + delete[] pbuf; + return 1; + } + } + delete[] pbuf; + return 0; } //------------------------------------------------------------------- // Utility for file and directory //------------------------------------------------------------------- +string mydirname(const string& path) +{ + return string(dirname((char*)path.c_str())); +} + // safe variant of dirname // dirname clobbers path so let it operate on a tmp copy string mydirname(const char* path) { - if(!path || '\0' == path[0]){ - return string(""); - } - return mydirname(string(path)); + if(!path || '\0' == path[0]){ + return string(""); + } + return mydirname(string(path)); } -string mydirname(const string& path) +string mybasename(const string& path) { - return string(dirname((char*)path.c_str())); + return string(basename((char*)path.c_str())); } // safe variant of basename // basename clobbers path so let it operate on a tmp copy string mybasename(const char* path) { - if(!path || '\0' == path[0]){ - return string(""); - } - return mybasename(string(path)); -} - -string mybasename(const string& path) -{ - return string(basename((char*)path.c_str())); + if(!path || '\0' == path[0]){ + return string(""); + } + return mybasename(string(path)); } // mkdir --parents int mkdirp(const string& path, mode_t mode) { - string base; - string component; - istringstream ss(path); - while (getline(ss, component, '/')) { - base += "/" + component; + string base; + string component; + istringstream ss(path); + while (getline(ss, component, '/')) { + base += "/" + component; - struct stat st; - if(0 == stat(base.c_str(), &st)){ - if(!S_ISDIR(st.st_mode)){ - return EPERM; - } - }else{ - if(0 != mkdir(base.c_str(), mode) && errno != EEXIST){ - return errno; - } + struct stat st; + if(0 == 
stat(base.c_str(), &st)){ + if(!S_ISDIR(st.st_mode)){ + return EPERM; + } + }else{ + if(0 != mkdir(base.c_str(), mode) && errno != EEXIST){ + return errno; + } + } } - } - return 0; + return 0; } // get existed directory path string get_exist_directory_path(const string& path) { - string existed("/"); // "/" is existed. - string base; - string component; - istringstream ss(path); - while (getline(ss, component, '/')) { - if(base != "/"){ - base += "/"; + string existed("/"); // "/" is existed. + string base; + string component; + istringstream ss(path); + while (getline(ss, component, '/')) { + if(base != "/"){ + base += "/"; + } + base += component; + struct stat st; + if(0 == stat(base.c_str(), &st) && S_ISDIR(st.st_mode)){ + existed = base; + }else{ + break; + } } - base += component; - struct stat st; - if(0 == stat(base.c_str(), &st) && S_ISDIR(st.st_mode)){ - existed = base; - }else{ - break; - } - } - return existed; + return existed; } bool check_exist_dir_permission(const char* dirpath) { - if(!dirpath || '\0' == dirpath[0]){ - return false; - } - - // exists - struct stat st; - if(0 != stat(dirpath, &st)){ - if(ENOENT == errno){ - // dir does not exist - return true; - } - if(EACCES == errno){ - // could not access directory - return false; - } - // something error occurred - return false; - } - - // check type - if(!S_ISDIR(st.st_mode)){ - // path is not directory - return false; - } - - // check permission - uid_t myuid = geteuid(); - if(myuid == st.st_uid){ - if(S_IRWXU != (st.st_mode & S_IRWXU)){ - return false; - } - }else{ - if(1 == is_uid_include_group(myuid, st.st_gid)){ - if(S_IRWXG != (st.st_mode & S_IRWXG)){ + if(!dirpath || '\0' == dirpath[0]){ return false; - } + } + + // exists + struct stat st; + if(0 != stat(dirpath, &st)){ + if(ENOENT == errno){ + // dir does not exist + return true; + } + if(EACCES == errno){ + // could not access directory + return false; + } + // something error occurred + return false; + } + + // check type + 
if(!S_ISDIR(st.st_mode)){ + // path is not directory + return false; + } + + // check permission + uid_t myuid = geteuid(); + if(myuid == st.st_uid){ + if(S_IRWXU != (st.st_mode & S_IRWXU)){ + return false; + } }else{ - if(S_IRWXO != (st.st_mode & S_IRWXO)){ - return false; - } + if(1 == is_uid_include_group(myuid, st.st_gid)){ + if(S_IRWXG != (st.st_mode & S_IRWXG)){ + return false; + } + }else{ + if(S_IRWXO != (st.st_mode & S_IRWXO)){ + return false; + } + } } - } - return true; + return true; } bool delete_files_in_dir(const char* dir, bool is_remove_own) { - DIR* dp; - struct dirent* dent; + DIR* dp; + struct dirent* dent; - if(NULL == (dp = opendir(dir))){ - S3FS_PRN_ERR("could not open dir(%s) - errno(%d)", dir, errno); - return false; - } - - for(dent = readdir(dp); dent; dent = readdir(dp)){ - if(0 == strcmp(dent->d_name, "..") || 0 == strcmp(dent->d_name, ".")){ - continue; - } - string fullpath = dir; - fullpath += "/"; - fullpath += dent->d_name; - struct stat st; - if(0 != lstat(fullpath.c_str(), &st)){ - S3FS_PRN_ERR("could not get stats of file(%s) - errno(%d)", fullpath.c_str(), errno); - closedir(dp); - return false; - } - if(S_ISDIR(st.st_mode)){ - // dir -> Reentrant - if(!delete_files_in_dir(fullpath.c_str(), true)){ - S3FS_PRN_ERR("could not remove sub dir(%s) - errno(%d)", fullpath.c_str(), errno); - closedir(dp); + if(NULL == (dp = opendir(dir))){ + S3FS_PRN_ERR("could not open dir(%s) - errno(%d)", dir, errno); return false; - } - }else{ - if(0 != unlink(fullpath.c_str())){ - S3FS_PRN_ERR("could not remove file(%s) - errno(%d)", fullpath.c_str(), errno); - closedir(dp); - return false; - } } - } - closedir(dp); - if(is_remove_own && 0 != rmdir(dir)){ - S3FS_PRN_ERR("could not remove dir(%s) - errno(%d)", dir, errno); - return false; - } - return true; + for(dent = readdir(dp); dent; dent = readdir(dp)){ + if(0 == strcmp(dent->d_name, "..") || 0 == strcmp(dent->d_name, ".")){ + continue; + } + string fullpath = dir; + fullpath += "/"; + 
fullpath += dent->d_name; + struct stat st; + if(0 != lstat(fullpath.c_str(), &st)){ + S3FS_PRN_ERR("could not get stats of file(%s) - errno(%d)", fullpath.c_str(), errno); + closedir(dp); + return false; + } + if(S_ISDIR(st.st_mode)){ + // dir -> Reentrant + if(!delete_files_in_dir(fullpath.c_str(), true)){ + S3FS_PRN_ERR("could not remove sub dir(%s) - errno(%d)", fullpath.c_str(), errno); + closedir(dp); + return false; + } + }else{ + if(0 != unlink(fullpath.c_str())){ + S3FS_PRN_ERR("could not remove file(%s) - errno(%d)", fullpath.c_str(), errno); + closedir(dp); + return false; + } + } + } + closedir(dp); + + if(is_remove_own && 0 != rmdir(dir)){ + S3FS_PRN_ERR("could not remove dir(%s) - errno(%d)", dir, errno); + return false; + } + return true; } //------------------------------------------------------------------- @@ -754,841 +345,36 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own) //------------------------------------------------------------------- bool compare_sysname(const char* target) { - // [NOTE] - // The buffer size of sysname member in struct utsname is - // OS dependent, but 512 bytes is sufficient for now. 
- // - static char* psysname = NULL; - static char sysname[512]; - if(!psysname){ - struct utsname sysinfo; - if(0 != uname(&sysinfo)){ - S3FS_PRN_ERR("could not initialize system name to internal buffer(errno:%d), thus use \"Linux\".", errno); - strcpy(sysname, "Linux"); - }else{ - S3FS_PRN_INFO("system name is %s", sysinfo.sysname); - sysname[sizeof(sysname) - 1] = '\0'; - strncpy(sysname, sysinfo.sysname, sizeof(sysname) - 1); - } - psysname = &sysname[0]; - } - - if(!target || 0 != strcmp(psysname, target)){ - return false; - } - return true; -} - -//------------------------------------------------------------------- -// Utility functions for convert -//------------------------------------------------------------------- -time_t get_mtime(const char *str) -{ - // [NOTE] - // In rclone, there are cases where ns is set to x-amz-meta-mtime - // with floating point number. s3fs uses x-amz-meta-mtime by - // truncating the floating point or less (in seconds or less) to - // correspond to this. 
- // - string strmtime; - if(str && '\0' != *str){ - strmtime = str; - string::size_type pos = strmtime.find('.', 0); - if(string::npos != pos){ - strmtime = strmtime.substr(0, pos); - } - } - return static_cast(cvt_strtoofft(strmtime.c_str())); -} - -static time_t get_time(const headers_t& meta, const char *header) -{ - headers_t::const_iterator iter; - if(meta.end() == (iter = meta.find(header))){ - return 0; - } - return get_mtime((*iter).second.c_str()); -} - -time_t get_mtime(const headers_t& meta, bool overcheck) -{ - time_t t = get_time(meta, "x-amz-meta-mtime"); - if(t != 0){ - return t; - } - t = get_time(meta, "x-amz-meta-goog-reserved-file-mtime"); - if(t != 0){ - return t; - } - if(overcheck){ - return get_lastmodified(meta); - } - return 0; -} - -time_t get_ctime(const headers_t& meta, bool overcheck) -{ - time_t t = get_time(meta, "x-amz-meta-ctime"); - if(t != 0){ - return t; - } - if(overcheck){ - return get_lastmodified(meta); - } - return 0; -} - -off_t get_size(const char *s) -{ - return cvt_strtoofft(s); -} - -off_t get_size(const headers_t& meta) -{ - headers_t::const_iterator iter = meta.find("Content-Length"); - if(meta.end() == iter){ - return 0; - } - return get_size((*iter).second.c_str()); -} - -mode_t get_mode(const char *s, int base) -{ - return static_cast(cvt_strtoofft(s, base)); -} - -mode_t get_mode(const headers_t& meta, const char* path, bool checkdir, bool forcedir) -{ - mode_t mode = 0; - bool isS3sync = false; - headers_t::const_iterator iter; - - if(meta.end() != (iter = meta.find("x-amz-meta-mode"))){ - mode = get_mode((*iter).second.c_str()); - }else if(meta.end() != (iter = meta.find("x-amz-meta-permissions"))){ // for s3sync - mode = get_mode((*iter).second.c_str()); - isS3sync = true; - }else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-mode"))){ // for GCS - mode = get_mode((*iter).second.c_str(), 8); - }else{ - // If another tool creates an object without permissions, default to owner - // 
read-write and group readable. - mode = path[strlen(path) - 1] == '/' ? 0750 : 0640; - } - // Checking the bitmask, if the last 3 bits are all zero then process as a regular - // file type (S_IFDIR or S_IFREG), otherwise return mode unmodified so that S_IFIFO, - // S_IFSOCK, S_IFCHR, S_IFLNK and S_IFBLK devices can be processed properly by fuse. - if(!(mode & S_IFMT)){ - if(!isS3sync){ - if(checkdir){ - if(forcedir){ - mode |= S_IFDIR; + // [NOTE] + // The buffer size of sysname member in struct utsname is + // OS dependent, but 512 bytes is sufficient for now. + // + static char* psysname = NULL; + static char sysname[512]; + if(!psysname){ + struct utsname sysinfo; + if(0 != uname(&sysinfo)){ + S3FS_PRN_ERR("could not initialize system name to internal buffer(errno:%d), thus use \"Linux\".", errno); + strcpy(sysname, "Linux"); }else{ - if(meta.end() != (iter = meta.find("Content-Type"))){ - string strConType = (*iter).second; - // Leave just the mime type, remove any optional parameters (eg charset) - string::size_type pos = strConType.find(';'); - if(string::npos != pos){ - strConType = strConType.substr(0, pos); - } - if(strConType == "application/x-directory" - || strConType == "httpd/unix-directory"){ // Nextcloud uses this MIME type for directory objects when mounting bucket as external Storage - mode |= S_IFDIR; - }else if(path && 0 < strlen(path) && '/' == path[strlen(path) - 1]){ - if(strConType == "binary/octet-stream" || strConType == "application/octet-stream"){ - mode |= S_IFDIR; - }else{ - if(complement_stat){ - // If complement lack stat mode, when the object has '/' character at end of name - // and content type is text/plain and the object's size is 0 or 1, it should be - // directory. 
- off_t size = get_size(meta); - if(strConType == "text/plain" && (0 == size || 1 == size)){ - mode |= S_IFDIR; - }else{ - mode |= S_IFREG; - } - }else{ - mode |= S_IFREG; - } - } - }else{ - mode |= S_IFREG; - } - }else{ - mode |= S_IFREG; - } + S3FS_PRN_INFO("system name is %s", sysinfo.sysname); + sysname[sizeof(sysname) - 1] = '\0'; + strncpy(sysname, sysinfo.sysname, sizeof(sysname) - 1); } - } - // If complement lack stat mode, when it's mode is not set any permission, - // the object is added minimal mode only for read permission. - if(complement_stat && 0 == (mode & (S_IRWXU | S_IRWXG | S_IRWXO))){ - mode |= (S_IRUSR | (0 == (mode & S_IFDIR) ? 0 : S_IXUSR)); - } - }else{ - if(!checkdir){ - // cut dir/reg flag. - mode &= ~S_IFDIR; - mode &= ~S_IFREG; - } + psysname = &sysname[0]; } - } - return mode; -} -uid_t get_uid(const char *s) -{ - return static_cast(cvt_strtoofft(s)); -} - -uid_t get_uid(const headers_t& meta) -{ - headers_t::const_iterator iter; - if(meta.end() != (iter = meta.find("x-amz-meta-uid"))){ - return get_uid((*iter).second.c_str()); - }else if(meta.end() != (iter = meta.find("x-amz-meta-owner"))){ // for s3sync - return get_uid((*iter).second.c_str()); - }else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-uid"))){ // for GCS - return get_uid((*iter).second.c_str()); - }else{ - return geteuid(); - } -} - -gid_t get_gid(const char *s) -{ - return static_cast(cvt_strtoofft(s)); -} - -gid_t get_gid(const headers_t& meta) -{ - headers_t::const_iterator iter; - if(meta.end() != (iter = meta.find("x-amz-meta-gid"))){ - return get_gid((*iter).second.c_str()); - }else if(meta.end() != (iter = meta.find("x-amz-meta-group"))){ // for s3sync - return get_gid((*iter).second.c_str()); - }else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-gid"))){ // for GCS - return get_gid((*iter).second.c_str()); - }else{ - return getegid(); - } -} - -blkcnt_t get_blocks(off_t size) -{ - return size / 512 + 1; -} - -time_t 
cvtIAMExpireStringToTime(const char* s) -{ - struct tm tm; - if(!s){ - return 0L; - } - memset(&tm, 0, sizeof(struct tm)); - strptime(s, "%Y-%m-%dT%H:%M:%S", &tm); - return timegm(&tm); // GMT -} - -time_t get_lastmodified(const char* s) -{ - struct tm tm; - if(!s){ - return 0L; - } - memset(&tm, 0, sizeof(struct tm)); - strptime(s, "%a, %d %b %Y %H:%M:%S %Z", &tm); - return timegm(&tm); // GMT -} - -time_t get_lastmodified(const headers_t& meta) -{ - headers_t::const_iterator iter = meta.find("Last-Modified"); - if(meta.end() == iter){ - return 0; - } - return get_lastmodified((*iter).second.c_str()); -} - -// -// Returns it whether it is an object with need checking in detail. -// If this function returns true, the object is possible to be directory -// and is needed checking detail(searching sub object). -// -bool is_need_check_obj_detail(const headers_t& meta) -{ - headers_t::const_iterator iter; - - // directory object is Content-Length as 0. - if(0 != get_size(meta)){ - return false; - } - // if the object has x-amz-meta information, checking is no more. - if(meta.end() != meta.find("x-amz-meta-mode") || - meta.end() != meta.find("x-amz-meta-mtime") || - meta.end() != meta.find("x-amz-meta-uid") || - meta.end() != meta.find("x-amz-meta-gid") || - meta.end() != meta.find("x-amz-meta-owner") || - meta.end() != meta.find("x-amz-meta-group") || - meta.end() != meta.find("x-amz-meta-permissions") ) - { - return false; - } - // if there is not Content-Type, or Content-Type is "x-directory", - // checking is no more. - if(meta.end() == (iter = meta.find("Content-Type"))){ - return false; - } - if("application/x-directory" == (*iter).second){ - return false; - } - return true; -} - -// [NOTE] -// If add_noexist is false and the key does not exist, it will not be added. 
-// -bool merge_headers(headers_t& base, const headers_t& additional, bool add_noexist) -{ - bool added = false; - for(headers_t::const_iterator iter = additional.begin(); iter != additional.end(); ++iter){ - if(add_noexist || base.find(iter->first) != base.end()){ - base[iter->first] = iter->second; - added = true; + if(!target || 0 != strcmp(psysname, target)){ + return false; } - } - return added; -} - -bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value) -{ - bool result = false; - - if(!data || !key){ - return false; - } - value.clear(); - - xmlDocPtr doc; - if(NULL == (doc = xmlReadMemory(data, len, "", NULL, 0))){ - return false; - } - - if(NULL == doc->children){ - S3FS_XMLFREEDOC(doc); - return false; - } - for(xmlNodePtr cur_node = doc->children->children; NULL != cur_node; cur_node = cur_node->next){ - // For DEBUG - // string cur_node_name(reinterpret_cast(cur_node->name)); - // printf("cur_node_name: %s\n", cur_node_name.c_str()); - - if(XML_ELEMENT_NODE == cur_node->type){ - string elementName = reinterpret_cast(cur_node->name); - // For DEBUG - // printf("elementName: %s\n", elementName.c_str()); - - if(cur_node->children){ - if(XML_TEXT_NODE == cur_node->children->type){ - if(elementName == key) { - value = reinterpret_cast(cur_node->children->content); - result = true; - break; - } - } - } - } - } - S3FS_XMLFREEDOC(doc); - - return result; -} - -//------------------------------------------------------------------- -// Help -//------------------------------------------------------------------- -void show_usage () -{ - printf("Usage: %s BUCKET:[PATH] MOUNTPOINT [OPTION]...\n", - program_name.c_str()); -} - -void show_help () -{ - show_usage(); - printf( - "\n" - "Mount an Amazon S3 bucket as a file system.\n" - "\n" - "Usage:\n" - " mounting\n" - " s3fs bucket[:/path] mountpoint [options]\n" - " s3fs mountpoint [options (must specify bucket= option)]\n" - "\n" - " unmounting\n" - " umount mountpoint\n" - "\n" - " 
General forms for s3fs and FUSE/mount options:\n" - " -o opt[,opt...]\n" - " -o opt [-o opt] ...\n" - "\n" - " utility mode (remove interrupted multipart uploading objects)\n" - " s3fs --incomplete-mpu-list (-u) bucket\n" - " s3fs --incomplete-mpu-abort[=all | =] bucket\n" - "\n" - "s3fs Options:\n" - "\n" - " Most s3fs options are given in the form where \"opt\" is:\n" - "\n" - " =\n" - "\n" - " bucket\n" - " - if it is not specified bucket name (and path) in command line,\n" - " must specify this option after -o option for bucket name.\n" - "\n" - " default_acl (default=\"private\")\n" - " - the default canned acl to apply to all written s3 objects,\n" - " e.g., private, public-read. see\n" - " https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl\n" - " for the full list of canned acls\n" - "\n" - " retries (default=\"5\")\n" - " - number of times to retry a failed S3 transaction\n" - "\n" - " use_cache (default=\"\" which means disabled)\n" - " - local folder to use for local file cache\n" - "\n" - " check_cache_dir_exist (default is disable)\n" - " - if use_cache is set, check if the cache directory exists.\n" - " If this option is not specified, it will be created at runtime\n" - " when the cache directory does not exist.\n" - "\n" - " del_cache (delete local file cache)\n" - " - delete local file cache when s3fs starts and exits.\n" - "\n" - " storage_class (default=\"standard\")\n" - " - store object with specified storage class. 
Possible values:\n" - " standard, standard_ia, onezone_ia, reduced_redundancy,\n" - " intelligent_tiering, glacier, and deep_archive.\n" - "\n" - " use_rrs (default is disable)\n" - " - use Amazon's Reduced Redundancy Storage.\n" - " this option can not be specified with use_sse.\n" - " (can specify use_rrs=1 for old version)\n" - " this option has been replaced by new storage_class option.\n" - "\n" - " use_sse (default is disable)\n" - " - Specify three type Amazon's Server-Site Encryption: SSE-S3,\n" - " SSE-C or SSE-KMS. SSE-S3 uses Amazon S3-managed encryption\n" - " keys, SSE-C uses customer-provided encryption keys, and\n" - " SSE-KMS uses the master key which you manage in AWS KMS.\n" - " You can specify \"use_sse\" or \"use_sse=1\" enables SSE-S3\n" - " type (use_sse=1 is old type parameter).\n" - " Case of setting SSE-C, you can specify \"use_sse=custom\",\n" - " \"use_sse=custom:\" or\n" - " \"use_sse=\" (only \n" - " specified is old type parameter). You can use \"c\" for\n" - " short \"custom\".\n" - " The custom key file must be 600 permission. The file can\n" - " have some lines, each line is one SSE-C key. The first line\n" - " in file is used as Customer-Provided Encryption Keys for\n" - " uploading and changing headers etc. If there are some keys\n" - " after first line, those are used downloading object which\n" - " are encrypted by not first key. So that, you can keep all\n" - " SSE-C keys in file, that is SSE-C key history.\n" - " If you specify \"custom\" (\"c\") without file path, you\n" - " need to set custom key by load_sse_c option or AWSSSECKEYS\n" - " environment. (AWSSSECKEYS environment has some SSE-C keys\n" - " with \":\" separator.) This option is used to decide the\n" - " SSE type. 
So that if you do not want to encrypt a object\n" - " object at uploading, but you need to decrypt encrypted\n" - " object at downloading, you can use load_sse_c option instead\n" - " of this option.\n" - " For setting SSE-KMS, specify \"use_sse=kmsid\" or\n" - " \"use_sse=kmsid:\". You can use \"k\" for short \"kmsid\".\n" - " If you san specify SSE-KMS type with your in AWS\n" - " KMS, you can set it after \"kmsid:\" (or \"k:\"). If you\n" - " specify only \"kmsid\" (\"k\"), you need to set AWSSSEKMSID\n" - " environment which value is . You must be careful\n" - " about that you can not use the KMS id which is not same EC2\n" - " region.\n" - "\n" - " load_sse_c - specify SSE-C keys\n" - " Specify the custom-provided encryption keys file path for decrypting\n" - " at downloading.\n" - " If you use the custom-provided encryption key at uploading, you\n" - " specify with \"use_sse=custom\". The file has many lines, one line\n" - " means one custom key. So that you can keep all SSE-C keys in file,\n" - " that is SSE-C key history. 
AWSSSECKEYS environment is as same as this\n" - " file contents.\n" - "\n" - " public_bucket (default=\"\" which means disabled)\n" - " - anonymously mount a public bucket when set to 1, ignores the \n" - " $HOME/.passwd-s3fs and /etc/passwd-s3fs files.\n" - " S3 does not allow copy object api for anonymous users, then\n" - " s3fs sets nocopyapi option automatically when public_bucket=1\n" - " option is specified.\n" - "\n" - " passwd_file (default=\"\")\n" - " - specify which s3fs password file to use\n" - "\n" - " ahbe_conf (default=\"\" which means disabled)\n" - " - This option specifies the configuration file path which\n" - " file is the additional HTTP header by file (object) extension.\n" - " The configuration file format is below:\n" - " -----------\n" - " line = [file suffix or regex] HTTP-header [HTTP-values]\n" - " file suffix = file (object) suffix, if this field is empty,\n" - " it means \"reg:(.*)\".(=all object).\n" - " regex = regular expression to match the file (object) path.\n" - " this type starts with \"reg:\" prefix.\n" - " HTTP-header = additional HTTP header name\n" - " HTTP-values = additional HTTP header value\n" - " -----------\n" - " Sample:\n" - " -----------\n" - " .gz Content-Encoding gzip\n" - " .Z Content-Encoding compress\n" - " reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2\n" - " -----------\n" - " A sample configuration file is uploaded in \"test\" directory.\n" - " If you specify this option for set \"Content-Encoding\" HTTP \n" - " header, please take care for RFC 2616.\n" - "\n" - " profile (default=\"default\")\n" - " - Choose a profile from ${HOME}/.aws/credentials to authenticate\n" - " against S3. 
Note that this format matches the AWS CLI format and\n" - " differs from the s3fs passwd format.\n" - "\n" - " connect_timeout (default=\"300\" seconds)\n" - " - time to wait for connection before giving up\n" - "\n" - " readwrite_timeout (default=\"120\" seconds)\n" - " - time to wait between read/write activity before giving up\n" - "\n" - " list_object_max_keys (default=\"1000\")\n" - " - specify the maximum number of keys returned by S3 list object\n" - " API. The default is 1000. you can set this value to 1000 or more.\n" - "\n" - " max_stat_cache_size (default=\"100,000\" entries (about 40MB))\n" - " - maximum number of entries in the stat cache, and this maximum is\n" - " also treated as the number of symbolic link cache.\n" - "\n" - " stat_cache_expire (default is 900))\n" - " - specify expire time (seconds) for entries in the stat cache.\n" - " This expire time indicates the time since stat cached. and this\n" - " is also set to the expire time of the symbolic link cache.\n" - "\n" - " stat_cache_interval_expire (default is 900)\n" - " - specify expire time (seconds) for entries in the stat cache(and\n" - " symbolic link cache).\n" - " This expire time is based on the time from the last access time\n" - " of the stat cache. This option is exclusive with stat_cache_expire,\n" - " and is left for compatibility with older versions.\n" - "\n" - " enable_noobj_cache (default is disable)\n" - " - enable cache entries for the object which does not exist.\n" - " s3fs always has to check whether file (or sub directory) exists \n" - " under object (path) when s3fs does some command, since s3fs has \n" - " recognized a directory which does not exist and has files or \n" - " sub directories under itself. 
It increases ListBucket request \n" - " and makes performance bad.\n" - " You can specify this option for performance, s3fs memorizes \n" - " in stat cache that the object (file or directory) does not exist.\n" - "\n" - " no_check_certificate\n" - " - server certificate won't be checked against the available \n" - " certificate authorities.\n" - "\n" - " ssl_verify_hostname (default=\"2\")\n" - " - When 0, do not verify the SSL certificate against the hostname.\n" - "\n" - " nodnscache (disable DNS cache)\n" - " - s3fs is always using DNS cache, this option make DNS cache disable.\n" - "\n" - " nosscache (disable SSL session cache)\n" - " - s3fs is always using SSL session cache, this option make SSL \n" - " session cache disable.\n" - "\n" - " multireq_max (default=\"20\")\n" - " - maximum number of parallel request for listing objects.\n" - "\n" - " parallel_count (default=\"5\")\n" - " - number of parallel request for uploading big objects.\n" - " s3fs uploads large object (over 20MB) by multipart post request, \n" - " and sends parallel requests.\n" - " This option limits parallel request count which s3fs requests \n" - " at once. It is necessary to set this value depending on a CPU \n" - " and a network band.\n" - "\n" - " multipart_size (default=\"10\")\n" - " - part size, in MB, for each multipart request.\n" - " The minimum value is 5 MB and the maximum value is 5 GB.\n" - "\n" - " ensure_diskfree (default 0)\n" - " - sets MB to ensure disk free space. This option means the\n" - " threshold of free space size on disk which is used for the\n" - " cache file by s3fs. s3fs makes file for\n" - " downloading, uploading and caching files. 
If the disk free\n" - " space is smaller than this value, s3fs do not use diskspace\n" - " as possible in exchange for the performance.\n" - "\n" - " singlepart_copy_limit (default=\"512\")\n" - " - maximum size, in MB, of a single-part copy before trying \n" - " multipart copy.\n" - "\n" - " host (default=\"https://s3.amazonaws.com\")\n" - " - Set a non-Amazon host, e.g., https://example.com.\n" - "\n" - " servicepath (default=\"/\")\n" - " - Set a service path when the non-Amazon host requires a prefix.\n" - "\n" - " url (default=\"https://s3.amazonaws.com\")\n" - " - sets the url to use to access Amazon S3. If you want to use HTTP,\n" - " then you can set \"url=http://s3.amazonaws.com\".\n" - " If you do not use https, please specify the URL with the url\n" - " option.\n" - "\n" - " endpoint (default=\"us-east-1\")\n" - " - sets the endpoint to use on signature version 4\n" - " If this option is not specified, s3fs uses \"us-east-1\" region as\n" - " the default. If the s3fs could not connect to the region specified\n" - " by this option, s3fs could not run. But if you do not specify this\n" - " option, and if you can not connect with the default region, s3fs\n" - " will retry to automatically connect to the other region. So s3fs\n" - " can know the correct region name, because s3fs can find it in an\n" - " error from the S3 server.\n" - "\n" - " sigv2 (default is signature version 4)\n" - " - sets signing AWS requests by using Signature Version 2\n" - "\n" - " mp_umask (default is \"0000\")\n" - " - sets umask for the mount point directory.\n" - " If allow_other option is not set, s3fs allows access to the mount\n" - " point only to the owner. In the opposite case s3fs allows access\n" - " to all users as the default. But if you set the allow_other with\n" - " this option, you can control the permissions of the\n" - " mount point by this option like umask.\n" - "\n" - " umask (default is \"0000\")\n" - " - sets umask for files under the mountpoint. 
This can allow\n" - " users other than the mounting user to read and write to files\n" - " that they did not create.\n" - "\n" - " nomultipart (disable multipart uploads)\n" - "\n" - " enable_content_md5 (default is disable)\n" - " Allow S3 server to check data integrity of uploads via the\n" - " Content-MD5 header. This can add CPU overhead to transfers.\n" - "\n" - " ecs (default is disable)\n" - " - This option instructs s3fs to query the ECS container credential\n" - " metadata address instead of the instance metadata address.\n" - "\n" - " iam_role (default is no IAM role)\n" - " - This option requires the IAM role name or \"auto\". If you specify\n" - " \"auto\", s3fs will automatically use the IAM role names that are set\n" - " to an instance. If you specify this option without any argument, it\n" - " is the same as that you have specified the \"auto\".\n" - "\n" - " ibm_iam_auth (default is not using IBM IAM authentication)\n" - " - This option instructs s3fs to use IBM IAM authentication.\n" - " In this mode, the AWSAccessKey and AWSSecretKey will be used as\n" - " IBM's Service-Instance-ID and APIKey, respectively.\n" - "\n" - " ibm_iam_endpoint (default is https://iam.bluemix.net)\n" - " - sets the URL to use for IBM IAM authentication.\n" - "\n" - " use_xattr (default is not handling the extended attribute)\n" - " Enable to handle the extended attribute (xattrs).\n" - " If you set this option, you can use the extended attribute.\n" - " For example, encfs and ecryptfs need to support the extended attribute.\n" - " Notice: if s3fs handles the extended attribute, s3fs can not work to\n" - " copy command with preserve=mode.\n" - "\n" - " noxmlns (disable registering xml name space)\n" - " disable registering xml name space for response of \n" - " ListBucketResult and ListVersionsResult etc. 
Default name \n" - " space is looked up from \"http://s3.amazonaws.com/doc/2006-03-01\".\n" - " This option should not be specified now, because s3fs looks up\n" - " xmlns automatically after v1.66.\n" - "\n" - " nomixupload (disable copy in multipart uploads)\n" - " Disable to use PUT (copy api) when multipart uploading large size objects.\n" - " By default, when doing multipart upload, the range of unchanged data\n" - " will use PUT (copy api) whenever possible.\n" - " When nocopyapi or norenameapi is specified, use of PUT (copy api) is\n" - " invalidated even if this option is not specified.\n" - "\n" - " nocopyapi (for other incomplete compatibility object storage)\n" - " For a distributed object storage which is compatibility S3\n" - " API without PUT (copy api).\n" - " If you set this option, s3fs do not use PUT with \n" - " \"x-amz-copy-source\" (copy api). Because traffic is increased\n" - " 2-3 times by this option, we do not recommend this.\n" - "\n" - " norenameapi (for other incomplete compatibility object storage)\n" - " For a distributed object storage which is compatibility S3\n" - " API without PUT (copy api).\n" - " This option is a subset of nocopyapi option. The nocopyapi\n" - " option does not use copy-api for all command (ex. chmod, chown,\n" - " touch, mv, etc), but this option does not use copy-api for\n" - " only rename command (ex. mv). 
If this option is specified with\n" - " nocopyapi, then s3fs ignores it.\n" - "\n" - " use_path_request_style (use legacy API calling style)\n" - " Enable compatibility with S3-like APIs which do not support\n" - " the virtual-host request style, by using the older path request\n" - " style.\n" - "\n" - " noua (suppress User-Agent header)\n" - " Usually s3fs outputs of the User-Agent in \"s3fs/ (commit\n" - " hash ; )\" format.\n" - " If this option is specified, s3fs suppresses the output of the\n" - " User-Agent.\n" - "\n" - " cipher_suites\n" - " Customize the list of TLS cipher suites.\n" - " Expects a colon separated list of cipher suite names.\n" - " A list of available cipher suites, depending on your TLS engine,\n" - " can be found on the CURL library documentation:\n" - " https://curl.haxx.se/docs/ssl-ciphers.html\n" - "\n" - " instance_name - The instance name of the current s3fs mountpoint.\n" - " This name will be added to logging messages and user agent headers sent by s3fs.\n" - "\n" - " complement_stat (complement lack of file/directory mode)\n" - " s3fs complements lack of information about file/directory mode\n" - " if a file or a directory object does not have x-amz-meta-mode\n" - " header. As default, s3fs does not complements stat information\n" - " for a object, then the object will not be able to be allowed to\n" - " list/modify.\n" - "\n" - " notsup_compat_dir (not support compatibility directory types)\n" - " As a default, s3fs supports objects of the directory type as\n" - " much as possible and recognizes them as directories.\n" - " Objects that can be recognized as directory objects are \"dir/\",\n" - " \"dir\", \"dir_$folder$\", and there is a file object that does\n" - " not have a directory object but contains that directory path.\n" - " s3fs needs redundant communication to support all these\n" - " directory types. The object as the directory created by s3fs\n" - " is \"dir/\". 
By restricting s3fs to recognize only \"dir/\" as\n" - " a directory, communication traffic can be reduced. This option\n" - " is used to give this restriction to s3fs.\n" - " However, if there is a directory object other than \"dir/\" in\n" - " the bucket, specifying this option is not recommended. s3fs may\n" - " not be able to recognize the object correctly if an object\n" - " created by s3fs exists in the bucket.\n" - " Please use this option when the directory in the bucket is\n" - " only \"dir/\" object.\n" - "\n" - " use_wtf8 - support arbitrary file system encoding.\n" - " S3 requires all object names to be valid UTF-8. But some\n" - " clients, notably Windows NFS clients, use their own encoding.\n" - " This option re-encodes invalid UTF-8 object names into valid\n" - " UTF-8 by mapping offending codes into a 'private' codepage of the\n" - " Unicode set.\n" - " Useful on clients not using UTF-8 as their file system encoding.\n" - "\n" - " use_session_token - indicate that session token should be provided.\n" - " If credentials are provided by environment variables this switch\n" - " forces presence check of AWSSESSIONTOKEN variable.\n" - " Otherwise an error is returned.\n" - "\n" - " requester_pays (default is disable)\n" - " This option instructs s3fs to enable requests involving\n" - " Requester Pays buckets.\n" - " It includes the 'x-amz-request-payer=requester' entry in the\n" - " request header.\n" - "\n" - " mime (default is \"/etc/mime.types\")\n" - " Specify the path of the mime.types file.\n" - " If this option is not specified, the existence of \"/etc/mime.types\"\n" - " is checked, and that file is loaded as mime information.\n" - " If this file does not exist on macOS, then \"/etc/apache2/mime.types\"\n" - " is checked as well.\n" - "\n" - " dbglevel (default=\"crit\")\n" - " Set the debug message level. set value as crit (critical), err\n" - " (error), warn (warning), info (information) to debug level.\n" - " default debug level is critical. 
If s3fs run with \"-d\" option,\n" - " the debug level is set information. When s3fs catch the signal\n" - " SIGUSR2, the debug level is bumpup.\n" - "\n" - " curldbg - put curl debug message\n" - " Put the debug message from libcurl when this option is specified.\n" - " Specify \"normal\" or \"body\" for the parameter.\n" - " If the parameter is omitted, it is the same as \"normal\".\n" - " If \"body\" is specified, some API communication body data will be\n" - " output in addition to the debug message output as \"normal\".\n" - "\n" - " set_check_cache_sigusr1 (default is stdout)\n" - " If the cache is enabled, you can check the integrity of the\n" - " cache file and the cache file's stats info file.\n" - " This option is specified and when sending the SIGUSR1 signal\n" - " to the s3fs process checks the cache status at that time.\n" - " This option can take a file path as parameter to output the\n" - " check result to that file. The file path parameter can be omitted.\n" - " If omitted, the result will be output to stdout or syslog.\n" - "\n" - "FUSE/mount Options:\n" - "\n" - " Most of the generic mount options described in 'man mount' are\n" - " supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime,\n" - " noatime, sync async, dirsync). Filesystems are mounted with\n" - " '-onodev,nosuid' by default, which can only be overridden by a\n" - " privileged user.\n" - " \n" - " There are many FUSE specific mount options that can be specified.\n" - " e.g. allow_other See the FUSE's README for the full set.\n" - "\n" - "Utility mode Options:\n" - "\n" - " -u, --incomplete-mpu-list\n" - " Lists multipart incomplete objects uploaded to the specified\n" - " bucket.\n" - " --incomplete-mpu-abort (=all or =)\n" - " Delete the multipart incomplete object uploaded to the specified\n" - " bucket.\n" - " If \"all\" is specified for this option, all multipart incomplete\n" - " objects will be deleted. 
If you specify no argument as an option,\n" - " objects older than 24 hours (24H) will be deleted (This is the\n" - " default value). You can specify an optional date format. It can\n" - " be specified as year, month, day, hour, minute, second, and it is\n" - " expressed as \"Y\", \"M\", \"D\", \"h\", \"m\", \"s\" respectively.\n" - " For example, \"1Y6M10D12h30m30s\".\n" - "\n" - "Miscellaneous Options:\n" - "\n" - " -h, --help Output this help.\n" - " --version Output version info.\n" - " -d --debug Turn on DEBUG messages to syslog. Specifying -d\n" - " twice turns on FUSE debug messages to STDOUT.\n" - " -f FUSE foreground option - do not run as daemon.\n" - " -s FUSE single-threaded option\n" - " disable multi-threaded operation\n" - "\n" - "\n" - "s3fs home page: \n" - ); -} - -void show_version() -{ - printf( - "Amazon Simple Storage Service File System V%s (commit:%s) with %s\n" - "Copyright (C) 2010 Randy Rizun \n" - "License GPL2: GNU GPL version 2 \n" - "This is free software: you are free to change and redistribute it.\n" - "There is NO WARRANTY, to the extent permitted by law.\n", - VERSION, COMMIT_HASH_VAL, s3fs_crypt_lib_name()); + return true; } /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/s3fs_util.h b/src/s3fs_util.h index 781fdae..e8537f7 100644 --- a/src/s3fs_util.h +++ b/src/s3fs_util.h @@ -17,100 +17,15 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ + #ifndef S3FS_S3FS_UTIL_H_ #define S3FS_S3FS_UTIL_H_ -//------------------------------------------------------------------- -// Typedef -//------------------------------------------------------------------- -// -// Struct -// -struct s3obj_entry{ - std::string normalname; // normalized name: if empty, object is normalized name. - std::string orgname; // original name: if empty, object is original name. - std::string etag; - bool is_dir; - - s3obj_entry() : is_dir(false) {} -}; - -typedef std::map s3obj_t; -typedef std::list s3obj_list_t; - -// -// Class -// -class S3ObjList -{ - private: - s3obj_t objects; - - private: - bool insert_normalized(const char* name, const char* normalized, bool is_dir); - const s3obj_entry* GetS3Obj(const char* name) const; - - s3obj_t::const_iterator begin(void) const { - return objects.begin(); - } - s3obj_t::const_iterator end(void) const { - return objects.end(); - } - - public: - S3ObjList() {} - ~S3ObjList() {} - - bool IsEmpty(void) const { - return objects.empty(); - } - bool insert(const char* name, const char* etag = NULL, bool is_dir = false); - std::string GetOrgName(const char* name) const; - std::string GetNormalizedName(const char* name) const; - std::string GetETag(const char* name) const; - bool IsDir(const char* name) const; - bool GetNameList(s3obj_list_t& list, bool OnlyNormalized = true, bool CutSlash = true) const; - bool GetLastName(std::string& lastname) const; - - static bool MakeHierarchizedList(s3obj_list_t& list, bool haveSlash); -}; - -typedef struct mvnode { - char *old_path; - char *new_path; - bool is_dir; - bool is_normdir; - struct mvnode *prev; - struct mvnode *next; -} MVNODE; - -class AutoLock -{ - public: - enum Type { - NO_WAIT = 1, - ALREADY_LOCKED = 2, - NONE = 0 - }; - explicit AutoLock(pthread_mutex_t* pmutex, Type type = NONE); - bool isLockAcquired() const; - ~AutoLock(); - - private: - AutoLock(const AutoLock&); - pthread_mutex_t* const auto_mutex; - bool is_lock_acquired; -}; - 
//------------------------------------------------------------------- // Functions //------------------------------------------------------------------- std::string get_realpath(const char *path); -MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, bool normdir = false); -MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const char *new_path, bool is_dir, bool normdir = false); -void free_mvnodes(MVNODE *head); - void init_sysconf_vars(); std::string get_username(uid_t uid); int is_uid_include_group(uid_t uid, gid_t gid); @@ -119,6 +34,7 @@ std::string mydirname(const char* path); std::string mydirname(const std::string& path); std::string mybasename(const char* path); std::string mybasename(const std::string& path); + int mkdirp(const std::string& path, mode_t mode); std::string get_exist_directory_path(const std::string& path); bool check_exist_dir_permission(const char* dirpath); @@ -126,36 +42,13 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own); bool compare_sysname(const char* target); -time_t get_mtime(const char *s); -time_t get_mtime(const headers_t& meta, bool overcheck = true); -time_t get_ctime(const headers_t& meta, bool overcheck = true); -off_t get_size(const char *s); -off_t get_size(const headers_t& meta); -mode_t get_mode(const char *s, int base = 0); -mode_t get_mode(const headers_t& meta, const char* path = NULL, bool checkdir = false, bool forcedir = false); -uid_t get_uid(const char *s); -uid_t get_uid(const headers_t& meta); -gid_t get_gid(const char *s); -gid_t get_gid(const headers_t& meta); -blkcnt_t get_blocks(off_t size); -time_t cvtIAMExpireStringToTime(const char* s); -time_t get_lastmodified(const char* s); -time_t get_lastmodified(const headers_t& meta); -bool is_need_check_obj_detail(const headers_t& meta); -bool merge_headers(headers_t& base, const headers_t& additional, bool add_noexist); -bool simple_parse_xml(const char* data, size_t len, const char* key, 
std::string& value); - -void show_usage(void); -void show_help(void); -void show_version(void); - #endif // S3FS_S3FS_UTIL_H_ /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/s3fs_xml.cpp b/src/s3fs_xml.cpp new file mode 100644 index 0000000..f077d7f --- /dev/null +++ b/src/s3fs_xml.cpp @@ -0,0 +1,499 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Takeshi Nakatani + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include +#include + +#include "common.h" +#include "s3fs.h" +#include "s3fs_xml.h" +#include "s3fs_util.h" + +using namespace std; + +//------------------------------------------------------------------- +// Variables +//------------------------------------------------------------------- +static const char* c_strErrorObjectName = "FILE or SUBDIR in DIR"; + +//------------------------------------------------------------------- +// Functions +//------------------------------------------------------------------- +static bool GetXmlNsUrl(xmlDocPtr doc, string& nsurl) +{ + static time_t tmLast = 0; // cache for 60 sec. 
+ static string strNs; + bool result = false; + + if(!doc){ + return false; + } + if((tmLast + 60) < time(NULL)){ + // refresh + tmLast = time(NULL); + strNs = ""; + xmlNodePtr pRootNode = xmlDocGetRootElement(doc); + if(pRootNode){ + xmlNsPtr* nslist = xmlGetNsList(doc, pRootNode); + if(nslist){ + if(nslist[0] && nslist[0]->href){ + strNs = (const char*)(nslist[0]->href); + } + S3FS_XMLFREE(nslist); + } + } + } + if(!strNs.empty()){ + nsurl = strNs; + result = true; + } + return result; +} + +static xmlChar* get_base_exp(xmlDocPtr doc, const char* exp) +{ + xmlXPathObjectPtr marker_xp; + string xmlnsurl; + string exp_string; + + if(!doc){ + return NULL; + } + xmlXPathContextPtr ctx = xmlXPathNewContext(doc); + + if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){ + xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str()); + exp_string = "/s3:ListBucketResult/s3:"; + } else { + exp_string = "/ListBucketResult/"; + } + + exp_string += exp; + + if(NULL == (marker_xp = xmlXPathEvalExpression((xmlChar *)exp_string.c_str(), ctx))){ + xmlXPathFreeContext(ctx); + return NULL; + } + if(xmlXPathNodeSetIsEmpty(marker_xp->nodesetval)){ + S3FS_PRN_ERR("marker_xp->nodesetval is empty."); + xmlXPathFreeObject(marker_xp); + xmlXPathFreeContext(ctx); + return NULL; + } + xmlNodeSetPtr nodes = marker_xp->nodesetval; + xmlChar* result = xmlNodeListGetString(doc, nodes->nodeTab[0]->xmlChildrenNode, 1); + + xmlXPathFreeObject(marker_xp); + xmlXPathFreeContext(ctx); + + return result; +} + +static xmlChar* get_prefix(xmlDocPtr doc) +{ + return get_base_exp(doc, "Prefix"); +} + +xmlChar* get_next_marker(xmlDocPtr doc) +{ + return get_base_exp(doc, "NextMarker"); +} + +// return: the pointer to object name on allocated memory. 
+// the pointer to "c_strErrorObjectName".(not allocated) +// NULL(a case of something error occurred) +static char* get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path) +{ + // Get full path + xmlChar* fullpath = xmlNodeListGetString(doc, node, 1); + if(!fullpath){ + S3FS_PRN_ERR("could not get object full path name.."); + return NULL; + } + // basepath(path) is as same as fullpath. + if(0 == strcmp((char*)fullpath, path)){ + xmlFree(fullpath); + return (char*)c_strErrorObjectName; + } + + // Make dir path and filename + string strdirpath = mydirname(string((char*)fullpath)); + string strmybpath = mybasename(string((char*)fullpath)); + const char* dirpath = strdirpath.c_str(); + const char* mybname = strmybpath.c_str(); + const char* basepath= (path && '/' == path[0]) ? &path[1] : path; + xmlFree(fullpath); + + if(!mybname || '\0' == mybname[0]){ + return NULL; + } + + // check subdir & file in subdir + if(dirpath && 0 < strlen(dirpath)){ + // case of "/" + if(0 == strcmp(mybname, "/") && 0 == strcmp(dirpath, "/")){ + return (char*)c_strErrorObjectName; + } + // case of "." + if(0 == strcmp(mybname, ".") && 0 == strcmp(dirpath, ".")){ + return (char*)c_strErrorObjectName; + } + // case of ".." 
+ if(0 == strcmp(mybname, "..") && 0 == strcmp(dirpath, ".")){ + return (char*)c_strErrorObjectName; + } + // case of "name" + if(0 == strcmp(dirpath, ".")){ + // OK + return strdup(mybname); + }else{ + if(basepath && 0 == strcmp(dirpath, basepath)){ + // OK + return strdup(mybname); + }else if(basepath && 0 < strlen(basepath) && '/' == basepath[strlen(basepath) - 1] && 0 == strncmp(dirpath, basepath, strlen(basepath) - 1)){ + string withdirname; + if(strlen(dirpath) > strlen(basepath)){ + withdirname = &dirpath[strlen(basepath)]; + } + if(0 < withdirname.length() && '/' != withdirname[withdirname.length() - 1]){ + withdirname += "/"; + } + withdirname += mybname; + return strdup(withdirname.c_str()); + } + } + } + // case of something wrong + return (char*)c_strErrorObjectName; +} + +static xmlChar* get_exp_value_xml(xmlDocPtr doc, xmlXPathContextPtr ctx, const char* exp_key) +{ + if(!doc || !ctx || !exp_key){ + return NULL; + } + + xmlXPathObjectPtr exp; + xmlNodeSetPtr exp_nodes; + xmlChar* exp_value; + + // search exp_key tag + if(NULL == (exp = xmlXPathEvalExpression((xmlChar*)exp_key, ctx))){ + S3FS_PRN_ERR("Could not find key(%s).", exp_key); + return NULL; + } + if(xmlXPathNodeSetIsEmpty(exp->nodesetval)){ + S3FS_PRN_ERR("Key(%s) node is empty.", exp_key); + S3FS_XMLXPATHFREEOBJECT(exp); + return NULL; + } + // get exp_key value & set in struct + exp_nodes = exp->nodesetval; + if(NULL == (exp_value = xmlNodeListGetString(doc, exp_nodes->nodeTab[0]->xmlChildrenNode, 1))){ + S3FS_PRN_ERR("Key(%s) value is empty.", exp_key); + S3FS_XMLXPATHFREEOBJECT(exp); + return NULL; + } + + S3FS_XMLXPATHFREEOBJECT(exp); + return exp_value; +} + +bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list) +{ + if(!doc){ + return false; + } + + xmlXPathContextPtr ctx = xmlXPathNewContext(doc);; + + string xmlnsurl; + string ex_upload = "//"; + string ex_key; + string ex_id; + string ex_date; + + if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){ + xmlXPathRegisterNs(ctx, 
(xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str()); + ex_upload += "s3:"; + ex_key += "s3:"; + ex_id += "s3:"; + ex_date += "s3:"; + } + ex_upload += "Upload"; + ex_key += "Key"; + ex_id += "UploadId"; + ex_date += "Initiated"; + + // get "Upload" Tags + xmlXPathObjectPtr upload_xp; + if(NULL == (upload_xp = xmlXPathEvalExpression((xmlChar*)ex_upload.c_str(), ctx))){ + S3FS_PRN_ERR("xmlXPathEvalExpression returns null."); + return false; + } + if(xmlXPathNodeSetIsEmpty(upload_xp->nodesetval)){ + S3FS_PRN_INFO("upload_xp->nodesetval is empty."); + S3FS_XMLXPATHFREEOBJECT(upload_xp); + S3FS_XMLXPATHFREECONTEXT(ctx); + return true; + } + + // Make list + int cnt; + xmlNodeSetPtr upload_nodes; + list.clear(); + for(cnt = 0, upload_nodes = upload_xp->nodesetval; cnt < upload_nodes->nodeNr; cnt++){ + ctx->node = upload_nodes->nodeTab[cnt]; + + INCOMP_MPU_INFO part; + xmlChar* ex_value; + + // search "Key" tag + if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_key.c_str()))){ + continue; + } + if('/' != *((char*)ex_value)){ + part.key = "/"; + }else{ + part.key = ""; + } + part.key += (char*)ex_value; + S3FS_XMLFREE(ex_value); + + // search "UploadId" tag + if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_id.c_str()))){ + continue; + } + part.id = (char*)ex_value; + S3FS_XMLFREE(ex_value); + + // search "Initiated" tag + if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_date.c_str()))){ + continue; + } + part.date = (char*)ex_value; + S3FS_XMLFREE(ex_value); + + list.push_back(part); + } + + S3FS_XMLXPATHFREEOBJECT(upload_xp); + S3FS_XMLXPATHFREECONTEXT(ctx); + + return true; +} + +bool is_truncated(xmlDocPtr doc) +{ + bool result = false; + + xmlChar* strTruncate = get_base_exp(doc, "IsTruncated"); + if(!strTruncate){ + return false; + } + if(0 == strcasecmp((const char*)strTruncate, "true")){ + result = true; + } + xmlFree(strTruncate); + return result; +} + +int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* 
ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head) +{ + xmlXPathObjectPtr contents_xp; + xmlNodeSetPtr content_nodes; + + if(NULL == (contents_xp = xmlXPathEvalExpression((xmlChar*)ex_contents, ctx))){ + S3FS_PRN_ERR("xmlXPathEvalExpression returns null."); + return -1; + } + if(xmlXPathNodeSetIsEmpty(contents_xp->nodesetval)){ + S3FS_PRN_DBG("contents_xp->nodesetval is empty."); + S3FS_XMLXPATHFREEOBJECT(contents_xp); + return 0; + } + content_nodes = contents_xp->nodesetval; + + bool is_dir; + string stretag; + int i; + for(i = 0; i < content_nodes->nodeNr; i++){ + ctx->node = content_nodes->nodeTab[i]; + + // object name + xmlXPathObjectPtr key; + if(NULL == (key = xmlXPathEvalExpression((xmlChar*)ex_key, ctx))){ + S3FS_PRN_WARN("key is null. but continue."); + continue; + } + if(xmlXPathNodeSetIsEmpty(key->nodesetval)){ + S3FS_PRN_WARN("node is empty. but continue."); + xmlXPathFreeObject(key); + continue; + } + xmlNodeSetPtr key_nodes = key->nodesetval; + char* name = get_object_name(doc, key_nodes->nodeTab[0]->xmlChildrenNode, path); + + if(!name){ + S3FS_PRN_WARN("name is something wrong. but continue."); + + }else if((const char*)name != c_strErrorObjectName){ + is_dir = isCPrefix ? true : false; + stretag = ""; + + if(!isCPrefix && ex_etag){ + // Get ETag + xmlXPathObjectPtr ETag; + if(NULL != (ETag = xmlXPathEvalExpression((xmlChar*)ex_etag, ctx))){ + if(xmlXPathNodeSetIsEmpty(ETag->nodesetval)){ + S3FS_PRN_INFO("ETag->nodesetval is empty."); + }else{ + xmlNodeSetPtr etag_nodes = ETag->nodesetval; + xmlChar* petag = xmlNodeListGetString(doc, etag_nodes->nodeTab[0]->xmlChildrenNode, 1); + if(petag){ + stretag = (char*)petag; + xmlFree(petag); + } + } + xmlXPathFreeObject(ETag); + } + } + if(!head.insert(name, (0 < stretag.length() ? 
stretag.c_str() : NULL), is_dir)){ + S3FS_PRN_ERR("insert_object returns with error."); + xmlXPathFreeObject(key); + xmlXPathFreeObject(contents_xp); + free(name); + S3FS_MALLOCTRIM(0); + return -1; + } + free(name); + }else{ + S3FS_PRN_DBG("name is file or subdir in dir. but continue."); + } + xmlXPathFreeObject(key); + } + S3FS_XMLXPATHFREEOBJECT(contents_xp); + + return 0; +} + +int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head) +{ + string xmlnsurl; + string ex_contents = "//"; + string ex_key; + string ex_cprefix = "//"; + string ex_prefix; + string ex_etag; + + if(!doc){ + return -1; + } + + // If there is not , use path instead of it. + xmlChar* pprefix = get_prefix(doc); + string prefix = (pprefix ? (char*)pprefix : path ? path : ""); + if(pprefix){ + xmlFree(pprefix); + } + + xmlXPathContextPtr ctx = xmlXPathNewContext(doc); + + if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){ + xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str()); + ex_contents+= "s3:"; + ex_key += "s3:"; + ex_cprefix += "s3:"; + ex_prefix += "s3:"; + ex_etag += "s3:"; + } + ex_contents+= "Contents"; + ex_key += "Key"; + ex_cprefix += "CommonPrefixes"; + ex_prefix += "Prefix"; + ex_etag += "ETag"; + + if(-1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_contents.c_str(), ex_key.c_str(), ex_etag.c_str(), 0, head) || + -1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_cprefix.c_str(), ex_prefix.c_str(), NULL, 1, head) ) + { + S3FS_PRN_ERR("append_objects_from_xml_ex returns with error."); + S3FS_XMLXPATHFREECONTEXT(ctx); + return -1; + } + S3FS_XMLXPATHFREECONTEXT(ctx); + + return 0; +} + +//------------------------------------------------------------------- +// Utility functions +//------------------------------------------------------------------- +bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value) +{ + bool result = false; + + if(!data || !key){ + return false; + } + value.clear(); + + 
xmlDocPtr doc; + if(NULL == (doc = xmlReadMemory(data, len, "", NULL, 0))){ + return false; + } + + if(NULL == doc->children){ + S3FS_XMLFREEDOC(doc); + return false; + } + for(xmlNodePtr cur_node = doc->children->children; NULL != cur_node; cur_node = cur_node->next){ + // For DEBUG + // string cur_node_name(reinterpret_cast(cur_node->name)); + // printf("cur_node_name: %s\n", cur_node_name.c_str()); + + if(XML_ELEMENT_NODE == cur_node->type){ + string elementName = reinterpret_cast(cur_node->name); + // For DEBUG + // printf("elementName: %s\n", elementName.c_str()); + + if(cur_node->children){ + if(XML_TEXT_NODE == cur_node->children->type){ + if(elementName == key) { + value = reinterpret_cast(cur_node->children->content); + result = true; + break; + } + } + } + } + } + S3FS_XMLFREEDOC(doc); + + return result; +} + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/s3fs_xml.h b/src/s3fs_xml.h new file mode 100644 index 0000000..d029720 --- /dev/null +++ b/src/s3fs_xml.h @@ -0,0 +1,53 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Randy Rizun + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef S3FS_S3FS_XML_H_ +#define S3FS_S3FS_XML_H_ + +#include +#include +#include + +#include + +#include "s3objlist.h" +#include "mpu_util.h" + +//------------------------------------------------------------------- +// Functions +//------------------------------------------------------------------- +bool is_truncated(xmlDocPtr doc); +int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head); +int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head); +xmlChar* get_next_marker(xmlDocPtr doc); +bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list); + +bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value); + +#endif // S3FS_S3FS_XML_H_ + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/s3objlist.cpp b/src/s3objlist.cpp new file mode 100644 index 0000000..28602a6 --- /dev/null +++ b/src/s3objlist.cpp @@ -0,0 +1,286 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Takeshi Nakatani + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include +#include + +#include "common.h" +#include "s3fs.h" +#include "s3objlist.h" + +using namespace std; + +//------------------------------------------------------------------- +// Class S3ObjList +//------------------------------------------------------------------- +// New class S3ObjList is base on old s3_object struct. +// This class is for S3 compatible clients. +// +// If name is terminated by "/", it is forced dir type. +// If name is terminated by "_$folder$", it is forced dir type. +// If is_dir is true and name is not terminated by "/", the name is added "/". +// +bool S3ObjList::insert(const char* name, const char* etag, bool is_dir) +{ + if(!name || '\0' == name[0]){ + return false; + } + + s3obj_t::iterator iter; + string newname; + string orgname = name; + + // Normalization + string::size_type pos = orgname.find("_$folder$"); + if(string::npos != pos){ + newname = orgname.substr(0, pos); + is_dir = true; + }else{ + newname = orgname; + } + if(is_dir){ + if('/' != newname[newname.length() - 1]){ + newname += "/"; + } + }else{ + if('/' == newname[newname.length() - 1]){ + is_dir = true; + } + } + + // Check derived name object. + if(is_dir){ + string chkname = newname.substr(0, newname.length() - 1); + if(objects.end() != (iter = objects.find(chkname))){ + // found "dir" object --> remove it. + objects.erase(iter); + } + }else{ + string chkname = newname + "/"; + if(objects.end() != (iter = objects.find(chkname))){ + // found "dir/" object --> not add new object. + // and add normalization + return insert_normalized(orgname.c_str(), chkname.c_str(), true); + } + } + + // Add object + if(objects.end() != (iter = objects.find(newname))){ + // Found same object --> update information. 
+ (*iter).second.normalname.erase(); + (*iter).second.orgname = orgname; + (*iter).second.is_dir = is_dir; + if(etag){ + (*iter).second.etag = string(etag); // over write + } + }else{ + // add new object + s3obj_entry newobject; + newobject.orgname = orgname; + newobject.is_dir = is_dir; + if(etag){ + newobject.etag = etag; + } + objects[newname] = newobject; + } + + // add normalization + return insert_normalized(orgname.c_str(), newname.c_str(), is_dir); +} + +bool S3ObjList::insert_normalized(const char* name, const char* normalized, bool is_dir) +{ + if(!name || '\0' == name[0] || !normalized || '\0' == normalized[0]){ + return false; + } + if(0 == strcmp(name, normalized)){ + return true; + } + + s3obj_t::iterator iter; + if(objects.end() != (iter = objects.find(name))){ + // found name --> over write + iter->second.orgname.erase(); + iter->second.etag.erase(); + iter->second.normalname = normalized; + iter->second.is_dir = is_dir; + }else{ + // not found --> add new object + s3obj_entry newobject; + newobject.normalname = normalized; + newobject.is_dir = is_dir; + objects[name] = newobject; + } + return true; +} + +const s3obj_entry* S3ObjList::GetS3Obj(const char* name) const +{ + s3obj_t::const_iterator iter; + + if(!name || '\0' == name[0]){ + return NULL; + } + if(objects.end() == (iter = objects.find(name))){ + return NULL; + } + return &((*iter).second); +} + +string S3ObjList::GetOrgName(const char* name) const +{ + const s3obj_entry* ps3obj; + + if(!name || '\0' == name[0]){ + return string(""); + } + if(NULL == (ps3obj = GetS3Obj(name))){ + return string(""); + } + return ps3obj->orgname; +} + +string S3ObjList::GetNormalizedName(const char* name) const +{ + const s3obj_entry* ps3obj; + + if(!name || '\0' == name[0]){ + return string(""); + } + if(NULL == (ps3obj = GetS3Obj(name))){ + return string(""); + } + if(0 == (ps3obj->normalname).length()){ + return string(name); + } + return ps3obj->normalname; +} + +string S3ObjList::GetETag(const char* 
name) const +{ + const s3obj_entry* ps3obj; + + if(!name || '\0' == name[0]){ + return string(""); + } + if(NULL == (ps3obj = GetS3Obj(name))){ + return string(""); + } + return ps3obj->etag; +} + +bool S3ObjList::IsDir(const char* name) const +{ + const s3obj_entry* ps3obj; + + if(NULL == (ps3obj = GetS3Obj(name))){ + return false; + } + return ps3obj->is_dir; +} + +bool S3ObjList::GetLastName(std::string& lastname) const +{ + bool result = false; + lastname = ""; + for(s3obj_t::const_iterator iter = objects.begin(); iter != objects.end(); ++iter){ + if((*iter).second.orgname.length()){ + if(0 > strcmp(lastname.c_str(), (*iter).second.orgname.c_str())){ + lastname = (*iter).second.orgname; + result = true; + } + }else{ + if(0 > strcmp(lastname.c_str(), (*iter).second.normalname.c_str())){ + lastname = (*iter).second.normalname; + result = true; + } + } + } + return result; +} + +bool S3ObjList::GetNameList(s3obj_list_t& list, bool OnlyNormalized, bool CutSlash) const +{ + s3obj_t::const_iterator iter; + + for(iter = objects.begin(); objects.end() != iter; ++iter){ + if(OnlyNormalized && 0 != (*iter).second.normalname.length()){ + continue; + } + string name = (*iter).first; + if(CutSlash && 1 < name.length() && '/' == name[name.length() - 1]){ + // only "/" string is skipped this. 
+ name = name.substr(0, name.length() - 1); + } + list.push_back(name); + } + return true; +} + +typedef std::map s3obj_h_t; + +bool S3ObjList::MakeHierarchizedList(s3obj_list_t& list, bool haveSlash) +{ + s3obj_h_t h_map; + s3obj_h_t::iterator hiter; + s3obj_list_t::const_iterator liter; + + for(liter = list.begin(); list.end() != liter; ++liter){ + string strtmp = (*liter); + if(1 < strtmp.length() && '/' == strtmp[strtmp.length() - 1]){ + strtmp = strtmp.substr(0, strtmp.length() - 1); + } + h_map[strtmp] = true; + + // check hierarchized directory + for(string::size_type pos = strtmp.find_last_of('/'); string::npos != pos; pos = strtmp.find_last_of('/')){ + strtmp = strtmp.substr(0, pos); + if(0 == strtmp.length() || "/" == strtmp){ + break; + } + if(h_map.end() == h_map.find(strtmp)){ + // not found + h_map[strtmp] = false; + } + } + } + + // check map and add lost hierarchized directory. + for(hiter = h_map.begin(); hiter != h_map.end(); ++hiter){ + if(false == (*hiter).second){ + // add hierarchized directory. + string strtmp = (*hiter).first; + if(haveSlash){ + strtmp += "/"; + } + list.push_back(strtmp); + } + } + return true; +} + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/s3objlist.h b/src/s3objlist.h new file mode 100644 index 0000000..e80108f --- /dev/null +++ b/src/s3objlist.h @@ -0,0 +1,79 @@ +/* + * s3fs - FUSE-based file system backed by Amazon S3 + * + * Copyright(C) 2007 Randy Rizun + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef S3FS_S3OBJLIST_H_ +#define S3FS_S3OBJLIST_H_ + +//------------------------------------------------------------------- +// Structure / Typedef +//------------------------------------------------------------------- +struct s3obj_entry{ + std::string normalname; // normalized name: if empty, object is normalized name. + std::string orgname; // original name: if empty, object is original name. + std::string etag; + bool is_dir; + + s3obj_entry() : is_dir(false) {} +}; + +typedef std::map s3obj_t; +typedef std::list s3obj_list_t; + +//------------------------------------------------------------------- +// Class S3ObjList +//------------------------------------------------------------------- +class S3ObjList +{ + private: + s3obj_t objects; + + private: + bool insert_normalized(const char* name, const char* normalized, bool is_dir); + const s3obj_entry* GetS3Obj(const char* name) const; + + s3obj_t::const_iterator begin(void) const { return objects.begin(); } + s3obj_t::const_iterator end(void) const { return objects.end(); } + + public: + S3ObjList() {} + ~S3ObjList() {} + + bool IsEmpty(void) const { return objects.empty(); } + bool insert(const char* name, const char* etag = NULL, bool is_dir = false); + std::string GetOrgName(const char* name) const; + std::string GetNormalizedName(const char* name) const; + std::string GetETag(const char* name) const; + bool IsDir(const char* name) const; + bool GetNameList(s3obj_list_t& list, bool OnlyNormalized = true, bool CutSlash = true) const; + 
bool GetLastName(std::string& lastname) const; + + static bool MakeHierarchizedList(s3obj_list_t& list, bool haveSlash); +}; + +#endif // S3FS_S3OBJLIST_H_ + +/* +* Local variables: +* tab-width: 4 +* c-basic-offset: 4 +* End: +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 +*/ diff --git a/src/sighandlers.cpp b/src/sighandlers.cpp index 1961aaa..82ebf51 100644 --- a/src/sighandlers.cpp +++ b/src/sighandlers.cpp @@ -20,33 +20,16 @@ #include #include -#include -#include -#include -#include -#include #include - -#include -#include -#include -#include -#include +#include #include "common.h" +#include "s3fs.h" #include "sighandlers.h" -#include "curl.h" #include "fdcache.h" -#include "psemaphore.h" using namespace std; -//------------------------------------------------------------------- -// Global variables -//------------------------------------------------------------------- -s3fs_log_level debug_level = S3FS_LOG_CRIT; -const char* s3fs_log_nest[S3FS_LOG_NEST_MAX] = {"", " ", " ", " "}; - //------------------------------------------------------------------- // Class S3fsSignals //------------------------------------------------------------------- @@ -58,133 +41,133 @@ bool S3fsSignals::enableUsr1 = false; //------------------------------------------------------------------- bool S3fsSignals::Initialize() { - if(!S3fsSignals::pSingleton){ - S3fsSignals::pSingleton = new S3fsSignals; - } - return true; + if(!S3fsSignals::pSingleton){ + S3fsSignals::pSingleton = new S3fsSignals; + } + return true; } bool S3fsSignals::Destroy() { - if(S3fsSignals::pSingleton){ - delete S3fsSignals::pSingleton; - } - return true; + if(S3fsSignals::pSingleton){ + delete S3fsSignals::pSingleton; + } + return true; } void S3fsSignals::HandlerUSR1(int sig) { - if(SIGUSR1 != sig){ - S3FS_PRN_ERR("The handler for SIGUSR1 received signal(%d)", sig); - return; - } + if(SIGUSR1 != sig){ + S3FS_PRN_ERR("The handler for SIGUSR1 received signal(%d)", sig); + return; + } - 
S3fsSignals* pSigobj = S3fsSignals::get(); - if(!pSigobj){ - S3FS_PRN_ERR("S3fsSignals object is not initialized."); - return; - } + S3fsSignals* pSigobj = S3fsSignals::get(); + if(!pSigobj){ + S3FS_PRN_ERR("S3fsSignals object is not initialized."); + return; + } - if(!pSigobj->WakeupUsr1Thread()){ - S3FS_PRN_ERR("Failed to wakeup the thread for SIGUSR1."); - return; - } + if(!pSigobj->WakeupUsr1Thread()){ + S3FS_PRN_ERR("Failed to wakeup the thread for SIGUSR1."); + return; + } } bool S3fsSignals::SetUsr1Handler(const char* path) { - if(!FdManager::HaveLseekHole()){ - S3FS_PRN_ERR("Could not set SIGUSR1 for checking cache, because this system does not support SEEK_DATA/SEEK_HOLE in lseek function."); - return false; - } + if(!FdManager::HaveLseekHole()){ + S3FS_PRN_ERR("Could not set SIGUSR1 for checking cache, because this system does not support SEEK_DATA/SEEK_HOLE in lseek function."); + return false; + } - // set output file - if(!FdManager::SetCacheCheckOutput(path)){ - S3FS_PRN_ERR("Could not set output file(%s) for checking cache.", path ? path : "null(stdout)"); - return false; - } + // set output file + if(!FdManager::SetCacheCheckOutput(path)){ + S3FS_PRN_ERR("Could not set output file(%s) for checking cache.", path ? 
path : "null(stdout)"); + return false; + } - S3fsSignals::enableUsr1 = true; + S3fsSignals::enableUsr1 = true; - return true; + return true; } void* S3fsSignals::CheckCacheWorker(void* arg) { - Semaphore* pSem = static_cast(arg); - if(!pSem){ - pthread_exit(NULL); - } - if(!S3fsSignals::enableUsr1){ - pthread_exit(NULL); - } - - // wait and loop - while(S3fsSignals::enableUsr1){ - // wait - pSem->wait(); + Semaphore* pSem = static_cast(arg); + if(!pSem){ + pthread_exit(NULL); + } if(!S3fsSignals::enableUsr1){ - break; // assap + pthread_exit(NULL); } - // check all cache - if(!FdManager::get()->CheckAllCache()){ - S3FS_PRN_ERR("Processing failed due to some problem."); - } + // wait and loop + while(S3fsSignals::enableUsr1){ + // wait + pSem->wait(); + if(!S3fsSignals::enableUsr1){ + break; // assap + } - // do not allow request queuing - for(int value = pSem->get_value(); 0 < value; value = pSem->get_value()){ - pSem->wait(); + // check all cache + if(!FdManager::get()->CheckAllCache()){ + S3FS_PRN_ERR("Processing failed due to some problem."); + } + + // do not allow request queuing + for(int value = pSem->get_value(); 0 < value; value = pSem->get_value()){ + pSem->wait(); + } } - } - return NULL; + return NULL; } void S3fsSignals::HandlerUSR2(int sig) { - if(SIGUSR2 == sig){ - S3fsSignals::BumpupLogLevel(); - }else{ - S3FS_PRN_ERR("The handler for SIGUSR2 received signal(%d)", sig); - } + if(SIGUSR2 == sig){ + S3fsSignals::BumpupLogLevel(); + }else{ + S3FS_PRN_ERR("The handler for SIGUSR2 received signal(%d)", sig); + } } bool S3fsSignals::InitUsr2Handler() { - struct sigaction sa; + struct sigaction sa; - memset(&sa, 0, sizeof(struct sigaction)); - sa.sa_handler = S3fsSignals::HandlerUSR2; - sa.sa_flags = SA_RESTART; - if(0 != sigaction(SIGUSR2, &sa, NULL)){ - return false; - } - return true; + memset(&sa, 0, sizeof(struct sigaction)); + sa.sa_handler = S3fsSignals::HandlerUSR2; + sa.sa_flags = SA_RESTART; + if(0 != sigaction(SIGUSR2, &sa, NULL)){ + return 
false; + } + return true; } s3fs_log_level S3fsSignals::SetLogLevel(s3fs_log_level level) { - if(level == debug_level){ - return debug_level; - } - s3fs_log_level old = debug_level; - debug_level = level; - setlogmask(LOG_UPTO(S3FS_LOG_LEVEL_TO_SYSLOG(debug_level))); - S3FS_PRN_CRIT("change debug level from %sto %s", S3FS_LOG_LEVEL_STRING(old), S3FS_LOG_LEVEL_STRING(debug_level)); - return old; + if(level == debug_level){ + return debug_level; + } + s3fs_log_level old = debug_level; + debug_level = level; + setlogmask(LOG_UPTO(S3FS_LOG_LEVEL_TO_SYSLOG(debug_level))); + S3FS_PRN_CRIT("change debug level from %sto %s", S3FS_LOG_LEVEL_STRING(old), S3FS_LOG_LEVEL_STRING(debug_level)); + return old; } s3fs_log_level S3fsSignals::BumpupLogLevel() { - s3fs_log_level old = debug_level; - debug_level = ( S3FS_LOG_CRIT == debug_level ? S3FS_LOG_ERR : - S3FS_LOG_ERR == debug_level ? S3FS_LOG_WARN : - S3FS_LOG_WARN == debug_level ? S3FS_LOG_INFO : - S3FS_LOG_INFO == debug_level ? S3FS_LOG_DBG : - S3FS_LOG_CRIT ); - setlogmask(LOG_UPTO(S3FS_LOG_LEVEL_TO_SYSLOG(debug_level))); - S3FS_PRN_CRIT("change debug level from %sto %s", S3FS_LOG_LEVEL_STRING(old), S3FS_LOG_LEVEL_STRING(debug_level)); - return old; + s3fs_log_level old = debug_level; + debug_level = ( S3FS_LOG_CRIT == debug_level ? S3FS_LOG_ERR : + S3FS_LOG_ERR == debug_level ? S3FS_LOG_WARN : + S3FS_LOG_WARN == debug_level ? S3FS_LOG_INFO : + S3FS_LOG_INFO == debug_level ? 
S3FS_LOG_DBG : + S3FS_LOG_CRIT ); + setlogmask(LOG_UPTO(S3FS_LOG_LEVEL_TO_SYSLOG(debug_level))); + S3FS_PRN_CRIT("change debug level from %sto %s", S3FS_LOG_LEVEL_STRING(old), S3FS_LOG_LEVEL_STRING(debug_level)); + return old; } //------------------------------------------------------------------- @@ -192,100 +175,100 @@ s3fs_log_level S3fsSignals::BumpupLogLevel() //------------------------------------------------------------------- S3fsSignals::S3fsSignals() : pThreadUsr1(NULL), pSemUsr1(NULL) { - if(S3fsSignals::enableUsr1){ - if(!InitUsr1Handler()){ - S3FS_PRN_ERR("failed creating thread for SIGUSR1 handler, but continue..."); + if(S3fsSignals::enableUsr1){ + if(!InitUsr1Handler()){ + S3FS_PRN_ERR("failed creating thread for SIGUSR1 handler, but continue..."); + } + } + if(!S3fsSignals::InitUsr2Handler()){ + S3FS_PRN_ERR("failed to initialize SIGUSR2 handler for bumping log level, but continue..."); } - } - if(!S3fsSignals::InitUsr2Handler()){ - S3FS_PRN_ERR("failed to initialize SIGUSR2 handler for bumping log level, but continue..."); - } } S3fsSignals::~S3fsSignals() { - if(S3fsSignals::enableUsr1){ - if(!DestroyUsr1Handler()){ - S3FS_PRN_ERR("failed stopping thread for SIGUSR1 handler, but continue..."); + if(S3fsSignals::enableUsr1){ + if(!DestroyUsr1Handler()){ + S3FS_PRN_ERR("failed stopping thread for SIGUSR1 handler, but continue..."); + } } - } } bool S3fsSignals::InitUsr1Handler() { - if(pThreadUsr1 || pSemUsr1){ - S3FS_PRN_ERR("Already run thread for SIGUSR1"); - return false; - } + if(pThreadUsr1 || pSemUsr1){ + S3FS_PRN_ERR("Already run thread for SIGUSR1"); + return false; + } - // create thread - int result; - pSemUsr1 = new Semaphore(0); - pThreadUsr1 = new pthread_t; - if(0 != (result = pthread_create(pThreadUsr1, NULL, S3fsSignals::CheckCacheWorker, static_cast(pSemUsr1)))){ - S3FS_PRN_ERR("Could not create thread for SIGUSR1 by %d", result); - delete pSemUsr1; - delete pThreadUsr1; - pSemUsr1 = NULL; - pThreadUsr1 = NULL; - return false; - } 
+ // create thread + int result; + pSemUsr1 = new Semaphore(0); + pThreadUsr1 = new pthread_t; + if(0 != (result = pthread_create(pThreadUsr1, NULL, S3fsSignals::CheckCacheWorker, static_cast(pSemUsr1)))){ + S3FS_PRN_ERR("Could not create thread for SIGUSR1 by %d", result); + delete pSemUsr1; + delete pThreadUsr1; + pSemUsr1 = NULL; + pThreadUsr1 = NULL; + return false; + } - // set handler - struct sigaction sa; - memset(&sa, 0, sizeof(struct sigaction)); - sa.sa_handler = S3fsSignals::HandlerUSR1; - sa.sa_flags = SA_RESTART; - if(0 != sigaction(SIGUSR1, &sa, NULL)){ - S3FS_PRN_ERR("Could not set signal handler for SIGUSR1"); - DestroyUsr1Handler(); - return false; - } + // set handler + struct sigaction sa; + memset(&sa, 0, sizeof(struct sigaction)); + sa.sa_handler = S3fsSignals::HandlerUSR1; + sa.sa_flags = SA_RESTART; + if(0 != sigaction(SIGUSR1, &sa, NULL)){ + S3FS_PRN_ERR("Could not set signal handler for SIGUSR1"); + DestroyUsr1Handler(); + return false; + } - return true; + return true; } bool S3fsSignals::DestroyUsr1Handler() { - if(!pThreadUsr1 || !pSemUsr1){ - return false; - } - // for thread exit - S3fsSignals::enableUsr1 = false; + if(!pThreadUsr1 || !pSemUsr1){ + return false; + } + // for thread exit + S3fsSignals::enableUsr1 = false; - // wakeup thread - pSemUsr1->post(); + // wakeup thread + pSemUsr1->post(); - // wait for thread exiting - void* retval = NULL; - int result; - if(0 != (result = pthread_join(*pThreadUsr1, &retval))){ - S3FS_PRN_ERR("Could not stop thread for SIGUSR1 by %d", result); - return false; - } - delete pSemUsr1; - delete pThreadUsr1; - pSemUsr1 = NULL; - pThreadUsr1 = NULL; + // wait for thread exiting + void* retval = NULL; + int result; + if(0 != (result = pthread_join(*pThreadUsr1, &retval))){ + S3FS_PRN_ERR("Could not stop thread for SIGUSR1 by %d", result); + return false; + } + delete pSemUsr1; + delete pThreadUsr1; + pSemUsr1 = NULL; + pThreadUsr1 = NULL; - return true; + return true; } bool 
S3fsSignals::WakeupUsr1Thread() { - if(!pThreadUsr1 || !pSemUsr1){ - S3FS_PRN_ERR("The thread for SIGUSR1 is not setup."); - return false; - } - pSemUsr1->post(); - return true; + if(!pThreadUsr1 || !pSemUsr1){ + S3FS_PRN_ERR("The thread for SIGUSR1 is not setup."); + return false; + } + pSemUsr1->post(); + return true; } /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/sighandlers.h b/src/sighandlers.h index c071245..cd3c066 100644 --- a/src/sighandlers.h +++ b/src/sighandlers.h @@ -28,46 +28,46 @@ //---------------------------------------------- class S3fsSignals { - private: - static S3fsSignals* pSingleton; - static bool enableUsr1; + private: + static S3fsSignals* pSingleton; + static bool enableUsr1; - pthread_t* pThreadUsr1; - Semaphore* pSemUsr1; + pthread_t* pThreadUsr1; + Semaphore* pSemUsr1; - protected: - static S3fsSignals* get(void) { return pSingleton; } + protected: + static S3fsSignals* get(void) { return pSingleton; } - static void HandlerUSR1(int sig); - static void* CheckCacheWorker(void* arg); + static void HandlerUSR1(int sig); + static void* CheckCacheWorker(void* arg); - static void HandlerUSR2(int sig); - static bool InitUsr2Handler(void); + static void HandlerUSR2(int sig); + static bool InitUsr2Handler(void); - S3fsSignals(); - ~S3fsSignals(); + S3fsSignals(); + ~S3fsSignals(); - bool InitUsr1Handler(void); - bool DestroyUsr1Handler(void); - bool WakeupUsr1Thread(void); + bool InitUsr1Handler(void); + bool DestroyUsr1Handler(void); + bool WakeupUsr1Thread(void); - public: - static bool Initialize(void); - static bool Destroy(void); + public: + static bool Initialize(void); + static bool Destroy(void); - static bool SetUsr1Handler(const char* path); + static bool SetUsr1Handler(const char* path); - static s3fs_log_level 
SetLogLevel(s3fs_log_level level); - static s3fs_log_level BumpupLogLevel(void); + static s3fs_log_level SetLogLevel(s3fs_log_level level); + static s3fs_log_level BumpupLogLevel(void); }; #endif // S3FS_SIGHANDLERS_H_ /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/string_util.cpp b/src/string_util.cpp index 2ea0455..b6dd742 100644 --- a/src/string_util.cpp +++ b/src/string_util.cpp @@ -17,28 +17,35 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ -#include -#include + #include #include #include -#include -#include +#include +#include #include #include -#include -#include #include "common.h" +#include "s3fs.h" #include "string_util.h" using namespace std; -template std::string str(T value) { - std::ostringstream s; - s << value; - return s.str(); +//------------------------------------------------------------------- +// Gloval variables +//------------------------------------------------------------------- +const std::string SPACES = " \t\r\n"; + +//------------------------------------------------------------------- +// Templates +//------------------------------------------------------------------- +template std::string str(T value) +{ + std::ostringstream s; + s << value; + return s.str(); } template std::string str(short value); @@ -50,22 +57,25 @@ template std::string str(unsigned long value); template std::string str(long long value); template std::string str(unsigned long long value); +//------------------------------------------------------------------- +// Functions +//------------------------------------------------------------------- static const char hexAlphabet[] = "0123456789ABCDEF"; // replacement for C++11 std::stoll off_t 
s3fs_strtoofft(const char* str, int base) { - errno = 0; - char *temp; - long long result = strtoll(str, &temp, base); + errno = 0; + char *temp; + long long result = strtoll(str, &temp, base); - if(temp == str || *temp != '\0'){ - throw std::invalid_argument("s3fs_strtoofft"); - } - if((result == LLONG_MIN || result == LLONG_MAX) && errno == ERANGE){ - throw std::out_of_range("s3fs_strtoofft"); - } - return result; + if(temp == str || *temp != '\0'){ + throw std::invalid_argument("s3fs_strtoofft"); + } + if((result == LLONG_MIN || result == LLONG_MAX) && errno == ERANGE){ + throw std::out_of_range("s3fs_strtoofft"); + } + return result; } // wrapped s3fs_strtoofft() @@ -74,18 +84,18 @@ off_t s3fs_strtoofft(const char* str, int base) // bool try_strtoofft(const char* str, off_t& value, int base) { - if(str){ - try{ - value = s3fs_strtoofft(str, base); - }catch(std::exception &e){ - S3FS_PRN_WARN("something error is occurred in convert string(%s) to off_t.", str); - return false; + if(str){ + try{ + value = s3fs_strtoofft(str, base); + }catch(std::exception &e){ + S3FS_PRN_WARN("something error is occurred in convert string(%s) to off_t.", str); + return false; + } + }else{ + S3FS_PRN_WARN("parameter string is null."); + return false; } - }else{ - S3FS_PRN_WARN("parameter string is null."); - return false; - } - return true; + return true; } // wrapped try_strtoofft -> s3fs_strtoofft() @@ -95,144 +105,146 @@ bool try_strtoofft(const char* str, off_t& value, int base) // off_t cvt_strtoofft(const char* str, int base) { - off_t result = 0; - if(!try_strtoofft(str, result, base)){ - S3FS_PRN_WARN("something error is occurred in convert string(%s) to off_t, thus return 0 as default.", (str ? str : "null")); - return 0; - } - return result; + off_t result = 0; + if(!try_strtoofft(str, result, base)){ + S3FS_PRN_WARN("something error is occurred in convert string(%s) to off_t, thus return 0 as default.", (str ? 
str : "null")); + return 0; + } + return result; } string lower(string s) { - // change each character of the string to lower case - for(size_t i = 0; i < s.length(); i++){ - s[i] = tolower(s[i]); - } - return s; + // change each character of the string to lower case + for(size_t i = 0; i < s.length(); i++){ + s[i] = tolower(s[i]); + } + return s; } string trim_left(const string &s, const string &t /* = SPACES */) { - string d(s); - return d.erase(0, s.find_first_not_of(t)); + string d(s); + return d.erase(0, s.find_first_not_of(t)); } string trim_right(const string &s, const string &t /* = SPACES */) { - string d(s); - string::size_type i(d.find_last_not_of(t)); - if(i == string::npos){ - return ""; - }else{ - return d.erase(d.find_last_not_of(t) + 1); - } + string d(s); + string::size_type i(d.find_last_not_of(t)); + if(i == string::npos){ + return ""; + }else{ + return d.erase(d.find_last_not_of(t) + 1); + } } string trim(const string &s, const string &t /* = SPACES */) { - return trim_left(trim_right(s, t), t); + return trim_left(trim_right(s, t), t); } -/** - * urlEncode a fuse path, - * taking into special consideration "/", - * otherwise regular urlEncode. - */ +// +// urlEncode a fuse path, +// taking into special consideration "/", +// otherwise regular urlEncode. +// string urlEncode(const string &s) { - string result; - for (size_t i = 0; i < s.length(); ++i) { - char c = s[i]; - if (c == '/' // Note- special case for fuse paths... - || c == '.' - || c == '-' - || c == '_' - || c == '~' - || (c >= 'a' && c <= 'z') - || (c >= 'A' && c <= 'Z') - || (c >= '0' && c <= '9')) { - result += c; - } else { - result += "%"; - result += hexAlphabet[static_cast(c) / 16]; - result += hexAlphabet[static_cast(c) % 16]; + string result; + for (size_t i = 0; i < s.length(); ++i) { + char c = s[i]; + if (c == '/' // Note- special case for fuse paths... + || c == '.' 
+ || c == '-' + || c == '_' + || c == '~' + || (c >= 'a' && c <= 'z') + || (c >= 'A' && c <= 'Z') + || (c >= '0' && c <= '9')) + { + result += c; + }else{ + result += "%"; + result += hexAlphabet[static_cast(c) / 16]; + result += hexAlphabet[static_cast(c) % 16]; + } } - } - return result; + return result; } -/** - * urlEncode a fuse path, - * taking into special consideration "/", - * otherwise regular urlEncode. - */ +// +// urlEncode a fuse path, +// taking into special consideration "/", +// otherwise regular urlEncode. +// string urlEncode2(const string &s) { - string result; - for (size_t i = 0; i < s.length(); ++i) { - char c = s[i]; - if (c == '=' // Note- special case for fuse paths... - || c == '&' // Note- special case for s3... - || c == '%' - || c == '.' - || c == '-' - || c == '_' - || c == '~' - || (c >= 'a' && c <= 'z') - || (c >= 'A' && c <= 'Z') - || (c >= '0' && c <= '9')) { - result += c; - } else { - result += "%"; - result += hexAlphabet[static_cast(c) / 16]; - result += hexAlphabet[static_cast(c) % 16]; + string result; + for (size_t i = 0; i < s.length(); ++i) { + char c = s[i]; + if (c == '=' // Note- special case for fuse paths... + || c == '&' // Note- special case for s3... + || c == '%' + || c == '.' + || c == '-' + || c == '_' + || c == '~' + || (c >= 'a' && c <= 'z') + || (c >= 'A' && c <= 'Z') + || (c >= '0' && c <= '9')) + { + result += c; + }else{ + result += "%"; + result += hexAlphabet[static_cast(c) / 16]; + result += hexAlphabet[static_cast(c) % 16]; + } } - } - return result; + return result; } string urlDecode(const string& s) { - string result; - for(size_t i = 0; i < s.length(); ++i){ - if(s[i] != '%'){ - result += s[i]; - }else{ - int ch = 0; - if(s.length() <= ++i){ - break; // wrong format. - } - ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00; - if(s.length() <= ++i){ - break; // wrong format. 
- } - ch *= 16; - ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00; - result += static_cast(ch); + string result; + for(size_t i = 0; i < s.length(); ++i){ + if(s[i] != '%'){ + result += s[i]; + }else{ + int ch = 0; + if(s.length() <= ++i){ + break; // wrong format. + } + ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00; + if(s.length() <= ++i){ + break; // wrong format. + } + ch *= 16; + ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00; + result += static_cast(ch); + } } - } - return result; + return result; } bool takeout_str_dquart(string& str) { - size_t pos; + size_t pos; - // '"' for start - if(string::npos != (pos = str.find_first_of('\"'))){ - str = str.substr(pos + 1); + // '"' for start + if(string::npos != (pos = str.find_first_of('\"'))){ + str = str.substr(pos + 1); - // '"' for end - if(string::npos == (pos = str.find_last_of('\"'))){ - return false; + // '"' for end + if(string::npos == (pos = str.find_last_of('\"'))){ + return false; + } + str = str.substr(0, pos); + if(string::npos != str.find_first_of('\"')){ + return false; + } } - str = str.substr(0, pos); - if(string::npos != str.find_first_of('\"')){ - return false; - } - } - return true; + return true; } // @@ -240,77 +252,77 @@ bool takeout_str_dquart(string& str) // bool get_keyword_value(string& target, const char* keyword, string& value) { - if(!keyword){ - return false; - } - size_t spos; - size_t epos; - if(string::npos == (spos = target.find(keyword))){ - return false; - } - spos += strlen(keyword); - if('=' != target.at(spos)){ - return false; - } - spos++; - if(string::npos == (epos = target.find('&', spos))){ - value = target.substr(spos); - 
}else{ - value = target.substr(spos, (epos - spos)); - } - return true; + if(!keyword){ + return false; + } + size_t spos; + size_t epos; + if(string::npos == (spos = target.find(keyword))){ + return false; + } + spos += strlen(keyword); + if('=' != target.at(spos)){ + return false; + } + spos++; + if(string::npos == (epos = target.find('&', spos))){ + value = target.substr(spos); + }else{ + value = target.substr(spos, (epos - spos)); + } + return true; } -/** - * Returns the current date - * in a format suitable for a HTTP request header. - */ +// +// Returns the current date +// in a format suitable for a HTTP request header. +// string get_date_rfc850() { - char buf[100]; - time_t t = time(NULL); - struct tm res; - strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S GMT", gmtime_r(&t, &res)); - return buf; + char buf[100]; + time_t t = time(NULL); + struct tm res; + strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S GMT", gmtime_r(&t, &res)); + return buf; } void get_date_sigv3(string& date, string& date8601) { - time_t tm = time(NULL); - date = get_date_string(tm); - date8601 = get_date_iso8601(tm); + time_t tm = time(NULL); + date = get_date_string(tm); + date8601 = get_date_iso8601(tm); } string get_date_string(time_t tm) { - char buf[100]; - struct tm res; - strftime(buf, sizeof(buf), "%Y%m%d", gmtime_r(&tm, &res)); - return buf; + char buf[100]; + struct tm res; + strftime(buf, sizeof(buf), "%Y%m%d", gmtime_r(&tm, &res)); + return buf; } string get_date_iso8601(time_t tm) { - char buf[100]; - struct tm res; - strftime(buf, sizeof(buf), "%Y%m%dT%H%M%SZ", gmtime_r(&tm, &res)); - return buf; + char buf[100]; + struct tm res; + strftime(buf, sizeof(buf), "%Y%m%dT%H%M%SZ", gmtime_r(&tm, &res)); + return buf; } bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime) { - if(!pdate){ - return false; - } + if(!pdate){ + return false; + } - struct tm tm; - char* prest = strptime(pdate, "%Y-%m-%dT%T", &tm); - if(prest == pdate){ - // wrong format - return 
false; - } - unixtime = mktime(&tm); - return true; + struct tm tm; + char* prest = strptime(pdate, "%Y-%m-%dT%T", &tm); + if(prest == pdate){ + // wrong format + return false; + } + unixtime = mktime(&tm); + return true; } // @@ -319,155 +331,155 @@ bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime) // bool convert_unixtime_from_option_arg(const char* argv, time_t& unixtime) { - if(!argv){ - return false; - } - unixtime = 0; - const char* ptmp; - int last_unit_type = 0; // unit flag. - bool is_last_number; - time_t tmptime; - for(ptmp = argv, is_last_number = true, tmptime = 0; ptmp && *ptmp; ++ptmp){ - if('0' <= *ptmp && *ptmp <= '9'){ - tmptime *= 10; - tmptime += static_cast(*ptmp - '0'); - is_last_number = true; - }else if(is_last_number){ - if('Y' == *ptmp && 1 > last_unit_type){ - unixtime += (tmptime * (60 * 60 * 24 * 365)); // average 365 day / year - last_unit_type = 1; - }else if('M' == *ptmp && 2 > last_unit_type){ - unixtime += (tmptime * (60 * 60 * 24 * 30)); // average 30 day / month - last_unit_type = 2; - }else if('D' == *ptmp && 3 > last_unit_type){ - unixtime += (tmptime * (60 * 60 * 24)); - last_unit_type = 3; - }else if('h' == *ptmp && 4 > last_unit_type){ - unixtime += (tmptime * (60 * 60)); - last_unit_type = 4; - }else if('m' == *ptmp && 5 > last_unit_type){ - unixtime += (tmptime * 60); - last_unit_type = 5; - }else if('s' == *ptmp && 6 > last_unit_type){ - unixtime += tmptime; - last_unit_type = 6; - }else{ - return false; - } - tmptime = 0; - is_last_number = false; - }else{ + if(!argv){ return false; } - } - if(is_last_number){ - return false; - } - return true; + unixtime = 0; + const char* ptmp; + int last_unit_type = 0; // unit flag. 
+ bool is_last_number; + time_t tmptime; + for(ptmp = argv, is_last_number = true, tmptime = 0; ptmp && *ptmp; ++ptmp){ + if('0' <= *ptmp && *ptmp <= '9'){ + tmptime *= 10; + tmptime += static_cast(*ptmp - '0'); + is_last_number = true; + }else if(is_last_number){ + if('Y' == *ptmp && 1 > last_unit_type){ + unixtime += (tmptime * (60 * 60 * 24 * 365)); // average 365 day / year + last_unit_type = 1; + }else if('M' == *ptmp && 2 > last_unit_type){ + unixtime += (tmptime * (60 * 60 * 24 * 30)); // average 30 day / month + last_unit_type = 2; + }else if('D' == *ptmp && 3 > last_unit_type){ + unixtime += (tmptime * (60 * 60 * 24)); + last_unit_type = 3; + }else if('h' == *ptmp && 4 > last_unit_type){ + unixtime += (tmptime * (60 * 60)); + last_unit_type = 4; + }else if('m' == *ptmp && 5 > last_unit_type){ + unixtime += (tmptime * 60); + last_unit_type = 5; + }else if('s' == *ptmp && 6 > last_unit_type){ + unixtime += tmptime; + last_unit_type = 6; + }else{ + return false; + } + tmptime = 0; + is_last_number = false; + }else{ + return false; + } + } + if(is_last_number){ + return false; + } + return true; } std::string s3fs_hex(const unsigned char* input, size_t length) { - std::string hex; - for(size_t pos = 0; pos < length; ++pos){ - char hexbuf[3]; - snprintf(hexbuf, 3, "%02x", input[pos]); - hex += hexbuf; - } - return hex; + std::string hex; + for(size_t pos = 0; pos < length; ++pos){ + char hexbuf[3]; + snprintf(hexbuf, 3, "%02x", input[pos]); + hex += hexbuf; + } + return hex; } char* s3fs_base64(const unsigned char* input, size_t length) { - static const char* base = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="; - char* result; + static const char* base = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="; + char* result; - if(!input || 0 == length){ - return NULL; - } - result = new char[((length / 3) + 1) * 4 + 1]; + if(!input || 0 == length){ + return NULL; + } + result = new char[((length / 3) + 1) * 4 + 1]; - unsigned 
char parts[4]; - size_t rpos; - size_t wpos; - for(rpos = 0, wpos = 0; rpos < length; rpos += 3){ - parts[0] = (input[rpos] & 0xfc) >> 2; - parts[1] = ((input[rpos] & 0x03) << 4) | ((((rpos + 1) < length ? input[rpos + 1] : 0x00) & 0xf0) >> 4); - parts[2] = (rpos + 1) < length ? (((input[rpos + 1] & 0x0f) << 2) | ((((rpos + 2) < length ? input[rpos + 2] : 0x00) & 0xc0) >> 6)) : 0x40; - parts[3] = (rpos + 2) < length ? (input[rpos + 2] & 0x3f) : 0x40; + unsigned char parts[4]; + size_t rpos; + size_t wpos; + for(rpos = 0, wpos = 0; rpos < length; rpos += 3){ + parts[0] = (input[rpos] & 0xfc) >> 2; + parts[1] = ((input[rpos] & 0x03) << 4) | ((((rpos + 1) < length ? input[rpos + 1] : 0x00) & 0xf0) >> 4); + parts[2] = (rpos + 1) < length ? (((input[rpos + 1] & 0x0f) << 2) | ((((rpos + 2) < length ? input[rpos + 2] : 0x00) & 0xc0) >> 6)) : 0x40; + parts[3] = (rpos + 2) < length ? (input[rpos + 2] & 0x3f) : 0x40; - result[wpos++] = base[parts[0]]; - result[wpos++] = base[parts[1]]; - result[wpos++] = base[parts[2]]; - result[wpos++] = base[parts[3]]; - } - result[wpos] = '\0'; + result[wpos++] = base[parts[0]]; + result[wpos++] = base[parts[1]]; + result[wpos++] = base[parts[2]]; + result[wpos++] = base[parts[3]]; + } + result[wpos] = '\0'; - return result; + return result; } inline unsigned char char_decode64(const char ch) { - unsigned char by; - if('A' <= ch && ch <= 'Z'){ // A - Z - by = static_cast(ch - 'A'); - }else if('a' <= ch && ch <= 'z'){ // a - z - by = static_cast(ch - 'a' + 26); - }else if('0' <= ch && ch <= '9'){ // 0 - 9 - by = static_cast(ch - '0' + 52); - }else if('+' == ch){ // + - by = 62; - }else if('/' == ch){ // / - by = 63; - }else if('=' == ch){ // = - by = 64; - }else{ // something wrong - by = UCHAR_MAX; - } - return by; + unsigned char by; + if('A' <= ch && ch <= 'Z'){ // A - Z + by = static_cast(ch - 'A'); + }else if('a' <= ch && ch <= 'z'){ // a - z + by = static_cast(ch - 'a' + 26); + }else if('0' <= ch && ch <= '9'){ // 0 - 9 + by = 
static_cast(ch - '0' + 52); + }else if('+' == ch){ // + + by = 62; + }else if('/' == ch){ // / + by = 63; + }else if('=' == ch){ // = + by = 64; + }else{ // something wrong + by = UCHAR_MAX; + } + return by; } unsigned char* s3fs_decode64(const char* input, size_t* plength) { - unsigned char* result; - if(!input || 0 == strlen(input) || !plength){ - return NULL; - } - result = new unsigned char[strlen(input) + 1]; - - unsigned char parts[4]; - size_t input_len = strlen(input); - size_t rpos; - size_t wpos; - for(rpos = 0, wpos = 0; rpos < input_len; rpos += 4){ - parts[0] = char_decode64(input[rpos]); - parts[1] = (rpos + 1) < input_len ? char_decode64(input[rpos + 1]) : 64; - parts[2] = (rpos + 2) < input_len ? char_decode64(input[rpos + 2]) : 64; - parts[3] = (rpos + 3) < input_len ? char_decode64(input[rpos + 3]) : 64; - - result[wpos++] = ((parts[0] << 2) & 0xfc) | ((parts[1] >> 4) & 0x03); - if(64 == parts[2]){ - break; + unsigned char* result; + if(!input || 0 == strlen(input) || !plength){ + return NULL; } - result[wpos++] = ((parts[1] << 4) & 0xf0) | ((parts[2] >> 2) & 0x0f); - if(64 == parts[3]){ - break; + result = new unsigned char[strlen(input) + 1]; + + unsigned char parts[4]; + size_t input_len = strlen(input); + size_t rpos; + size_t wpos; + for(rpos = 0, wpos = 0; rpos < input_len; rpos += 4){ + parts[0] = char_decode64(input[rpos]); + parts[1] = (rpos + 1) < input_len ? char_decode64(input[rpos + 1]) : 64; + parts[2] = (rpos + 2) < input_len ? char_decode64(input[rpos + 2]) : 64; + parts[3] = (rpos + 3) < input_len ? 
char_decode64(input[rpos + 3]) : 64; + + result[wpos++] = ((parts[0] << 2) & 0xfc) | ((parts[1] >> 4) & 0x03); + if(64 == parts[2]){ + break; + } + result[wpos++] = ((parts[1] << 4) & 0xf0) | ((parts[2] >> 2) & 0x0f); + if(64 == parts[3]){ + break; + } + result[wpos++] = ((parts[2] << 6) & 0xc0) | (parts[3] & 0x3f); } - result[wpos++] = ((parts[2] << 6) & 0xc0) | (parts[3] & 0x3f); - } - result[wpos] = '\0'; - *plength = wpos; - return result; + result[wpos] = '\0'; + *plength = wpos; + return result; } -/* - * detect and rewrite invalid utf8. We take invalid bytes - * and encode them into a private region of the unicode - * space. This is sometimes known as wtf8, wobbly transformation format. - * it is necessary because S3 validates the utf8 used for identifiers for - * correctness, while some clients may provide invalid utf, notably - * windows using cp1252. - */ +// +// detect and rewrite invalid utf8. We take invalid bytes +// and encode them into a private region of the unicode +// space. This is sometimes known as wtf8, wobbly transformation format. +// it is necessary because S3 validates the utf8 used for identifiers for +// correctness, while some clients may provide invalid utf, notably +// windows using cp1252. +// // Base location for transform. The range 0xE000 - 0xF8ff // is a private range, se use the start of this range. @@ -477,123 +489,122 @@ static unsigned int escape_base = 0xe000; // 'result' can be null. returns true if transform was needed. 
bool s3fs_wtf8_encode(const char *s, string *result) { - bool invalid = false; + bool invalid = false; - // Pass valid utf8 code through - for (; *s; s++) { - const unsigned char c = *s; + // Pass valid utf8 code through + for (; *s; s++) { + const unsigned char c = *s; - // single byte encoding - if (c <= 0x7f) { - if (result) { - *result += c; - } - continue; - } + // single byte encoding + if (c <= 0x7f) { + if (result) { + *result += c; + } + continue; + } - // otherwise, it must be one of the valid start bytes - if ( c >= 0xc2 && c <= 0xf5 ) { - - // two byte encoding - // don't need bounds check, string is zero terminated - if ((c & 0xe0) == 0xc0 && (s[1] & 0xc0) == 0x80) { - // all two byte encodings starting higher than c1 are valid + // otherwise, it must be one of the valid start bytes + if ( c >= 0xc2 && c <= 0xf5 ) { + // two byte encoding + // don't need bounds check, string is zero terminated + if ((c & 0xe0) == 0xc0 && (s[1] & 0xc0) == 0x80) { + // all two byte encodings starting higher than c1 are valid + if (result) { + *result += c; + *result += *(++s); + } + continue; + } + // three byte encoding + if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) { + const unsigned code = ((c & 0x0f) << 12) | ((s[1] & 0x3f) << 6) | (s[2] & 0x3f); + if (code >= 0x800 && ! 
(code >= 0xd800 && code <= 0xd8ff)) { + // not overlong and not a surrogate pair + if (result) { + *result += c; + *result += *(++s); + *result += *(++s); + } + continue; + } + } + // four byte encoding + if ((c & 0xf8) == 0xf0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80 && (s[3] & 0xc0) == 0x80) { + const unsigned code = ((c & 0x07) << 18) | ((s[1] & 0x3f) << 12) | ((s[2] & 0x3f) << 6) | (s[3] & 0x3f); + if (code >= 0x10000 && code <= 0x10ffff) { + // not overlong and in defined unicode space + if (result) { + *result += c; + *result += *(++s); + *result += *(++s); + *result += *(++s); + } + continue; + } + } + } + // printf("invalid %02x at %d\n", c, i); + // Invalid utf8 code. Convert it to a private two byte area of unicode + // e.g. the e000 - f8ff area. This will be a three byte encoding + invalid = true; if (result) { - *result += c; - *result += *(++s); + unsigned escape = escape_base + c; + *result += static_cast(0xe0 | ((escape >> 12) & 0x0f)); + *result += static_cast(0x80 | ((escape >> 06) & 0x3f)); + *result += static_cast(0x80 | ((escape >> 00) & 0x3f)); } - continue; - } - // three byte encoding - if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) { - const unsigned code = ((c & 0x0f) << 12) | ((s[1] & 0x3f) << 6) | (s[2] & 0x3f); - if (code >= 0x800 && ! 
(code >= 0xd800 && code <= 0xd8ff)) { - // not overlong and not a surrogate pair - if (result) { - *result += c; - *result += *(++s); - *result += *(++s); - } - continue; - } - } - // four byte encoding - if ((c & 0xf8) == 0xf0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80 && (s[3] & 0xc0) == 0x80) { - const unsigned code = ((c & 0x07) << 18) | ((s[1] & 0x3f) << 12) | ((s[2] & 0x3f) << 6) | (s[3] & 0x3f); - if (code >= 0x10000 && code <= 0x10ffff) { - // not overlong and in defined unicode space - if (result) { - *result += c; - *result += *(++s); - *result += *(++s); - *result += *(++s); - } - continue; - } - } } - // printf("invalid %02x at %d\n", c, i); - // Invalid utf8 code. Convert it to a private two byte area of unicode - // e.g. the e000 - f8ff area. This will be a three byte encoding - invalid = true; - if (result) { - unsigned escape = escape_base + c; - *result += static_cast(0xe0 | ((escape >> 12) & 0x0f)); - *result += static_cast(0x80 | ((escape >> 06) & 0x3f)); - *result += static_cast(0x80 | ((escape >> 00) & 0x3f)); - } - } - return invalid; + return invalid; } string s3fs_wtf8_encode(const string &s) { - string result; - s3fs_wtf8_encode(s.c_str(), &result); - return result; + string result; + s3fs_wtf8_encode(s.c_str(), &result); + return result; } // The reverse operation, turn encoded bytes back into their original values // The code assumes that we map to a three-byte code point. 
bool s3fs_wtf8_decode(const char *s, string *result) { - bool encoded = false; - for (; *s; s++) { - unsigned char c = *s; - // look for a three byte tuple matching our encoding code - if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) { - unsigned code = (c & 0x0f) << 12; - code |= (s[1] & 0x3f) << 6; - code |= (s[2] & 0x3f) << 0; - if (code >= escape_base && code <= escape_base + 0xff) { - // convert back - encoded = true; - if(result){ - *result += static_cast(code - escape_base); + bool encoded = false; + for (; *s; s++) { + unsigned char c = *s; + // look for a three byte tuple matching our encoding code + if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) { + unsigned code = (c & 0x0f) << 12; + code |= (s[1] & 0x3f) << 6; + code |= (s[2] & 0x3f) << 0; + if (code >= escape_base && code <= escape_base + 0xff) { + // convert back + encoded = true; + if(result){ + *result += static_cast(code - escape_base); + } + s+=2; + continue; + } + } + if (result) { + *result += c; } - s+=2; - continue; - } } - if (result) { - *result += c; - } - } - return encoded; + return encoded; } string s3fs_wtf8_decode(const string &s) { - string result; - s3fs_wtf8_decode(s.c_str(), &result); - return result; + string result; + s3fs_wtf8_decode(s.c_str(), &result); + return result; } /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/string_util.h b/src/string_util.h index 7318208..ac21a30 100644 --- a/src/string_util.h +++ b/src/string_util.h @@ -17,49 +17,88 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ + #ifndef S3FS_STRING_UTIL_H_ #define S3FS_STRING_UTIL_H_ -/* - * A collection of string utilities for manipulating URLs and HTTP responses. - */ -#include -#include -#include - -#include - -static const std::string SPACES = " \t\r\n"; +// +// A collection of string utilities for manipulating URLs and HTTP responses. +// +//------------------------------------------------------------------- +// Gloval variables +//------------------------------------------------------------------- +extern const std::string SPACES; +//------------------------------------------------------------------- +// Inline functions +//------------------------------------------------------------------- static inline int STR2NCMP(const char *str1, const char *str2) { return strncmp(str1, str2, strlen(str2)); } +static inline const char* SAFESTRPTR(const char *strptr) { return strptr ? strptr : ""; } +//------------------------------------------------------------------- +// Templates +//------------------------------------------------------------------- template std::string str(T value); +//------------------------------------------------------------------- +// Macros(WTF8) +//------------------------------------------------------------------- +#define WTF8_ENCODE(ARG) \ + std::string ARG##_buf; \ + const char * ARG = _##ARG; \ + if (use_wtf8 && s3fs_wtf8_encode( _##ARG, 0 )) { \ + s3fs_wtf8_encode( _##ARG, &ARG##_buf); \ + ARG = ARG##_buf.c_str(); \ + } + +//------------------------------------------------------------------- +// Utilities +//------------------------------------------------------------------- +// // Convert string to off_t. Throws std::invalid_argument and std::out_of_range on bad input. 
+// off_t s3fs_strtoofft(const char* str, int base = 0); bool try_strtoofft(const char* str, off_t& value, int base = 0); off_t cvt_strtoofft(const char* str, int base = 0); +// +// String Manipulation +// std::string trim_left(const std::string &s, const std::string &t = SPACES); std::string trim_right(const std::string &s, const std::string &t = SPACES); std::string trim(const std::string &s, const std::string &t = SPACES); std::string lower(std::string s); + +// +// Date string +// std::string get_date_rfc850(void); void get_date_sigv3(std::string& date, std::string& date8601); std::string get_date_string(time_t tm); std::string get_date_iso8601(time_t tm); bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime); bool convert_unixtime_from_option_arg(const char* argv, time_t& unixtime); + +// +// For encoding +// std::string urlEncode(const std::string &s); std::string urlEncode2(const std::string &s); std::string urlDecode(const std::string& s); + bool takeout_str_dquart(std::string& str); bool get_keyword_value(std::string& target, const char* keyword, std::string& value); +// +// For binary string +// std::string s3fs_hex(const unsigned char* input, size_t length); char* s3fs_base64(const unsigned char* input, size_t length); unsigned char* s3fs_decode64(const char* input, size_t* plength); +// +// WTF8 +// bool s3fs_wtf8_encode(const char *s, std::string *result); std::string s3fs_wtf8_encode(const std::string &s); bool s3fs_wtf8_decode(const char *s, std::string *result); @@ -69,9 +108,9 @@ std::string s3fs_wtf8_decode(const std::string &s); /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/test_string_util.cpp b/src/test_string_util.cpp index 733e0a8..a67d8e8 100644 --- a/src/test_string_util.cpp +++ b/src/test_string_util.cpp 
@@ -18,6 +18,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ +#include #include #include #include @@ -25,6 +26,7 @@ #include #include "common.h" +#include "s3fs.h" #include "string_util.h" #include "test_util.h" @@ -37,109 +39,110 @@ std::string instance_name; void test_trim() { - ASSERT_EQUALS(std::string("1234"), trim(" 1234 ")); - ASSERT_EQUALS(std::string("1234"), trim("1234 ")); - ASSERT_EQUALS(std::string("1234"), trim(" 1234")); - ASSERT_EQUALS(std::string("1234"), trim("1234")); + ASSERT_EQUALS(std::string("1234"), trim(" 1234 ")); + ASSERT_EQUALS(std::string("1234"), trim("1234 ")); + ASSERT_EQUALS(std::string("1234"), trim(" 1234")); + ASSERT_EQUALS(std::string("1234"), trim("1234")); - ASSERT_EQUALS(std::string("1234 "), trim_left(" 1234 ")); - ASSERT_EQUALS(std::string("1234 "), trim_left("1234 ")); - ASSERT_EQUALS(std::string("1234"), trim_left(" 1234")); - ASSERT_EQUALS(std::string("1234"), trim_left("1234")); + ASSERT_EQUALS(std::string("1234 "), trim_left(" 1234 ")); + ASSERT_EQUALS(std::string("1234 "), trim_left("1234 ")); + ASSERT_EQUALS(std::string("1234"), trim_left(" 1234")); + ASSERT_EQUALS(std::string("1234"), trim_left("1234")); - ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234 ")); - ASSERT_EQUALS(std::string("1234"), trim_right("1234 ")); - ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234")); - ASSERT_EQUALS(std::string("1234"), trim_right("1234")); + ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234 ")); + ASSERT_EQUALS(std::string("1234"), trim_right("1234 ")); + ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234")); + ASSERT_EQUALS(std::string("1234"), trim_right("1234")); - ASSERT_EQUALS(std::string("0"), str(0)); - ASSERT_EQUALS(std::string("1"), str(1)); - ASSERT_EQUALS(std::string("-1"), str(-1)); - ASSERT_EQUALS(std::string("9223372036854775807"), str(std::numeric_limits::max())); - ASSERT_EQUALS(std::string("-9223372036854775808"), str(std::numeric_limits::min())); - 
ASSERT_EQUALS(std::string("0"), str(std::numeric_limits::min())); - ASSERT_EQUALS(std::string("18446744073709551615"), str(std::numeric_limits::max())); + ASSERT_EQUALS(std::string("0"), str(0)); + ASSERT_EQUALS(std::string("1"), str(1)); + ASSERT_EQUALS(std::string("-1"), str(-1)); + ASSERT_EQUALS(std::string("9223372036854775807"), str(std::numeric_limits::max())); + ASSERT_EQUALS(std::string("-9223372036854775808"), str(std::numeric_limits::min())); + ASSERT_EQUALS(std::string("0"), str(std::numeric_limits::min())); + ASSERT_EQUALS(std::string("18446744073709551615"), str(std::numeric_limits::max())); } void test_base64() { - size_t len; - ASSERT_STREQUALS(s3fs_base64(NULL, 0), NULL); - ASSERT_STREQUALS(reinterpret_cast(s3fs_decode64(NULL, &len)), NULL); - ASSERT_STREQUALS(s3fs_base64(reinterpret_cast(""), 0), NULL); - ASSERT_STREQUALS(reinterpret_cast(s3fs_decode64("", &len)), NULL); + size_t len; + ASSERT_STREQUALS(s3fs_base64(NULL, 0), NULL); + ASSERT_STREQUALS(reinterpret_cast(s3fs_decode64(NULL, &len)), NULL); + ASSERT_STREQUALS(s3fs_base64(reinterpret_cast(""), 0), NULL); + ASSERT_STREQUALS(reinterpret_cast(s3fs_decode64("", &len)), NULL); - ASSERT_STREQUALS(s3fs_base64(reinterpret_cast("1"), 1), "MQ=="); - ASSERT_STREQUALS(reinterpret_cast(s3fs_decode64("MQ==", &len)), "1"); - ASSERT_EQUALS(len, static_cast(1)); - ASSERT_STREQUALS(s3fs_base64(reinterpret_cast("12"), 2), "MTI="); - ASSERT_STREQUALS(reinterpret_cast(s3fs_decode64("MTI=", &len)), "12"); - ASSERT_EQUALS(len, static_cast(2)); - ASSERT_STREQUALS(s3fs_base64(reinterpret_cast("123"), 3), "MTIz"); - ASSERT_STREQUALS(reinterpret_cast(s3fs_decode64("MTIz", &len)), "123"); - ASSERT_EQUALS(len, static_cast(3)); - ASSERT_STREQUALS(s3fs_base64(reinterpret_cast("1234"), 4), "MTIzNA=="); - ASSERT_STREQUALS(reinterpret_cast(s3fs_decode64("MTIzNA==", &len)), "1234"); - ASSERT_EQUALS(len, static_cast(4)); + ASSERT_STREQUALS(s3fs_base64(reinterpret_cast("1"), 1), "MQ=="); + 
ASSERT_STREQUALS(reinterpret_cast(s3fs_decode64("MQ==", &len)), "1"); + ASSERT_EQUALS(len, static_cast(1)); + ASSERT_STREQUALS(s3fs_base64(reinterpret_cast("12"), 2), "MTI="); + ASSERT_STREQUALS(reinterpret_cast(s3fs_decode64("MTI=", &len)), "12"); + ASSERT_EQUALS(len, static_cast(2)); + ASSERT_STREQUALS(s3fs_base64(reinterpret_cast("123"), 3), "MTIz"); + ASSERT_STREQUALS(reinterpret_cast(s3fs_decode64("MTIz", &len)), "123"); + ASSERT_EQUALS(len, static_cast(3)); + ASSERT_STREQUALS(s3fs_base64(reinterpret_cast("1234"), 4), "MTIzNA=="); + ASSERT_STREQUALS(reinterpret_cast(s3fs_decode64("MTIzNA==", &len)), "1234"); + ASSERT_EQUALS(len, static_cast(4)); - // TODO: invalid input + // TODO: invalid input } void test_strtoofft() { - ASSERT_EQUALS(s3fs_strtoofft("0"), static_cast(0L)); - ASSERT_EQUALS(s3fs_strtoofft("9"), static_cast(9L)); - try{ - s3fs_strtoofft("A"); - abort(); - }catch(std::exception &e){ - // expected - } - ASSERT_EQUALS(s3fs_strtoofft("A", /*base=*/ 16), static_cast(10L)); - ASSERT_EQUALS(s3fs_strtoofft("F", /*base=*/ 16), static_cast(15L)); - ASSERT_EQUALS(s3fs_strtoofft("a", /*base=*/ 16), static_cast(10L)); - ASSERT_EQUALS(s3fs_strtoofft("f", /*base=*/ 16), static_cast(15L)); - ASSERT_EQUALS(s3fs_strtoofft("deadbeef", /*base=*/ 16), static_cast(3735928559L)); + ASSERT_EQUALS(s3fs_strtoofft("0"), static_cast(0L)); + ASSERT_EQUALS(s3fs_strtoofft("9"), static_cast(9L)); + try{ + s3fs_strtoofft("A"); + abort(); + }catch(std::exception &e){ + // expected + } + ASSERT_EQUALS(s3fs_strtoofft("A", /*base=*/ 16), static_cast(10L)); + ASSERT_EQUALS(s3fs_strtoofft("F", /*base=*/ 16), static_cast(15L)); + ASSERT_EQUALS(s3fs_strtoofft("a", /*base=*/ 16), static_cast(10L)); + ASSERT_EQUALS(s3fs_strtoofft("f", /*base=*/ 16), static_cast(15L)); + ASSERT_EQUALS(s3fs_strtoofft("deadbeef", /*base=*/ 16), static_cast(3735928559L)); } void test_wtf8_encoding() { - std::string ascii("normal string"); - std::string utf8("Hyld\xc3\xbdpi 
\xc3\xbej\xc3\xb3\xc3\xb0""f\xc3\xa9lagsins vex \xc3\xbar k\xc3\xa6rkomnu b\xc3\xb6li \xc3\xad \xc3\xa1st"); - std::string cp1252("Hyld\xfdpi \xfej\xf3\xf0""f\xe9lagsins vex \xfar k\xe6rkomnu b\xf6li \xed \xe1st"); - std::string broken = utf8; - broken[14] = 0x97; - std::string mixed = ascii + utf8 + cp1252; + std::string ascii("normal string"); + std::string utf8("Hyld\xc3\xbdpi \xc3\xbej\xc3\xb3\xc3\xb0""f\xc3\xa9lagsins vex \xc3\xbar k\xc3\xa6rkomnu b\xc3\xb6li \xc3\xad \xc3\xa1st"); + std::string cp1252("Hyld\xfdpi \xfej\xf3\xf0""f\xe9lagsins vex \xfar k\xe6rkomnu b\xf6li \xed \xe1st"); + std::string broken = utf8; + broken[14] = 0x97; + std::string mixed = ascii + utf8 + cp1252; - ASSERT_EQUALS(s3fs_wtf8_encode(ascii), ascii); - ASSERT_EQUALS(s3fs_wtf8_decode(ascii), ascii); - ASSERT_EQUALS(s3fs_wtf8_encode(utf8), utf8); - ASSERT_EQUALS(s3fs_wtf8_decode(utf8), utf8); + ASSERT_EQUALS(s3fs_wtf8_encode(ascii), ascii); + ASSERT_EQUALS(s3fs_wtf8_decode(ascii), ascii); + ASSERT_EQUALS(s3fs_wtf8_encode(utf8), utf8); + ASSERT_EQUALS(s3fs_wtf8_decode(utf8), utf8); - ASSERT_NEQUALS(s3fs_wtf8_encode(cp1252), cp1252); - ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(cp1252)), cp1252); + ASSERT_NEQUALS(s3fs_wtf8_encode(cp1252), cp1252); + ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(cp1252)), cp1252); - ASSERT_NEQUALS(s3fs_wtf8_encode(broken), broken); - ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(broken)), broken); + ASSERT_NEQUALS(s3fs_wtf8_encode(broken), broken); + ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(broken)), broken); - ASSERT_NEQUALS(s3fs_wtf8_encode(mixed), mixed); - ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(mixed)), mixed); + ASSERT_NEQUALS(s3fs_wtf8_encode(mixed), mixed); + ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(mixed)), mixed); } int main(int argc, char *argv[]) { - test_trim(); - test_base64(); - test_strtoofft(); - test_wtf8_encoding(); - return 0; + test_trim(); + test_base64(); + test_strtoofft(); + test_wtf8_encoding(); + + 
return 0; } /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/src/test_util.h b/src/test_util.h index 1b9b9f4..ea4ec4c 100644 --- a/src/test_util.h +++ b/src/test_util.h @@ -18,81 +18,85 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ +#ifndef S3FS_TEST_UTIL_H_ +#define S3FS_TEST_UTIL_H_ + #include #include #include template void assert_equals(const T &x, const T &y, const char *file, int line) { - if (x != y) { - std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl; - std::cerr << std::endl; - std::exit(1); - } + if (x != y) { + std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl; + std::cerr << std::endl; + std::exit(1); + } } template <> void assert_equals(const std::string &x, const std::string &y, const char *file, int line) { - if (x != y) { - std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl; - for (unsigned i=0; i void assert_nequals(const T &x, const T &y, const char *file, int line) { - if (x == y) { - std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl; - std::exit(1); - } + if (x == y) { + std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl; + std::exit(1); + } } template <> void assert_nequals(const std::string &x, const std::string &y, const char *file, int line) { - if (x == y) { - std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl; - for (unsigned i=0; i +#include +#include +#include +#include + +// +// For extended attribute +// (HAVE_XXX symbols are defined in config.h) +// +#ifdef HAVE_SYS_EXTATTR_H +#include +#elif HAVE_ATTR_XATTR_H +#include +#elif HAVE_SYS_XATTR_H +#include +#endif + +#if __cplusplus < 201103L + #define OPERATOR_EXPLICIT +#else + 
#define OPERATOR_EXPLICIT explicit +#endif + +//------------------------------------------------------------------- +// xattrs_t +//------------------------------------------------------------------- +// +// Header "x-amz-meta-xattr" is for extended attributes. +// This header is url encoded string which is json formatted. +// x-amz-meta-xattr:urlencode({"xattr-1":"base64(value-1)","xattr-2":"base64(value-2)","xattr-3":"base64(value-3)"}) +// +typedef struct xattr_value +{ + unsigned char* pvalue; + size_t length; + + explicit xattr_value(unsigned char* pval = NULL, size_t len = 0) : pvalue(pval), length(len) {} + ~xattr_value() + { + delete[] pvalue; + } +}XATTRVAL, *PXATTRVAL; + +typedef std::map xattrs_t; + +//------------------------------------------------------------------- +// storage_class_t +//------------------------------------------------------------------- class storage_class_t{ - public: - enum Value{ - STANDARD, - STANDARD_IA, - ONEZONE_IA, - REDUCED_REDUNDANCY, - INTELLIGENT_TIERING, - GLACIER, - DEEP_ARCHIVE, - UNKNOWN - }; + public: + enum Value{ + STANDARD, + STANDARD_IA, + ONEZONE_IA, + REDUCED_REDUNDANCY, + INTELLIGENT_TIERING, + GLACIER, + DEEP_ARCHIVE, + UNKNOWN + }; - // cppcheck-suppress noExplicitConstructor - storage_class_t(Value value) : value_(value) {} + // cppcheck-suppress noExplicitConstructor + storage_class_t(Value value) : value_(value) {} - operator Value() const { return value_; } + operator Value() const { return value_; } - const char* str() const { - switch(value_){ - case STANDARD: - return "STANDARD"; - case STANDARD_IA: - return "STANDARD_IA"; - case ONEZONE_IA: - return "ONEZONE_IA"; - case REDUCED_REDUNDANCY: - return "REDUCED_REDUNDANCY"; - case INTELLIGENT_TIERING: - return "INTELLIGENT_TIERING"; - case GLACIER: - return "GLACIER"; - case DEEP_ARCHIVE: - return "DEEP_ARCHIVE"; - case UNKNOWN: - return NULL; - } - abort(); - } + const char* str() const + { + switch(value_){ + case STANDARD: + return "STANDARD"; + case 
STANDARD_IA: + return "STANDARD_IA"; + case ONEZONE_IA: + return "ONEZONE_IA"; + case REDUCED_REDUNDANCY: + return "REDUCED_REDUNDANCY"; + case INTELLIGENT_TIERING: + return "INTELLIGENT_TIERING"; + case GLACIER: + return "GLACIER"; + case DEEP_ARCHIVE: + return "DEEP_ARCHIVE"; + case UNKNOWN: + return NULL; + } + abort(); + } - static storage_class_t from_str(const char* str) { - if(0 == strcmp(str, "standard")){ - return STANDARD; - }else if(0 == strcmp(str, "standard_ia")){ - return STANDARD_IA; - }else if(0 == strcmp(str, "onezone_ia")){ - return ONEZONE_IA; - }else if(0 == strcmp(str, "reduced_redundancy")){ - return REDUCED_REDUNDANCY; - }else if(0 == strcmp(str, "intelligent_tiering")){ - return INTELLIGENT_TIERING; - }else if(0 == strcmp(str, "glacier")){ - return GLACIER; - }else if(0 == strcmp(str, "deep_archive")){ - return DEEP_ARCHIVE; - }else{ - return UNKNOWN; - } - } + static storage_class_t from_str(const char* str) + { + if(0 == strcmp(str, "standard")){ + return STANDARD; + }else if(0 == strcmp(str, "standard_ia")){ + return STANDARD_IA; + }else if(0 == strcmp(str, "onezone_ia")){ + return ONEZONE_IA; + }else if(0 == strcmp(str, "reduced_redundancy")){ + return REDUCED_REDUNDANCY; + }else if(0 == strcmp(str, "intelligent_tiering")){ + return INTELLIGENT_TIERING; + }else if(0 == strcmp(str, "glacier")){ + return GLACIER; + }else if(0 == strcmp(str, "deep_archive")){ + return DEEP_ARCHIVE; + }else{ + return UNKNOWN; + } + } - private: - explicit operator bool(); - Value value_; + private: + OPERATOR_EXPLICIT operator bool(); + Value value_; }; +//------------------------------------------------------------------- +// acl_t +//------------------------------------------------------------------- class acl_t{ - public: - enum Value{ - PRIVATE, - PUBLIC_READ, - PUBLIC_READ_WRITE, - AWS_EXEC_READ, - AUTHENTICATED_READ, - BUCKET_OWNER_READ, - BUCKET_OWNER_FULL_CONTROL, - LOG_DELIVERY_WRITE, - UNKNOWN - }; + public: + enum Value{ + PRIVATE, + PUBLIC_READ, 
+ PUBLIC_READ_WRITE, + AWS_EXEC_READ, + AUTHENTICATED_READ, + BUCKET_OWNER_READ, + BUCKET_OWNER_FULL_CONTROL, + LOG_DELIVERY_WRITE, + UNKNOWN + }; - // cppcheck-suppress noExplicitConstructor - acl_t(Value value) : value_(value) {} + // cppcheck-suppress noExplicitConstructor + acl_t(Value value) : value_(value) {} - operator Value() const { return value_; } + operator Value() const { return value_; } - const char* str() const { - switch(value_){ - case PRIVATE: - return "private"; - case PUBLIC_READ: - return "public-read"; - case PUBLIC_READ_WRITE: - return "public-read-write"; - case AWS_EXEC_READ: - return "aws-exec-read"; - case AUTHENTICATED_READ: - return "authenticated-read"; - case BUCKET_OWNER_READ: - return "bucket-owner-read"; - case BUCKET_OWNER_FULL_CONTROL: - return "bucket-owner-full-control"; - case LOG_DELIVERY_WRITE: - return "log-delivery-write"; - case UNKNOWN: - return NULL; - } - abort(); - } + const char* str() const + { + switch(value_){ + case PRIVATE: + return "private"; + case PUBLIC_READ: + return "public-read"; + case PUBLIC_READ_WRITE: + return "public-read-write"; + case AWS_EXEC_READ: + return "aws-exec-read"; + case AUTHENTICATED_READ: + return "authenticated-read"; + case BUCKET_OWNER_READ: + return "bucket-owner-read"; + case BUCKET_OWNER_FULL_CONTROL: + return "bucket-owner-full-control"; + case LOG_DELIVERY_WRITE: + return "log-delivery-write"; + case UNKNOWN: + return NULL; + } + abort(); + } - static acl_t from_str(const char *acl) { - if(0 == strcmp(acl, "private")){ - return PRIVATE; - }else if(0 == strcmp(acl, "public-read")){ - return PUBLIC_READ; - }else if(0 == strcmp(acl, "public-read-write")){ - return PUBLIC_READ_WRITE; - }else if(0 == strcmp(acl, "aws-exec-read")){ - return AWS_EXEC_READ; - }else if(0 == strcmp(acl, "authenticated-read")){ - return AUTHENTICATED_READ; - }else if(0 == strcmp(acl, "bucket-owner-read")){ - return BUCKET_OWNER_READ; - }else if(0 == strcmp(acl, "bucket-owner-full-control")){ - return 
BUCKET_OWNER_FULL_CONTROL; - }else if(0 == strcmp(acl, "log-delivery-write")){ - return LOG_DELIVERY_WRITE; - }else{ - return UNKNOWN; - } - } + static acl_t from_str(const char *acl) + { + if(0 == strcmp(acl, "private")){ + return PRIVATE; + }else if(0 == strcmp(acl, "public-read")){ + return PUBLIC_READ; + }else if(0 == strcmp(acl, "public-read-write")){ + return PUBLIC_READ_WRITE; + }else if(0 == strcmp(acl, "aws-exec-read")){ + return AWS_EXEC_READ; + }else if(0 == strcmp(acl, "authenticated-read")){ + return AUTHENTICATED_READ; + }else if(0 == strcmp(acl, "bucket-owner-read")){ + return BUCKET_OWNER_READ; + }else if(0 == strcmp(acl, "bucket-owner-full-control")){ + return BUCKET_OWNER_FULL_CONTROL; + }else if(0 == strcmp(acl, "log-delivery-write")){ + return LOG_DELIVERY_WRITE; + }else{ + return UNKNOWN; + } + } - private: - explicit operator bool(); - Value value_; + private: + OPERATOR_EXPLICIT operator bool(); + Value value_; }; +//------------------------------------------------------------------- +// sse_type_t +//------------------------------------------------------------------- class sse_type_t{ - public: - enum Value{ - SSE_DISABLE = 0, // not use server side encrypting - SSE_S3, // server side encrypting by S3 key - SSE_C, // server side encrypting by custom key - SSE_KMS // server side encrypting by kms id - }; + public: + enum Value{ + SSE_DISABLE = 0, // not use server side encrypting + SSE_S3, // server side encrypting by S3 key + SSE_C, // server side encrypting by custom key + SSE_KMS // server side encrypting by kms id + }; - // cppcheck-suppress noExplicitConstructor - sse_type_t(Value value) : value_(value) {} + // cppcheck-suppress noExplicitConstructor + sse_type_t(Value value) : value_(value) {} - operator Value() const { return value_; } + operator Value() const { return value_; } - private: - explicit operator bool(); - Value value_; + private: + //OPERATOR_EXPLICIT operator bool(); + Value value_; }; 
+//---------------------------------------------- +// etaglist_t / filepart +//---------------------------------------------- +typedef std::vector etaglist_t; + +// +// Each part information for Multipart upload +// +struct filepart +{ + bool uploaded; // does finish uploading + std::string etag; // expected etag value + int fd; // base file(temporary full file) descriptor + off_t startpos; // seek fd point for uploading + off_t size; // uploading size + etaglist_t* etaglist; // use only parallel upload + int etagpos; // use only parallel upload + + filepart() : uploaded(false), fd(-1), startpos(0), size(-1), etaglist(NULL), etagpos(-1) {} + ~filepart() + { + clear(); + } + + void clear(void) + { + uploaded = false; + etag = ""; + fd = -1; + startpos = 0; + size = -1; + etaglist = NULL; + etagpos = - 1; + } + + void add_etag_list(etaglist_t* list) + { + if(list){ + list->push_back(std::string("")); + etaglist = list; + etagpos = list->size() - 1; + }else{ + etaglist = NULL; + etagpos = - 1; + } + } +}; + +//------------------------------------------------------------------- +// mimes_t +//------------------------------------------------------------------- +struct case_insensitive_compare_func +{ + bool operator()(const std::string& a, const std::string& b) const { + return strcasecmp(a.c_str(), b.c_str()) < 0; + } +}; +typedef std::map mimes_t; + +//------------------------------------------------------------------- +// Typedefs specialized for use +//------------------------------------------------------------------- +typedef std::list readline_t; +typedef std::map kvmap_t; +typedef std::map bucketkvmap_t; + #endif // S3FS_TYPES_H_ /* * Local variables: -* tab-width: 2 -* c-basic-offset: 2 +* tab-width: 4 +* c-basic-offset: 4 * End: -* vim600: expandtab sw=2 ts=2 fdm=marker -* vim<600: expandtab sw=2 ts=2 +* vim600: expandtab sw=4 ts=4 fdm=marker +* vim<600: expandtab sw=4 ts=4 */ diff --git a/test/Makefile.am b/test/Makefile.am index 84b13fa..2bd2be7 100644 --- 
a/test/Makefile.am +++ b/test/Makefile.am @@ -17,14 +17,24 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. ###################################################################### + TESTS=small-integration-test.sh EXTRA_DIST = \ - integration-test-common.sh \ - require-root.sh \ - small-integration-test.sh \ - mergedir.sh \ - sample_delcache.sh \ - sample_ahbe.conf + integration-test-common.sh \ + require-root.sh \ + small-integration-test.sh \ + mergedir.sh \ + sample_delcache.sh \ + sample_ahbe.conf testdir = test + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noet sw=4 ts=4 fdm=marker +# vim<600: noet sw=4 ts=4 +# diff --git a/test/integration-test-common.sh b/test/integration-test-common.sh index bc09c7c..fd68ed4 100644 --- a/test/integration-test-common.sh +++ b/test/integration-test-common.sh @@ -1,4 +1,23 @@ #!/bin/bash +# +# s3fs - FUSE-based file system backed by Amazon S3 +# +# Copyright 2007-2008 Randy Rizun +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+# # # Common code for starting an s3fs-fuse mountpoint and an S3Proxy instance @@ -257,3 +276,12 @@ function common_exit_handler { stop_s3proxy } trap common_exit_handler EXIT + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noet sw=4 ts=4 fdm=marker +# vim<600: noet sw=4 ts=4 +# diff --git a/test/integration-test-main.sh b/test/integration-test-main.sh index 87c89b6..7b918eb 100755 --- a/test/integration-test-main.sh +++ b/test/integration-test-main.sh @@ -1,4 +1,23 @@ #!/bin/bash +# +# s3fs - FUSE-based file system backed by Amazon S3 +# +# Copyright 2007-2008 Randy Rizun +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+# set -o errexit set -o pipefail @@ -1059,3 +1078,12 @@ function add_all_tests { init_suite add_all_tests run_suite + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noet sw=4 ts=4 fdm=marker +# vim<600: noet sw=4 ts=4 +# diff --git a/test/mergedir.sh b/test/mergedir.sh index 375066a..90c433d 100755 --- a/test/mergedir.sh +++ b/test/mergedir.sh @@ -1,4 +1,24 @@ #!/bin/sh +# +# s3fs - FUSE-based file system backed by Amazon S3 +# +# Copyright 2007-2008 Randy Rizun +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# + # # Merge old directory object to new. 
# For s3fs after v1.64 @@ -165,5 +185,10 @@ echo -n "# Finished : " >> $LOGFILE echo `date` >> $LOGFILE # -# END +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noet sw=4 ts=4 fdm=marker +# vim<600: noet sw=4 ts=4 # diff --git a/test/require-root.sh b/test/require-root.sh index a16196e..4a6e177 100755 --- a/test/require-root.sh +++ b/test/require-root.sh @@ -1,7 +1,35 @@ #!/bin/bash -e +# +# s3fs - FUSE-based file system backed by Amazon S3 +# +# Copyright 2007-2008 Randy Rizun +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+# if [[ $EUID -ne 0 ]] then echo "This test script must be run as root" 1>&2 exit 1 fi + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noet sw=4 ts=4 fdm=marker +# vim<600: noet sw=4 ts=4 +# diff --git a/test/run_tests_using_sanitizers.sh b/test/run_tests_using_sanitizers.sh index 8ff85e5..6c9a95f 100755 --- a/test/run_tests_using_sanitizers.sh +++ b/test/run_tests_using_sanitizers.sh @@ -1,4 +1,23 @@ #!/bin/bash +# +# s3fs - FUSE-based file system backed by Amazon S3 +# +# Copyright 2007-2008 Randy Rizun +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+# set -o errexit set -o nounset @@ -27,3 +46,12 @@ make clean ./configure CXXFLAGS='-O1 -g' make RETRIES=200 VALGRIND='--error-exitcode=1 --leak-check=full' make check -C test/ + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noet sw=4 ts=4 fdm=marker +# vim<600: noet sw=4 ts=4 +# diff --git a/test/sample_delcache.sh b/test/sample_delcache.sh index ec6384a..0e5c7a2 100755 --- a/test/sample_delcache.sh +++ b/test/sample_delcache.sh @@ -1,4 +1,24 @@ #!/bin/sh +# +# s3fs - FUSE-based file system backed by Amazon S3 +# +# Copyright 2007-2008 Randy Rizun +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# + # # This is unsupport sample deleting cache files script. # So s3fs's local cache files(stats and objects) grow up, @@ -12,29 +32,29 @@ func_usage() { - echo "" - echo "Usage: $1 [-silent]" - echo " $1 -h" - echo "Sample: $1 mybucket /tmp/s3fs/cache 1073741824" - echo "" - echo " bucket name = bucket name which specified s3fs option" - echo " cache path = cache directory path which specified by" - echo " use_cache s3fs option." - echo " limit size = limit for total cache files size." 
- echo " specify by BYTE" - echo " -silent = silent mode" - echo "" + echo "" + echo "Usage: $1 [-silent]" + echo " $1 -h" + echo "Sample: $1 mybucket /tmp/s3fs/cache 1073741824" + echo "" + echo " bucket name = bucket name which specified s3fs option" + echo " cache path = cache directory path which specified by" + echo " use_cache s3fs option." + echo " limit size = limit for total cache files size." + echo " specify by BYTE" + echo " -silent = silent mode" + echo "" } PRGNAME=`basename $0` if [ "X$1" = "X-h" -o "X$1" = "X-H" ]; then - func_usage $PRGNAME - exit 0 + func_usage $PRGNAME + exit 0 fi if [ "X$1" = "X" -o "X$2" = "X" -o "X$3" = "X" ]; then - func_usage $PRGNAME - exit 1 + func_usage $PRGNAME + exit 1 fi BUCKET=$1 @@ -42,7 +62,7 @@ CDIR="$2" LIMIT=$3 SILENT=0 if [ "X$4" = "X-silent" ]; then - SILENT=1 + SILENT=1 fi FILES_CDIR="${CDIR}/${BUCKET}" STATS_CDIR="${CDIR}/.${BUCKET}.stat" @@ -51,10 +71,10 @@ CURRENT_CACHE_SIZE=`du -sb "$FILES_CDIR" | awk '{print $1}'` # Check total size # if [ $LIMIT -ge $CURRENT_CACHE_SIZE ]; then - if [ $SILENT -ne 1 ]; then - echo "$FILES_CDIR ($CURRENT_CACHE_SIZE) is below allowed $LIMIT" - fi - exit 0 + if [ $SILENT -ne 1 ]; then + echo "$FILES_CDIR ($CURRENT_CACHE_SIZE) is below allowed $LIMIT" + fi + exit 0 fi # @@ -68,39 +88,44 @@ TMP_CFILE="" # find "$STATS_CDIR" -type f -exec stat -c "%X:%n" "{}" \; | sort | while read part do - echo Looking at $part - TMP_ATIME=`echo "$part" | cut -d: -f1` - TMP_STATS="`echo "$part" | cut -d: -f2`" - TMP_CFILE=`echo "$TMP_STATS" | sed s/\.$BUCKET\.stat/$BUCKET/` - - if [ `stat -c %X "$TMP_STATS"` -eq $TMP_ATIME ]; then - rm -f "$TMP_STATS" "$TMP_CFILE" > /dev/null 2>&1 - if [ $? 
-ne 0 ]; then - if [ $SILENT -ne 1 ]; then - echo "ERROR: Could not remove files($TMP_STATS,$TMP_CFILE)" - fi - exit 1 - else - if [ $SILENT -ne 1 ]; then - echo "remove file: $TMP_CFILE $TMP_STATS" - fi + echo Looking at $part + TMP_ATIME=`echo "$part" | cut -d: -f1` + TMP_STATS="`echo "$part" | cut -d: -f2`" + TMP_CFILE=`echo "$TMP_STATS" | sed s/\.$BUCKET\.stat/$BUCKET/` + + if [ `stat -c %X "$TMP_STATS"` -eq $TMP_ATIME ]; then + rm -f "$TMP_STATS" "$TMP_CFILE" > /dev/null 2>&1 + if [ $? -ne 0 ]; then + if [ $SILENT -ne 1 ]; then + echo "ERROR: Could not remove files($TMP_STATS,$TMP_CFILE)" + fi + exit 1 + else + if [ $SILENT -ne 1 ]; then + echo "remove file: $TMP_CFILE $TMP_STATS" + fi + fi fi - fi - if [ $LIMIT -ge `du -sb "$FILES_CDIR" | awk '{print $1}'` ]; then - if [ $SILENT -ne 1 ]; then - echo "finish removing files" + if [ $LIMIT -ge `du -sb "$FILES_CDIR" | awk '{print $1}'` ]; then + if [ $SILENT -ne 1 ]; then + echo "finish removing files" + fi + break fi - break - fi done if [ $SILENT -ne 1 ]; then - TOTAL_SIZE=`du -sb "$FILES_CDIR" | awk '{print $1}'` - echo "Finish: $FILES_CDIR total size is $TOTAL_SIZE" + TOTAL_SIZE=`du -sb "$FILES_CDIR" | awk '{print $1}'` + echo "Finish: $FILES_CDIR total size is $TOTAL_SIZE" fi exit 0 # -# End +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noet sw=4 ts=4 fdm=marker +# vim<600: noet sw=4 ts=4 # diff --git a/test/small-integration-test.sh b/test/small-integration-test.sh index 4829475..e429906 100755 --- a/test/small-integration-test.sh +++ b/test/small-integration-test.sh @@ -1,4 +1,23 @@ #!/bin/bash +# +# s3fs - FUSE-based file system backed by Amazon S3 +# +# Copyright 2007-2008 Randy Rizun +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# # # Test s3fs-fuse file system operations with @@ -63,3 +82,12 @@ done stop_s3proxy echo "$0: tests complete." + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noet sw=4 ts=4 fdm=marker +# vim<600: noet sw=4 ts=4 +# diff --git a/test/test-utils.sh b/test/test-utils.sh index 9a5d684..5414069 100644 --- a/test/test-utils.sh +++ b/test/test-utils.sh @@ -1,4 +1,23 @@ #!/bin/bash +# +# s3fs - FUSE-based file system backed by Amazon S3 +# +# Copyright 2007-2008 Randy Rizun +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+# #### Test utils @@ -279,3 +298,12 @@ function aws_cli() { fi aws $* --endpoint-url "${S3_URL}" --no-verify-ssl $FLAGS } + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noet sw=4 ts=4 fdm=marker +# vim<600: noet sw=4 ts=4 +# diff --git a/test/ut_test.py b/test/ut_test.py index e55dad4..07befcf 100755 --- a/test/ut_test.py +++ b/test/ut_test.py @@ -1,4 +1,23 @@ #!/usr/bin/env python2 +# +# s3fs - FUSE-based file system backed by Amazon S3 +# +# Copyright 2007-2008 Randy Rizun +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+# import os import unittest @@ -79,3 +98,11 @@ class OssfsUnitTest(unittest.TestCase): if __name__ == '__main__': unittest.main() +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noet sw=4 ts=4 fdm=marker +# vim<600: noet sw=4 ts=4 +# diff --git a/test/write_multiple_offsets.py b/test/write_multiple_offsets.py index c1887ac..d8db563 100755 --- a/test/write_multiple_offsets.py +++ b/test/write_multiple_offsets.py @@ -1,4 +1,23 @@ #!/usr/bin/env python2 +# +# s3fs - FUSE-based file system backed by Amazon S3 +# +# Copyright 2007-2008 Randy Rizun +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# import os import sys @@ -16,3 +35,12 @@ try: os.write(fd, data) finally: os.close(fd) + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noet sw=4 ts=4 fdm=marker +# vim<600: noet sw=4 ts=4 +#