diff --git a/.travis.yml b/.travis.yml index f17adf9..1beea25 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,17 +1,16 @@ language: cpp +sudo: required dist: trusty cache: apt before_install: - sudo apt-get update -qq - - sudo apt-get install -qq libfuse-dev + - sudo apt-get install -qq cppcheck libfuse-dev openjdk-7-jdk script: - ./autogen.sh - ./configure - make + - make cppcheck - make check -C src - # Travis granted s3fs access to their upcoming alpha testing stack which may - # allow us to use FUSE. - # TODO: Travis changed their infrastructure some time in June 2015 such that - # this does not work currently - #- modprobe fuse - #- make check -C test + - modprobe fuse + - make check -C test + - cat test/test-suite.log diff --git a/Makefile.am b/Makefile.am index 6f92068..3c50467 100644 --- a/Makefile.am +++ b/Makefile.am @@ -19,7 +19,7 @@ ###################################################################### SUBDIRS=src test doc -EXTRA_DIST=doc +EXTRA_DIST=doc default_commit_hash dist-hook: rm -rf `find $(distdir)/doc -type d -name .svn` @@ -28,3 +28,12 @@ dist-hook: release : dist ../utils/release.sh ../utils/release.sh $(DIST_ARCHIVES) +cppcheck: + cppcheck --quiet --error-exitcode=1 \ + -U CURLE_PEER_FAILED_VERIFICATION \ + --enable=all \ + --suppress=missingIncludeSystem \ + --suppress=unsignedLessThanZero \ + --suppress=unusedFunction \ + --suppress=variableScope \ + src/ test/ diff --git a/NEWS b/NEWS deleted file mode 100644 index e69de29..0000000 diff --git a/README b/README deleted file mode 100644 index 7365065..0000000 --- a/README +++ /dev/null @@ -1,67 +0,0 @@ -THIS README CONTAINS OUTDATED INFORMATION - please refer to the wiki or --help - -S3FS-Fuse - -S3FS is FUSE (File System in User Space) based solution to mount/unmount an Amazon S3 storage buckets and use system commands with S3 just like it was another Hard Disk. - -In order to compile s3fs, You'll need the following requirements: - -* Kernel-devel packages (or kernel source) installed that is the SAME version of your running kernel -* LibXML2-devel packages -* CURL-devel packages (or compile curl from sources at: curl.haxx.se/ use 7.15.X) -* GCC, GCC-C++ -* pkgconfig -* FUSE (>= 2.8.4) -* FUSE Kernel module installed and running (RHEL 4.x/CentOS 4.x users - read below) -* OpenSSL-devel (0.9.8) - GnuTLS(gcrypt and nettle) - NSS -* Git - -If you're using YUM or APT to install those packages, then it might require additional packaging, allow it to be installed. - -Downloading & Compiling: ------------------------- -In order to download s3fs, download from following url: -https://github.com/s3fs-fuse/s3fs-fuse/archive/master.zip -Or clone the following command: -git clone git://github.com/s3fs-fuse/s3fs-fuse.git - -Go inside the directory that has been created (s3fs-fuse) and run: ./autogen.sh -This will generate a number of scripts in the project directory, including a configure script which you should run with: ./configure -If configure succeeded, you can now run: make. If it didn't, make sure you meet the dependencies above. -This should compile the code. If everything goes OK, you'll be greeted with "ok!" at the end and you'll have a binary file called "s3fs" -in the src/ directory. - -As root (you can use su, su -, sudo) do: "make install" -this will copy the "s3fs" binary to /usr/local/bin. - -Congratulations. S3fs is now compiled and installed. - -Usage: ------- -In order to use s3fs, make sure you have the Access Key and the Secret Key handy. 
(refer to the wiki) -First, create a directory where to mount the S3 bucket you want to use. -Example (as root): mkdir -p /mnt/s3 -Then run: s3fs mybucket[:path] /mnt/s3 - -This will mount your bucket to /mnt/s3. You can do a simple "ls -l /mnt/s3" to see the content of your bucket. - -If you want to allow other people access the same bucket in the same machine, you can add "-o allow_other" to read/write/delete content of the bucket. - -You can add a fixed mount point in /etc/fstab, here's an example: - -s3fs#mybucket /mnt/s3 fuse allow_other 0 0 - -This will mount upon reboot (or by launching: mount -a) your bucket on your machine. -If that does not work, probably you should specify with "_netdev" option in fstab. - -All other options can be read at: https://github.com/s3fs-fuse/s3fs-fuse/wiki/Fuse-Over-Amazon - -Known Issues: -------------- -s3fs should be working fine with S3 storage. However, There are couple of limitations: - -* Currently s3fs could hang the CPU if you have lots of time-outs. This is *NOT* a fault of s3fs but rather libcurl. This happens when you try to copy thousands of files in 1 session, it doesn't happen when you upload hundreds of files or less. -* CentOS 4.x/RHEL 4.x users - if you use the kernel that shipped with your distribution and didn't upgrade to the latest kernel RedHat/CentOS gives, you might have a problem loading the "fuse" kernel. Please upgrade to the latest kernel (2.6.16 or above) and make sure "fuse" kernel module is compiled and loadable since FUSE requires this kernel module and s3fs requires it as well. -* Moving/renaming/erasing files takes time since the whole file needs to be accessed first. A workaround could be to use s3fs's cache support with the use_cache option. - diff --git a/README.md b/README.md new file mode 100644 index 0000000..98b1a37 --- /dev/null +++ b/README.md @@ -0,0 +1,108 @@ +s3fs +==== + +s3fs allows Linux and Mac OS X to mount an S3 bucket via FUSE. +s3fs preserves the native object format for files, allowing use of other tools like [s3cmd](http://s3tools.org/s3cmd). 
+
+Features
+--------
+
+* large subset of POSIX including reading/writing files, directories, symlinks, mode, uid/gid, and extended attributes
+* compatible with Amazon S3, Google Cloud Storage, and other S3-based object stores
+* large files via multi-part upload
+* renames via server-side copy
+* optional server-side encryption
+* data integrity via MD5 hashes
+* in-memory metadata caching
+* local disk data caching
+* user-specified regions, including Amazon GovCloud
+* authenticate via v2 or v4 signatures
+
+Installation
+------------
+
+Ensure you have all the dependencies:
+
+On Ubuntu 14.04:
+
+```
+sudo apt-get install automake autotools-dev g++ git libcurl4-gnutls-dev libfuse-dev libssl-dev libxml2-dev make pkg-config
+```
+
+On CentOS 7:
+
+```
+sudo yum install automake fuse-devel gcc-c++ git libcurl-devel libxml2-devel make openssl-devel
+```
+
+Compile from master via the following commands:
+
+```
+git clone https://github.com/s3fs-fuse/s3fs-fuse.git
+cd s3fs-fuse
+./autogen.sh
+./configure
+make
+sudo make install
+```
+
+Examples
+--------
+
+Enter your S3 identity and credential in a file `/path/to/passwd`:
+
+```
+echo MYIDENTITY:MYCREDENTIAL > /path/to/passwd
+```
+
+Make sure the file has proper permissions (if you get 'permissions' error when mounting) `/path/to/passwd`:
+
+```
+chmod 600 /path/to/passwd
+```
+
+Run s3fs with an existing bucket `mybucket` and directory `/path/to/mountpoint`:
+
+```
+s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd
+```
+
+If you encounter any errors, enable debug output:
+
+```
+s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd -d -d -f -o f2 -o curldbg
+```
+
+You can also mount on boot by entering the following line to `/etc/fstab`:
+
+```
+s3fs#mybucket /path/to/mountpoint fuse _netdev,allow_other 0 0
+```
+
+Limitations
+-----------
+
+Generally S3 cannot offer the same performance or semantics as a local file system. More specifically:
+
+* random writes or appends to files require rewriting the entire file
+* metadata operations such as listing directories have poor performance due to network latency
+* [eventual consistency](https://en.wikipedia.org/wiki/Eventual_consistency) can temporarily yield stale data
+* no atomic renames of files or directories
+* no coordination between multiple clients mounting the same bucket
+* no hard links
+
+References
+----------
+
+* [s3backer](https://github.com/archiecobbs/s3backer) - mount an S3 bucket as a single file
+* [s3fs-python](https://fedorahosted.org/s3fs/) - an older and less complete implementation written in Python
+* [S3Proxy](https://github.com/andrewgaul/s3proxy) - combine with s3fs to mount EMC Atmos, Microsoft Azure, and OpenStack Swift buckets
+* [s3ql](https://bitbucket.org/nikratio/s3ql/) - similar to s3fs but uses its own object format
+* [YAS3FS](https://github.com/danilop/yas3fs) - similar to s3fs but uses SNS to allow multiple clients to mount a bucket
+
+License
+-------
+
+Copyright (C) 2010 Randy Rizun
+
+Licensed under the GNU GPL version 2
diff --git a/autogen.sh b/autogen.sh
index b1e9c84..ae16921 100755
--- a/autogen.sh
+++ b/autogen.sh
@@ -19,6 +19,28 @@
 #
 # See the file ChangeLog for a revision history.
 
+echo "--- Make commit hash file -------"
+
+SHORTHASH="unknown"
+type git > /dev/null 2>&1
+if [ $? -eq 0 -a -d .git ]; then
+  RESULT=`git rev-parse --short HEAD`
+  if [ $? -eq 0 ]; then
+    SHORTHASH=${RESULT}
+  fi
+fi
+echo ${SHORTHASH} > default_commit_hash
+
+echo "--- Finished commit hash file ---"
+
+echo "--- Start autotools -------------"
+
 aclocal \
+&& autoheader \
 && automake --add-missing \
 && autoconf
+
+echo "--- Finished autotools ----------"
+
+exit 0
+
diff --git a/configure.ac b/configure.ac
index 3186765..db9a005 100644
--- a/configure.ac
+++ b/configure.ac
@@ -21,15 +21,19 @@ dnl Process this file with autoconf to produce a configure script.
 AC_PREREQ(2.59)
 AC_INIT(s3fs, 1.79)
+AC_CONFIG_HEADER([config.h])
 AC_CANONICAL_SYSTEM
-AM_INIT_AUTOMAKE()
+AM_INIT_AUTOMAKE([foreign])
 AC_PROG_CXX
 AC_PROG_CC
 CXXFLAGS="$CXXFLAGS -Wall -D_FILE_OFFSET_BITS=64"
+dnl ----------------------------------------------
+dnl For OSX
+dnl ----------------------------------------------
 case "$target" in
 *-darwin* )
   # Do something specific for mac
@@ -219,12 +223,40 @@ AM_CONDITIONAL([USE_GNUTLS_NETTLE], [test "$auth_lib" = nettle])
 AM_CONDITIONAL([USE_SSL_NSS], [test "$auth_lib" = nss])
 dnl ----------------------------------------------
-dnl end of ssl library
+dnl check functions
 dnl ----------------------------------------------
-
 dnl malloc_trim function
-AC_CHECK_FUNCS(malloc_trim, , )
+AC_CHECK_FUNCS([malloc_trim])
+dnl ----------------------------------------------
+dnl output files
+dnl ----------------------------------------------
 AC_CONFIG_FILES(Makefile src/Makefile test/Makefile doc/Makefile)
+
+dnl ----------------------------------------------
+dnl short commit hash
+dnl ----------------------------------------------
+AC_CHECK_PROG([GITCMD], [git --version], [yes], [no])
+AC_CHECK_FILE([.git], [DOTGITDIR=yes], [DOTGITDIR=no])
+
+AC_MSG_CHECKING([github short commit hash])
+if test "x${GITCMD}" = "xyes" -a "x${DOTGITDIR}" = "xyes"; then
+  GITCOMMITHASH=`git rev-parse --short HEAD`
+elif test -f default_commit_hash; then
+  GITCOMMITHASH=`cat default_commit_hash`
+else
+  GITCOMMITHASH="unknown"
+fi
+AC_MSG_RESULT([${GITCOMMITHASH}])
+
+AC_DEFINE_UNQUOTED([COMMIT_HASH_VAL], ["${GITCOMMITHASH}"], [short commit hash value on github])
+
+dnl ----------------------------------------------
+dnl put
+dnl ----------------------------------------------
 AC_OUTPUT
+dnl ----------------------------------------------
+dnl end configuration
+dnl ----------------------------------------------
+
diff --git a/doc/man/s3fs.1 b/doc/man/s3fs.1
index 934adfe..2bb06d9 100644
--- a/doc/man/s3fs.1
+++ b/doc/man/s3fs.1
@@ -62,19 +62,41 @@ local folder to use for local file cache.
 \fB\-o\fR del_cache - delete local file cache
 delete local file cache when s3fs starts and exits.
 .TP
+\fB\-o\fR storage_class (default is standard)
+store object with specified storage class.
+this option replaces the old option use_rrs.
+Possible values: standard, standard_ia, and reduced_redundancy.
+.TP
 \fB\-o\fR use_rrs (default is disable)
 use Amazon's Reduced Redundancy Storage.
 this option can not be specified with use_sse.
 (can specify use_rrs=1 for old version)
+this option has been replaced by new storage_class option.
 .TP
 \fB\-o\fR use_sse (default is disable)
-use Amazon's Server-Site Encryption or Server-Side Encryption with Customer-Provided Encryption Keys.
-this option can not be specified with use_rrs. specifying only "use_sse" or "use_sse=1" enables Server-Side Encryption.(use_sse=1 for old version)
-specifying this option with file path which has some SSE-C secret key enables Server-Side Encryption with Customer-Provided Encryption Keys.(use_sse=file)
-the file must be 600 permission. the file can have some lines, each line is one SSE-C key. the first line in file is used as Customer-Provided Encryption Keys for uploading and change headers etc.
-if there are some keys after first line, those are used downloading object which are encripted by not first key.
-so that, you can keep all SSE-C keys in file, that is SSE-C key history.
-if AWSSSECKEYS environment is set, you can set SSE-C key instead of this option.
+Specify one of three types of Amazon's Server-Side Encryption: SSE-S3, SSE-C or SSE-KMS. SSE-S3 uses Amazon S3-managed encryption keys, SSE-C uses customer-provided encryption keys, and SSE-KMS uses a master key which you manage in AWS KMS.
+Specifying "use_sse" or "use_sse=1" enables the SSE-S3 type (use_sse=1 is the old style parameter).
+To set SSE-C, specify "use_sse=custom", "use_sse=custom:<SSE-C keys file path>" or "use_sse=<SSE-C keys file path>" (only a file path is the old style parameter).
+You can use "c" as an abbreviation for "custom".
+The custom key file must have 600 permissions. The file can have several lines; each line is one SSE-C key.
+The first line in the file is used as the customer-provided encryption key for uploading and changing headers etc.
+If there are keys after the first line, they are used for downloading objects which were encrypted with a key other than the first one.
+This way you can keep all SSE-C keys in the file, as an SSE-C key history.
+If you specify "custom" ("c") without a file path, you need to set a custom key with the load_sse_c option or the AWSSSECKEYS environment variable (AWSSSECKEYS holds SSE-C keys separated by ":").
+This option is used to decide the SSE type.
+So if you do not want to encrypt objects at upload time, but need to decrypt encrypted objects at download time, use the load_sse_c option instead of this option.
+To set SSE-KMS, specify "use_sse=kmsid" or "use_sse=kmsid:<kms id>".
+You can use "k" as an abbreviation for "kmsid".
+If you specify the SSE-KMS type with your <kms id> in AWS KMS, set it after "kmsid:" (or "k:").
+If you specify only "kmsid" ("k"), you need to set the AWSSSEKMSID environment variable whose value is the <kms id>.
+Be careful that you can not use a KMS id which is not in the same region as your EC2 instance.
+.TP
+\fB\-o\fR load_sse_c - specify SSE-C keys
+Specify the customer-provided encryption keys file path for decrypting at download time.
+If you use a customer-provided encryption key at upload time, specify it with "use_sse=custom".
+The file has many lines; one line means one custom key.
+This way you can keep all SSE-C keys in the file, as an SSE-C key history.
+The AWSSSECKEYS environment variable holds the same contents as this file.
 .TP
 \fB\-o\fR passwd_file (default="")
 specify the path to the password file, which takes precedence over the password in $HOME/.passwd-s3fs and /etc/passwd-s3fs
@@ -118,7 +140,8 @@ s3fs always has to check whether file(or sub directory) exists under object(path
 It increases ListBucket request and makes performance bad.
 You can specify this option for performance, s3fs memorizes in stat cache that the object(file or directory) does not exist.
 .TP
-\fB\-o\fR no_check_certificate (by default this option is disabled) - do not check ssl certificate.
+\fB\-o\fR no_check_certificate (by default this option is disabled)
+do not check ssl certificate.
 server certificate won't be checked against the available certificate authorities.
 .TP
 \fB\-o\fR nodnscache - disable dns cache.
@@ -135,26 +158,23 @@ number of parallel request for uploading big objects.
 s3fs uploads large object(default:over 20MB) by multipart post request, and sends parallel requests.
 This option limits parallel request count which s3fs requests at once.
 It is necessary to set this value depending on a CPU and a network band.
-This option is lated to fd_page_size option and affects it.
-.TP
-\fB\-o\fR fd_page_size(default="52428800"(50MB))
-number of internal management page size for each file descriptor.
-For delayed reading and writing by s3fs, s3fs manages pages which is separated from object. Each pages has a status that data is already loaded(or not loaded yet).
-This option should not be changed when you don't have a trouble with performance.
-This value is changed automatically by parallel_count and multipart_size values(fd_page_size value = parallel_count * multipart_size).
 .TP
 \fB\-o\fR multipart_size(default="10"(10MB))
 number of one part size in multipart uploading request.
-The default size is 10MB(10485760byte), this value is minimum size.
-Specify number of MB and over 10(MB).
-This option is lated to fd_page_size option and affects it.
+The default size is 10MB (10485760 bytes); the minimum value is 5MB (5242880 bytes).
+Specify the part size as a number of MB, 5 (MB) or more.
+.TP
+\fB\-o\fR ensure_diskfree (default the same as multipart_size value)
+sets the free disk space (in MB) to ensure. This option is the threshold of free space on the disk which s3fs uses for its cache files.
+s3fs makes files on disk for downloading, uploading and caching.
+If the free disk space is smaller than this value, s3fs uses as little disk space as possible, in exchange for performance.
 .TP
 \fB\-o\fR url (default="http://s3.amazonaws.com")
 sets the url to use to access Amazon S3. If you want to use HTTPS, then you can set url=https://s3.amazonaws.com
 .TP
 \fB\-o\fR endpoint (default="us-east-1")
 sets the endpoint to use.
-If this option is not specified, s3fs uses \"us-east-1\" region as the default.
+If this option is not specified, s3fs uses "us-east-1" region as the default.
 If the s3fs could not connect to the region specified by this option, s3fs could not run.
 But if you do not specify this option, and if you can not connect with the default region, s3fs will retry to automatically connect to the other region.
 So s3fs can know the correct region name, because s3fs can find it in an error from the S3 server.
@@ -194,6 +214,14 @@ If this option is specified with nocopapi, the s3fs ignores it.
 .TP
 \fB\-o\fR use_path_request_style (use legacy API calling style)
 Enable compatibility with S3-like APIs which do not support the virtual-host request style, by using the older path request style.
+.TP
+\fB\-o\fR dbglevel (default="crit")
+Set the debug message level. Set the value to crit (critical), err (error), warn (warning) or info (information). The default debug level is critical.
+If s3fs runs with the "-d" option, the debug level is set to information.
+When s3fs catches the signal SIGUSR2, the debug level is bumped up.
+.TP
+\fB\-o\fR curldbg - put curl debug message
+Output the debug messages from libcurl when this option is specified.
 .SH FUSE/MOUNT OPTIONS
 .TP
 Most of the generic mount options described in 'man mount' are supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime, noatime, sync, async, dirsync). Filesystems are mounted with '\-onodev,nosuid' by default, which can only be overridden by a privileged user.
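The SSE-C key file described above under use_sse and load_sse_c (and implemented by S3fsCurl::SetSseCKeys later in this diff) reduces to a small loader: reject key files whose permissions are wider than 600, read one key per line, use the first key for uploads, and keep the rest for decrypting objects written under older keys. A minimal sketch of that shape; `load_ssec_keys` is a hypothetical name, not the s3fs API:

```
// Sketch only: mirrors the key-file rules in the man page above.
#include <fstream>
#include <string>
#include <vector>
#include <sys/stat.h>

static bool load_ssec_keys(const char* path, std::vector<std::string>& keys)
{
  struct stat st;
  // the man page requires the key file to have 600 permissions
  if(0 != stat(path, &st) || 0 != (st.st_mode & (S_IXUSR | S_IRWXG | S_IRWXO))){
    return false;
  }
  std::ifstream ifs(path);
  std::string line;
  while(std::getline(ifs, line)){
    if(!line.empty()){
      keys.push_back(line);   // keys[0] encrypts uploads; later keys decrypt old objects
    }
  }
  return !keys.empty();
}
```

Keeping superseded keys in the file is what makes key rotation safe: objects uploaded under a previous key stay readable.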
diff --git a/src/Makefile.am b/src/Makefile.am index f9edf84..5741f1b 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -39,6 +39,6 @@ s3fs_LDADD = $(DEPS_LIBS) noinst_PROGRAMS = test_string_util -test_string_util_SOURCES = string_util.cpp test_string_util.cpp +test_string_util_SOURCES = string_util.cpp test_string_util.cpp test_util.h TESTS = test_string_util diff --git a/src/cache.cpp b/src/cache.cpp index f3f63c4..abb57c9 100644 --- a/src/cache.cpp +++ b/src/cache.cpp @@ -164,11 +164,11 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove } if(is_delete_cache){ // not hit by different ETag - DPRNNN("stat cache not hit by ETag[path=%s][time=%jd][hit count=%lu][ETag(%s)!=(%s)]", + S3FS_PRN_DBG("stat cache not hit by ETag[path=%s][time=%jd][hit count=%lu][ETag(%s)!=(%s)]", strpath.c_str(), (intmax_t)(ent->cache_date), ent->hit_count, petag ? petag : "null", ent->meta["ETag"].c_str()); }else{ // hit - DPRNNN("stat cache hit [path=%s][time=%jd][hit count=%lu]", strpath.c_str(), (intmax_t)(ent->cache_date), ent->hit_count); + S3FS_PRN_DBG("stat cache hit [path=%s][time=%jd][hit count=%lu]", strpath.c_str(), (intmax_t)(ent->cache_date), ent->hit_count); if(pst!= NULL){ *pst= ent->stbuf; @@ -245,12 +245,19 @@ bool StatCache::AddStat(std::string& key, headers_t& meta, bool forcedir) if(CacheSize< 1){ return true; } - DPRNNN("add stat cache entry[path=%s]", key.c_str()); + S3FS_PRN_INFO3("add stat cache entry[path=%s]", key.c_str()); - if(stat_cache.end() != stat_cache.find(key)){ + pthread_mutex_lock(&StatCache::stat_cache_lock); + + bool found = stat_cache.end() != stat_cache.find(key); + bool do_truncate = stat_cache.size() > CacheSize; + + pthread_mutex_unlock(&StatCache::stat_cache_lock); + + if(found){ DelStat(key.c_str()); }else{ - if(stat_cache.size() > CacheSize){ + if(do_truncate){ if(!TruncateCache()){ return false; } @@ -300,12 +307,19 @@ bool StatCache::AddNoObjectCache(string& key) if(CacheSize < 1){ return true; } - DPRNNN("add no object cache entry[path=%s]", key.c_str()); + S3FS_PRN_INFO3("add no object cache entry[path=%s]", key.c_str()); - if(stat_cache.end() != stat_cache.find(key)){ + pthread_mutex_lock(&StatCache::stat_cache_lock); + + bool found = stat_cache.end() != stat_cache.find(key); + bool do_truncate = stat_cache.size() > CacheSize; + + pthread_mutex_unlock(&StatCache::stat_cache_lock); + + if(found){ DelStat(key.c_str()); }else{ - if(stat_cache.size() > CacheSize){ + if(do_truncate){ if(!TruncateCache()){ return false; } @@ -330,17 +344,18 @@ bool StatCache::AddNoObjectCache(string& key) bool StatCache::TruncateCache(void) { - if(0 == stat_cache.size()){ + pthread_mutex_lock(&StatCache::stat_cache_lock); + + if(stat_cache.empty()){ + pthread_mutex_unlock(&StatCache::stat_cache_lock); return true; } - pthread_mutex_lock(&StatCache::stat_cache_lock); - time_t lowest_time = time(NULL) + 1; stat_cache_t::iterator iter_to_delete = stat_cache.end(); stat_cache_t::iterator iter; - for(iter = stat_cache.begin(); iter != stat_cache.end(); iter++) { + for(iter = stat_cache.begin(); iter != stat_cache.end(); ++iter) { if((*iter).second){ if(lowest_time > (*iter).second->cache_date){ lowest_time = (*iter).second->cache_date; @@ -349,7 +364,7 @@ bool StatCache::TruncateCache(void) } } if(stat_cache.end() != iter_to_delete){ - DPRNNN("truncate stat cache[path=%s]", (*iter_to_delete).first.c_str()); + S3FS_PRN_DBG("truncate stat cache[path=%s]", (*iter_to_delete).first.c_str()); if((*iter_to_delete).second){ delete (*iter_to_delete).second; } @@ 
-367,7 +382,7 @@ bool StatCache::DelStat(const char* key) if(!key){ return false; } - DPRNNN("delete stat cache entry[path=%s]", key); + S3FS_PRN_INFO3("delete stat cache entry[path=%s]", key); pthread_mutex_lock(&StatCache::stat_cache_lock); diff --git a/src/common.h b/src/common.h index 8cbce63..ee80603 100644 --- a/src/common.h +++ b/src/common.h @@ -17,64 +17,91 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ + #ifndef S3FS_COMMON_H_ #define S3FS_COMMON_H_ +#include "../config.h" + // // Macro // #define SAFESTRPTR(strptr) (strptr ? strptr : "") -// for debug -#define FPRINT_NEST_SPACE_0 "" -#define FPRINT_NEST_SPACE_1 " " -#define FPRINT_NEST_SPACE_2 " " -#define FPRINT_NEST_CHECK(NEST) \ - (0 == NEST ? FPRINT_NEST_SPACE_0 : 1 == NEST ? FPRINT_NEST_SPACE_1 : FPRINT_NEST_SPACE_2) +// +// Debug level +// +enum s3fs_log_level{ + S3FS_LOG_CRIT = 0, // LOG_CRIT + S3FS_LOG_ERR = 1, // LOG_ERR + S3FS_LOG_WARN = 3, // LOG_WARNING + S3FS_LOG_INFO = 7, // LOG_INFO + S3FS_LOG_DBG = 15 // LOG_DEBUG +}; -#define LOWFPRINT(NEST, ...) \ - printf("%s%s(%d): ", FPRINT_NEST_CHECK(NEST), __func__, __LINE__); \ - printf(__VA_ARGS__); \ - printf("\n"); \ +// +// Debug macros +// +#define IS_S3FS_LOG_CRIT() (S3FS_LOG_CRIT == debug_level) +#define IS_S3FS_LOG_ERR() (S3FS_LOG_ERR == (debug_level & S3FS_LOG_DBG)) +#define IS_S3FS_LOG_WARN() (S3FS_LOG_WARN == (debug_level & S3FS_LOG_DBG)) +#define IS_S3FS_LOG_INFO() (S3FS_LOG_INFO == (debug_level & S3FS_LOG_DBG)) +#define IS_S3FS_LOG_DBG() (S3FS_LOG_DBG == (debug_level & S3FS_LOG_DBG)) -#define FPRINT(NEST, ...) \ - if(foreground){ \ - LOWFPRINT(NEST, __VA_ARGS__); \ - } +#define S3FS_LOG_LEVEL_TO_SYSLOG(level) \ + ( S3FS_LOG_DBG == (level & S3FS_LOG_DBG) ? LOG_DEBUG : \ + S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? LOG_INFO : \ + S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? LOG_WARNING : \ + S3FS_LOG_ERR == (level & S3FS_LOG_DBG) ? LOG_ERR : LOG_CRIT ) -#define FPRINT2(NEST, ...) \ - if(foreground2){ \ - LOWFPRINT(NEST, __VA_ARGS__); \ - } +#define S3FS_LOG_LEVEL_STRING(level) \ + ( S3FS_LOG_DBG == (level & S3FS_LOG_DBG) ? "[DBG] " : \ + S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? "[INF] " : \ + S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? "[WAN] " : \ + S3FS_LOG_ERR == (level & S3FS_LOG_DBG) ? "[ERR] " : "[CRT] " ) -#define LOWSYSLOGPRINT(LEVEL, ...) \ - syslog(LEVEL, __VA_ARGS__); +#define S3FS_LOG_NEST_MAX 4 +#define S3FS_LOG_NEST(nest) (nest < S3FS_LOG_NEST_MAX ? s3fs_log_nest[nest] : s3fs_log_nest[S3FS_LOG_NEST_MAX - 1]) -#define SYSLOGPRINT(LEVEL, ...) \ - if(LEVEL <= LOG_CRIT || debug){ \ - LOWSYSLOGPRINT(LEVEL, __VA_ARGS__); \ - } +#define S3FS_LOW_LOGPRN(level, fmt, ...) \ + if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \ + if(foreground){ \ + fprintf(stdout, "%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), __FILE__, __func__, __LINE__, __VA_ARGS__); \ + }else{ \ + syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s:%s(%d): " fmt "%s", __FILE__, __func__, __LINE__, __VA_ARGS__); \ + } \ + } -#define DPRINT(LEVEL, NEST, ...) \ - FPRINT(NEST, __VA_ARGS__); \ - SYSLOGPRINT(LEVEL, __VA_ARGS__); +#define S3FS_LOW_LOGPRN2(level, nest, fmt, ...) 
\ + if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \ + if(foreground){ \ + fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), S3FS_LOG_NEST(nest), __FILE__, __func__, __LINE__, __VA_ARGS__); \ + }else{ \ + syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s" fmt "%s", S3FS_LOG_NEST(nest), __VA_ARGS__); \ + } \ + } -#define DPRINT2(LEVEL, ...) \ - FPRINT2(2, __VA_ARGS__); \ - SYSLOGPRINT(LEVEL, __VA_ARGS__); +#define S3FS_LOW_LOGPRN_EXIT(fmt, ...) \ + if(foreground){ \ + fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \ + }else{ \ + syslog(S3FS_LOG_CRIT, "s3fs: " fmt "%s", __VA_ARGS__); \ + } -// print debug message -#define FPRN(...) FPRINT(0, __VA_ARGS__) -#define FPRNN(...) FPRINT(1, __VA_ARGS__) -#define FPRNNN(...) FPRINT(2, __VA_ARGS__) -#define FPRNINFO(...) FPRINT2(2, __VA_ARGS__) - -// print debug message with putting syslog -#define DPRNCRIT(...) DPRINT(LOG_CRIT, 0, __VA_ARGS__) -#define DPRN(...) DPRINT(LOG_ERR, 0, __VA_ARGS__) -#define DPRNN(...) DPRINT(LOG_DEBUG, 1, __VA_ARGS__) -#define DPRNNN(...) DPRINT(LOG_DEBUG, 2, __VA_ARGS__) -#define DPRNINFO(...) DPRINT2(LOG_INFO, __VA_ARGS__) +// [NOTE] +// small trick for VA_ARGS +// +#define S3FS_PRN_EXIT(fmt, ...) S3FS_LOW_LOGPRN_EXIT(fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_CRIT(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_CRIT, fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_ERR(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_ERR, fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_WARN(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_WARN, fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_DBG(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_DBG, fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_INFO(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 0, fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_INFO0(fmt, ...) S3FS_LOG_INFO(fmt, __VA_ARGS__) +#define S3FS_PRN_INFO1(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 1, fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_INFO2(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 2, fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_INFO3(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 3, fmt, ##__VA_ARGS__, "") +#define S3FS_PRN_CURL(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_CRIT, 0, fmt, ##__VA_ARGS__, "") // // Typedef @@ -90,7 +117,7 @@ typedef struct xattr_value{ unsigned char* pvalue; size_t length; - xattr_value(unsigned char* pval = NULL, size_t len = 0) : pvalue(pval), length(len) {} + explicit xattr_value(unsigned char* pval = NULL, size_t len = 0) : pvalue(pval), length(len) {} ~xattr_value() { if(pvalue){ @@ -104,17 +131,17 @@ typedef std::map xattrs_t; // // Global valiables // -extern bool debug; -extern bool foreground; -extern bool foreground2; -extern bool nomultipart; -extern bool pathrequeststyle; -extern std::string program_name; -extern std::string service_path; -extern std::string host; -extern std::string bucket; -extern std::string mount_prefix; -extern std::string endpoint; +extern bool foreground; +extern bool nomultipart; +extern bool pathrequeststyle; +extern std::string program_name; +extern std::string service_path; +extern std::string host; +extern std::string bucket; +extern std::string mount_prefix; +extern std::string endpoint; +extern s3fs_log_level debug_level; +extern const char* s3fs_log_nest[S3FS_LOG_NEST_MAX]; #endif // S3FS_COMMON_H_ diff --git a/src/common_auth.cpp b/src/common_auth.cpp index e6af4ac..778a5ab 100644 --- a/src/common_auth.cpp +++ b/src/common_auth.cpp @@ -18,104 +18,20 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ +#include #include #include #include #include #include "s3fs_auth.h" +#include "string_util.h" using namespace std; //------------------------------------------------------------------- // Utility Function //------------------------------------------------------------------- -char* s3fs_base64(const unsigned char* input, size_t length) -{ - static const char* base = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="; - char* result; - - if(!input || 0 >= length){ - return NULL; - } - if(NULL == (result = (char*)malloc((((length / 3) + 1) * 4 + 1) * sizeof(char)))){ - return NULL; // ENOMEM - } - - unsigned char parts[4]; - size_t rpos; - size_t wpos; - for(rpos = 0, wpos = 0; rpos < length; rpos += 3){ - parts[0] = (input[rpos] & 0xfc) >> 2; - parts[1] = ((input[rpos] & 0x03) << 4) | ((((rpos + 1) < length ? input[rpos + 1] : 0x00) & 0xf0) >> 4); - parts[2] = (rpos + 1) < length ? (((input[rpos + 1] & 0x0f) << 2) | ((((rpos + 2) < length ? input[rpos + 2] : 0x00) & 0xc0) >> 6)) : 0x40; - parts[3] = (rpos + 2) < length ? (input[rpos + 2] & 0x3f) : 0x40; - - result[wpos++] = base[parts[0]]; - result[wpos++] = base[parts[1]]; - result[wpos++] = base[parts[2]]; - result[wpos++] = base[parts[3]]; - } - result[wpos] = '\0'; - - return result; -} - -inline unsigned char char_decode64(const char ch) -{ - unsigned char by; - if('A' <= ch && ch <= 'Z'){ // A - Z - by = static_cast(ch - 'A'); - }else if('a' <= ch && ch <= 'z'){ // a - z - by = static_cast(ch - 'a' + 26); - }else if('0' <= ch && ch <= '9'){ // 0 - 9 - by = static_cast(ch - '0' + 52); - }else if('+' == ch){ // + - by = 62; - }else if('/' == ch){ // / - by = 63; - }else if('=' == ch){ // = - by = 64; - }else{ // something wrong - by = 64; - } - return by; -} - -unsigned char* s3fs_decode64(const char* input, size_t* plength) -{ - unsigned char* result; - if(!input || 0 == strlen(input) || !plength){ - return NULL; - } - if(NULL == (result = (unsigned char*)malloc((strlen(input) + 1)))){ - return NULL; // ENOMEM - } - - unsigned char parts[4]; - size_t input_len = strlen(input); - size_t rpos; - size_t wpos; - for(rpos = 0, wpos = 0; rpos < input_len; rpos += 4){ - parts[0] = char_decode64(input[rpos]); - parts[1] = (rpos + 1) < input_len ? char_decode64(input[rpos + 1]) : 64; - parts[2] = (rpos + 2) < input_len ? char_decode64(input[rpos + 2]) : 64; - parts[3] = (rpos + 3) < input_len ? 
char_decode64(input[rpos + 3]) : 64; - - result[wpos++] = ((parts[0] << 2) & 0xfc) | ((parts[1] >> 4) & 0x03); - if(64 == parts[2]){ - break; - } - result[wpos++] = ((parts[1] << 4) & 0xf0) | ((parts[2] >> 2) & 0x0f); - if(64 == parts[3]){ - break; - } - result[wpos++] = ((parts[2] << 6) & 0xc0) | (parts[3] & 0x3f); - } - *plength = wpos; - return result; -} - string s3fs_get_content_md5(int fd) { unsigned char* md5hex; @@ -139,22 +55,16 @@ string s3fs_get_content_md5(int fd) string s3fs_md5sum(int fd, off_t start, ssize_t size) { size_t digestlen = get_md5_digest_length(); - char md5[2 * digestlen + 1]; - char hexbuf[3]; unsigned char* md5hex; if(NULL == (md5hex = s3fs_md5hexsum(fd, start, size))){ return string(""); } - memset(md5, 0, 2 * digestlen + 1); - for(size_t pos = 0; pos < digestlen; pos++){ - snprintf(hexbuf, 3, "%02x", md5hex[pos]); - strncat(md5, hexbuf, 2); - } + std::string md5 = s3fs_hex(md5hex, digestlen); free(md5hex); - return string(md5); + return md5; } string s3fs_sha256sum(int fd, off_t start, ssize_t size) diff --git a/src/curl.cpp b/src/curl.cpp index ab187e2..c68076c 100644 --- a/src/curl.cpp +++ b/src/curl.cpp @@ -63,30 +63,30 @@ static const std::string empty_payload_hash = "e3b0c44298fc1c149afbf4c8996fb9242 static bool make_md5_from_string(const char* pstr, string& md5) { if(!pstr || '\0' == pstr[0]){ - DPRN("Parameter is wrong."); + S3FS_PRN_ERR("Parameter is wrong."); return false; } FILE* fp; if(NULL == (fp = tmpfile())){ - FPRN("Could not make tmpfile."); + S3FS_PRN_ERR("Could not make tmpfile."); return false; } size_t length = strlen(pstr); if(length != fwrite(pstr, sizeof(char), length, fp)){ - FPRN("Failed to write tmpfile."); + S3FS_PRN_ERR("Failed to write tmpfile."); fclose(fp); return false; } int fd; if(0 != fflush(fp) || 0 != fseek(fp, 0L, SEEK_SET) || -1 == (fd = fileno(fp))){ - FPRN("Failed to make MD5."); + S3FS_PRN_ERR("Failed to make MD5."); fclose(fp); return false; } // base64 md5 md5 = s3fs_get_content_md5(fd); if(0 == md5.length()){ - FPRN("Failed to make MD5."); + S3FS_PRN_ERR("Failed to make MD5."); fclose(fp); return false; } @@ -96,27 +96,26 @@ static bool make_md5_from_string(const char* pstr, string& md5) static string url_to_host(const std::string &url) { - DPRNNN("url is %s", url.c_str()); + S3FS_PRN_INFO3("url is %s", url.c_str()); - static const string http = "http://"; - static const string https = "https://"; - std::string host; + static const string http = "http://"; + static const string https = "https://"; + std::string host; - if (url.compare(0, http.size(), http) == 0) { - host = url.substr(http.size()); - } else if (url.compare(0, https.size(), https) == 0) { - host = url.substr(https.size()); - } else { - assert(!"url does not begin with http:// or https://"); - } + if (url.compare(0, http.size(), http) == 0) { + host = url.substr(http.size()); + } else if (url.compare(0, https.size(), https) == 0) { + host = url.substr(https.size()); + } else { + assert(!"url does not begin with http:// or https://"); + } - size_t idx; - - if ((idx = host.find(':')) != string::npos || (idx = host.find('/')) != string::npos) { - return host.substr(0, idx); - } else { - return host; - } + size_t idx; + if ((idx = host.find(':')) != string::npos || (idx = host.find('/')) != string::npos) { + return host.substr(0, idx); + } else { + return host; + } } static string get_bucket_host() @@ -172,7 +171,7 @@ bool BodyData::Resize(size_t addbytes) // realloc char* newtext; if(NULL == (newtext = (char*)realloc(text, (bufsize + need_size)))){ - 
DPRNCRIT("not enough memory (realloc returned NULL)"); + S3FS_PRN_CRIT("not enough memory (realloc returned NULL)"); free(text); text = NULL; return false; @@ -252,9 +251,10 @@ time_t S3fsCurl::readwrite_timeout = 60; // default int S3fsCurl::retries = 3; // default bool S3fsCurl::is_public_bucket = false; string S3fsCurl::default_acl = "private"; -bool S3fsCurl::is_use_rrs = false; +storage_class_t S3fsCurl::storage_class = STANDARD; sseckeylist_t S3fsCurl::sseckeys; -bool S3fsCurl::is_use_sse = false; +std::string S3fsCurl::ssekmsid = ""; +sse_type_t S3fsCurl::ssetype = SSE_DISABLE; bool S3fsCurl::is_content_md5 = false; bool S3fsCurl::is_verbose = false; string S3fsCurl::AWSAccessKeyId; @@ -331,7 +331,7 @@ bool S3fsCurl::InitGlobalCurl(void) return false; } if(CURLE_OK != curl_global_init(CURL_GLOBAL_ALL)){ - DPRN("init_curl_global_all returns error."); + S3FS_PRN_ERR("init_curl_global_all returns error."); return false; } S3fsCurl::is_initglobal_done = true; @@ -353,45 +353,45 @@ bool S3fsCurl::InitShareCurl(void) CURLSHcode nSHCode; if(!S3fsCurl::is_dns_cache && !S3fsCurl::is_ssl_session_cache){ - DPRN("Curl does not share DNS data."); + S3FS_PRN_INFO("Curl does not share DNS data."); return true; } if(S3fsCurl::hCurlShare){ - DPRN("already initiated."); + S3FS_PRN_WARN("already initiated."); return false; } if(NULL == (S3fsCurl::hCurlShare = curl_share_init())){ - DPRN("curl_share_init failed"); + S3FS_PRN_ERR("curl_share_init failed"); return false; } if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_LOCKFUNC, S3fsCurl::LockCurlShare))){ - DPRN("curl_share_setopt(LOCKFUNC) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); + S3FS_PRN_ERR("curl_share_setopt(LOCKFUNC) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); return false; } if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_UNLOCKFUNC, S3fsCurl::UnlockCurlShare))){ - DPRN("curl_share_setopt(UNLOCKFUNC) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); + S3FS_PRN_ERR("curl_share_setopt(UNLOCKFUNC) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); return false; } if(S3fsCurl::is_dns_cache){ nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS); if(CURLSHE_OK != nSHCode && CURLSHE_BAD_OPTION != nSHCode && CURLSHE_NOT_BUILT_IN != nSHCode){ - DPRN("curl_share_setopt(DNS) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); + S3FS_PRN_ERR("curl_share_setopt(DNS) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); return false; }else if(CURLSHE_BAD_OPTION == nSHCode || CURLSHE_NOT_BUILT_IN == nSHCode){ - DPRN("curl_share_setopt(DNS) returns %d(%s), but continue without shared dns data.", nSHCode, curl_share_strerror(nSHCode)); + S3FS_PRN_WARN("curl_share_setopt(DNS) returns %d(%s), but continue without shared dns data.", nSHCode, curl_share_strerror(nSHCode)); } } if(S3fsCurl::is_ssl_session_cache){ nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_SHARE, CURL_LOCK_DATA_SSL_SESSION); if(CURLSHE_OK != nSHCode && CURLSHE_BAD_OPTION != nSHCode && CURLSHE_NOT_BUILT_IN != nSHCode){ - DPRN("curl_share_setopt(SSL SESSION) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); + S3FS_PRN_ERR("curl_share_setopt(SSL SESSION) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); return false; }else if(CURLSHE_BAD_OPTION == nSHCode || CURLSHE_NOT_BUILT_IN == nSHCode){ - DPRN("curl_share_setopt(SSL SESSION) returns %d(%s), but continue without shared ssl session data.", nSHCode, curl_share_strerror(nSHCode)); 
+ S3FS_PRN_WARN("curl_share_setopt(SSL SESSION) returns %d(%s), but continue without shared ssl session data.", nSHCode, curl_share_strerror(nSHCode)); } } if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_USERDATA, (void*)&S3fsCurl::curl_share_lock[0]))){ - DPRN("curl_share_setopt(USERDATA) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); + S3FS_PRN_ERR("curl_share_setopt(USERDATA) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode)); return false; } return true; @@ -403,7 +403,7 @@ bool S3fsCurl::DestroyShareCurl(void) if(!S3fsCurl::is_dns_cache && !S3fsCurl::is_ssl_session_cache){ return true; } - DPRN("already destroy share curl."); + S3FS_PRN_WARN("already destroy share curl."); return false; } if(CURLSHE_OK != curl_share_cleanup(S3fsCurl::hCurlShare)){ @@ -467,7 +467,7 @@ int S3fsCurl::CurlProgress(void *clientp, double dltotal, double dlnow, double u // timeout? if(now - S3fsCurl::curl_times[curl] > readwrite_timeout){ pthread_mutex_unlock(&S3fsCurl::curl_handles_lock); - DPRN("timeout now: %jd, curl_times[curl]: %jd, readwrite_timeout: %jd", + S3FS_PRN_ERR("timeout now: %jd, curl_times[curl]: %jd, readwrite_timeout: %jd", (intmax_t)now, (intmax_t)(S3fsCurl::curl_times[curl]), (intmax_t)readwrite_timeout); return CURLE_ABORTED_BY_CALLBACK; } @@ -581,7 +581,7 @@ bool S3fsCurl::LocateBundle(void) // check for existence and readability of the file ifstream BF(CURL_CA_BUNDLE); if(!BF.good()){ - DPRN("%s: file specified by CURL_CA_BUNDLE environment variable is not readable", program_name.c_str()); + S3FS_PRN_ERR("%s: file specified by CURL_CA_BUNDLE environment variable is not readable", program_name.c_str()); return false; } BF.close(); @@ -611,7 +611,7 @@ bool S3fsCurl::LocateBundle(void) BF.close(); S3fsCurl::curl_ca_bundle.assign("/etc/pki/tls/certs/ca-bundle.crt"); }else{ - DPRN("%s: /etc/pki/tls/certs/ca-bundle.crt is not readable", program_name.c_str()); + S3FS_PRN_ERR("%s: /etc/pki/tls/certs/ca-bundle.crt is not readable", program_name.c_str()); return false; } return true; @@ -619,10 +619,10 @@ bool S3fsCurl::LocateBundle(void) size_t S3fsCurl::WriteMemoryCallback(void* ptr, size_t blockSize, size_t numBlocks, void* data) { - BodyData* body = (BodyData*)data; + BodyData* body = static_cast(data); if(!body->Append(ptr, blockSize, numBlocks)){ - DPRNCRIT("BodyData.Append() returned false."); + S3FS_PRN_CRIT("BodyData.Append() returned false."); S3FS_FUSE_EXIT(); return -1; } @@ -659,7 +659,7 @@ size_t S3fsCurl::HeaderCallback(void* data, size_t blockSize, size_t numBlocks, // Force to lower, only "x-amz" string lkey = key; transform(lkey.begin(), lkey.end(), lkey.begin(), static_cast(std::tolower)); - if(lkey.substr(0, 5) == "x-amz"){ + if(lkey.compare(0, 5, "x-amz") == 0){ key = lkey; } string value; @@ -691,7 +691,7 @@ size_t S3fsCurl::UploadReadCallback(void* ptr, size_t size, size_t nmemb, void* break; }else if(-1 == readbytes){ // error - DPRN("read file error(%d).", errno); + S3FS_PRN_ERR("read file error(%d).", errno); return 0; } } @@ -725,7 +725,7 @@ size_t S3fsCurl::DownloadWriteCallback(void* ptr, size_t size, size_t nmemb, voi break; }else if(-1 == writebytes){ // error - DPRN("write file error(%d).", errno); + S3FS_PRN_ERR("write file error(%d).", errno); return 0; } } @@ -790,10 +790,10 @@ string S3fsCurl::SetDefaultAcl(const char* acl) return old; } -bool S3fsCurl::SetUseRrs(bool flag) +storage_class_t S3fsCurl::SetStorageClass(storage_class_t storage_class) { - bool old = S3fsCurl::is_use_rrs; - S3fsCurl::is_use_rrs = flag; + 
storage_class_t old = S3fsCurl::storage_class; + S3fsCurl::storage_class = storage_class; return old; } @@ -809,7 +809,7 @@ bool S3fsCurl::PushbackSseKeys(string& onekey) // make base64 char* pbase64_key; if(NULL == (pbase64_key = s3fs_base64((unsigned char*)onekey.c_str(), onekey.length()))){ - FPRN("Failed to convert base64 from sse-c key %s", onekey.c_str()); + S3FS_PRN_ERR("Failed to convert base64 from SSE-C key %s", onekey.c_str()); return false; } string base64_key = pbase64_key; @@ -818,7 +818,7 @@ bool S3fsCurl::PushbackSseKeys(string& onekey) // make MD5 string strMd5; if(!make_md5_from_string(onekey.c_str(), strMd5)){ - FPRN("Could not make MD5 from SSE-C keys(%s).", onekey.c_str()); + S3FS_PRN_ERR("Could not make MD5 from SSE-C keys(%s).", onekey.c_str()); return false; } // mapped MD5 = SSE Key @@ -829,17 +829,34 @@ bool S3fsCurl::PushbackSseKeys(string& onekey) return true; } -bool S3fsCurl::SetSseKeys(const char* filepath) +sse_type_t S3fsCurl::SetSseType(sse_type_t type) +{ + sse_type_t old = S3fsCurl::ssetype; + S3fsCurl::ssetype = type; + return old; +} + +bool S3fsCurl::SetSseCKeys(const char* filepath) { if(!filepath){ - DPRN("SSE-C keys filepath is empty."); + S3FS_PRN_ERR("SSE-C keys filepath is empty."); return false; } + struct stat st; + if(0 != stat(filepath, &st)){ + S3FS_PRN_ERR("could not open use_sse keys file(%s).", filepath); + return false; + } + if(st.st_mode & (S_IXUSR | S_IRWXG | S_IRWXO)){ + S3FS_PRN_ERR("use_sse keys file %s should be 0600 permissions.", filepath); + return false; + } + S3fsCurl::sseckeys.clear(); ifstream ssefs(filepath); if(!ssefs.good()){ - FPRN("Could not open SSE-C keys file(%s).", filepath); + S3FS_PRN_ERR("Could not open SSE-C keys file(%s).", filepath); return false; } @@ -848,17 +865,59 @@ bool S3fsCurl::SetSseKeys(const char* filepath) S3fsCurl::PushbackSseKeys(line); } if(0 == S3fsCurl::sseckeys.size()){ - FPRN("There is no SSE Key in file(%s).", filepath); + S3FS_PRN_ERR("There is no SSE Key in file(%s).", filepath); + return false; + } + return true; +} + +bool S3fsCurl::SetSseKmsid(const char* kmsid) +{ + if(!kmsid || '\0' == kmsid[0]){ + S3FS_PRN_ERR("SSE-KMS kms id is empty."); + return false; + } + S3fsCurl::ssekmsid = kmsid; + return true; +} + +// [NOTE] +// Because SSE is set by some options and environment, +// this function check the integrity of the SSE data finally. +bool S3fsCurl::FinalCheckSse(void) +{ + if(SSE_DISABLE == S3fsCurl::ssetype){ + S3fsCurl::ssekmsid.erase(); + }else if(SSE_S3 == S3fsCurl::ssetype){ + S3fsCurl::ssekmsid.erase(); + }else if(SSE_C == S3fsCurl::ssetype){ + if(0 == S3fsCurl::sseckeys.size()){ + S3FS_PRN_ERR("sse type is SSE-C, but there is no custom key."); + return false; + } + S3fsCurl::ssekmsid.erase(); + }else if(SSE_KMS == S3fsCurl::ssetype){ + if(S3fsCurl::ssekmsid.empty()){ + S3FS_PRN_ERR("sse type is SSE-KMS, but there is no specified kms id."); + return false; + } + if(!S3fsCurl::IsSignatureV4()){ + S3FS_PRN_ERR("sse type is SSE-KMS, but signature type is not v4. 
SSE-KMS require signature v4."); + return false; + } + }else{ + S3FS_PRN_ERR("sse type is unknown(%d).", S3fsCurl::ssetype); return false; } return true; } -bool S3fsCurl::LoadEnvSseKeys(void) +bool S3fsCurl::LoadEnvSseCKeys(void) { char* envkeys = getenv("AWSSSECKEYS"); if(NULL == envkeys){ - return false; + // nothing to do + return true; } S3fsCurl::sseckeys.clear(); @@ -868,18 +927,28 @@ bool S3fsCurl::LoadEnvSseKeys(void) S3fsCurl::PushbackSseKeys(onekey); } if(0 == S3fsCurl::sseckeys.size()){ - FPRN("There is no SSE Key in environment(AWSSSECKEYS=%s).", envkeys); + S3FS_PRN_ERR("There is no SSE Key in environment(AWSSSECKEYS=%s).", envkeys); return false; } return true; } +bool S3fsCurl::LoadEnvSseKmsid(void) +{ + char* envkmsid = getenv("AWSSSEKMSID"); + if(NULL == envkmsid){ + // nothing to do + return true; + } + return S3fsCurl::SetSseKmsid(envkmsid); +} + // // If md5 is empty, returns first(current) sse key. // bool S3fsCurl::GetSseKey(string& md5, string& ssekey) { - for(sseckeylist_t::const_iterator iter = S3fsCurl::sseckeys.begin(); iter != S3fsCurl::sseckeys.end(); iter++){ + for(sseckeylist_t::const_iterator iter = S3fsCurl::sseckeys.begin(); iter != S3fsCurl::sseckeys.end(); ++iter){ if(0 == md5.length() || md5 == (*iter).begin()->first){ md5 = iter->begin()->first; ssekey = iter->begin()->second; @@ -898,7 +967,7 @@ bool S3fsCurl::GetSseKeyMd5(int pos, string& md5) return false; } int cnt = 0; - for(sseckeylist_t::const_iterator iter = S3fsCurl::sseckeys.begin(); iter != S3fsCurl::sseckeys.end(); iter++, cnt++){ + for(sseckeylist_t::const_iterator iter = S3fsCurl::sseckeys.begin(); iter != S3fsCurl::sseckeys.end(); ++iter, ++cnt){ if(pos == cnt){ md5 = iter->begin()->first; return true; @@ -912,18 +981,6 @@ int S3fsCurl::GetSseKeyCount(void) return S3fsCurl::sseckeys.size(); } -bool S3fsCurl::IsSseCustomMode(void) -{ - return (0 < S3fsCurl::sseckeys.size()); -} - -bool S3fsCurl::SetUseSse(bool flag) -{ - bool old = S3fsCurl::is_use_sse; - S3fsCurl::is_use_sse = flag; - return old; -} - bool S3fsCurl::SetContentMd5(bool flag) { bool old = S3fsCurl::is_content_md5; @@ -968,7 +1025,7 @@ string S3fsCurl::SetIAMRole(const char* role) bool S3fsCurl::SetMultipartSize(off_t size) { size = size * 1024 * 1024; - if(size < MULTIPART_SIZE){ + if(size < MIN_MULTIPART_SIZE){ return false; } S3fsCurl::multipart_size = size; @@ -1015,7 +1072,7 @@ S3fsCurl* S3fsCurl::UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl) part_num = atoi(part_num_str.c_str()); if(s3fscurl->retry_count >= S3fsCurl::retries){ - DPRN("Over retry count(%d) limit(%s:%d).", s3fscurl->retry_count, s3fscurl->path.c_str(), part_num); + S3FS_PRN_ERR("Over retry count(%d) limit(%s:%d).", s3fscurl->retry_count, s3fscurl->path.c_str(), part_num); return NULL; } @@ -1032,7 +1089,7 @@ S3fsCurl* S3fsCurl::UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl) // setup new curl object if(0 != newcurl->UploadMultipartPostSetup(s3fscurl->path.c_str(), part_num, upload_id)){ - DPRN("Could not duplicate curl object(%s:%d).", s3fscurl->path.c_str(), part_num); + S3FS_PRN_ERR("Could not duplicate curl object(%s:%d).", s3fscurl->path.c_str(), part_num); delete newcurl; return NULL; } @@ -1049,18 +1106,18 @@ int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, off_t remaining_bytes; S3fsCurl s3fscurl(true); - FPRNNN("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd); + S3FS_PRN_INFO3("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd); // duplicate fd if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){ - 
DPRN("Could not duplicate file descriptor(errno=%d)", errno); + S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno); if(-1 != fd2){ close(fd2); } return -errno; } if(-1 == fstat(fd2, &st)){ - DPRN("Invalid file descriptor(errno=%d)", errno); + S3FS_PRN_ERR("Invalid file descriptor(errno=%d)", errno); close(fd2); return -errno; } @@ -1097,7 +1154,7 @@ int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, // initiate upload part for parallel if(0 != (result = s3fscurl_para->UploadMultipartPostSetup(tpath, list.size(), upload_id))){ - DPRN("failed uploading part setup(%d)", result); + S3FS_PRN_ERR("failed uploading part setup(%d)", result); close(fd2); delete s3fscurl_para; return result; @@ -1105,7 +1162,7 @@ int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, // set into parallel object if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){ - DPRN("Could not make curl object into multi curl(%s).", tpath); + S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath); close(fd2); delete s3fscurl_para; return -1; @@ -1114,7 +1171,7 @@ int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, // Multi request if(0 != (result = curlmulti.Request())){ - DPRN("error occuered in multi request(errno=%d).", result); + S3FS_PRN_ERR("error occuered in multi request(errno=%d).", result); break; } @@ -1137,15 +1194,16 @@ S3fsCurl* S3fsCurl::ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl) return NULL; } if(s3fscurl->retry_count >= S3fsCurl::retries){ - DPRN("Over retry count(%d) limit(%s).", s3fscurl->retry_count, s3fscurl->path.c_str()); + S3FS_PRN_ERR("Over retry count(%d) limit(%s).", s3fscurl->retry_count, s3fscurl->path.c_str()); return NULL; } // duplicate request(setup new curl object) S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe()); - if(0 != (result = newcurl->PreGetObjectRequest( - s3fscurl->path.c_str(), s3fscurl->partdata.fd, s3fscurl->partdata.startpos, s3fscurl->partdata.size, s3fscurl->b_ssekey_md5))){ - DPRN("failed downloading part setup(%d)", result); + if(0 != (result = newcurl->PreGetObjectRequest(s3fscurl->path.c_str(), s3fscurl->partdata.fd, + s3fscurl->partdata.startpos, s3fscurl->partdata.size, s3fscurl->b_ssetype, s3fscurl->b_ssevalue))) + { + S3FS_PRN_ERR("failed downloading part setup(%d)", result); delete newcurl; return NULL;; } @@ -1156,16 +1214,15 @@ S3fsCurl* S3fsCurl::ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl) int S3fsCurl::ParallelGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size) { - FPRNNN("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd); + S3FS_PRN_INFO3("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd); - string sseckeymd5(""); - char* psseckeymd5; - if(NULL != (psseckeymd5 = get_object_sseckey_md5(tpath))){ - sseckeymd5 = psseckeymd5; - free(psseckeymd5); + sse_type_t ssetype; + string ssevalue; + if(!get_object_sse_type(tpath, ssetype, ssevalue)){ + S3FS_PRN_WARN("Failed to get SSE type for file(%s).", SAFESTRPTR(tpath)); } - int result = 0; - ssize_t remaining_bytes; + int result = 0; + ssize_t remaining_bytes; // cycle through open fd, pulling off 10MB chunks at a time for(remaining_bytes = size; 0 < remaining_bytes; ){ @@ -1184,15 +1241,15 @@ int S3fsCurl::ParallelGetObjectRequest(const char* tpath, int fd, off_t start, s // s3fscurl sub object S3fsCurl* s3fscurl_para = new S3fsCurl(); - if(0 != (result = s3fscurl_para->PreGetObjectRequest(tpath, fd, (start + size - remaining_bytes), chunk, sseckeymd5))){ - DPRN("failed downloading 
part setup(%d)", result); + if(0 != (result = s3fscurl_para->PreGetObjectRequest(tpath, fd, (start + size - remaining_bytes), chunk, ssetype, ssevalue))){ + S3FS_PRN_ERR("failed downloading part setup(%d)", result); delete s3fscurl_para; return result; } // set into parallel object if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){ - DPRN("Could not make curl object into multi curl(%s).", tpath); + S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath); delete s3fscurl_para; return -1; } @@ -1200,7 +1257,7 @@ int S3fsCurl::ParallelGetObjectRequest(const char* tpath, int fd, off_t start, s // Multi request if(0 != (result = curlmulti.Request())){ - DPRN("error occuered in multi request(errno=%d).", result); + S3FS_PRN_ERR("error occuered in multi request(errno=%d).", result); break; } @@ -1251,7 +1308,7 @@ bool S3fsCurl::ParseIAMCredentialResponse(const char* response, iamcredmap_t& ke bool S3fsCurl::SetIAMCredentials(const char* response) { - FPRNINFO("IAM credential response = \"%s\"", response); + S3FS_PRN_INFO3("IAM credential response = \"%s\"", response); iamcredmap_t keyval; @@ -1286,6 +1343,41 @@ bool S3fsCurl::CheckIAMCredentialUpdate(void) return true; } +int S3fsCurl::CurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr) +{ + if(!hcurl){ + // something wrong... + return 0; + } + switch(type){ + case CURLINFO_TEXT: + case CURLINFO_HEADER_IN: + case CURLINFO_HEADER_OUT: + char* buff; + if(NULL == (buff = reinterpret_cast(malloc(size + 2 + 1)))){ + // could not allocation memory + S3FS_PRN_CRIT("could not allocate memory"); + break; + } + buff[size + 2] = '\0'; + sprintf(buff, "%c ", (CURLINFO_TEXT == type ? '*' : CURLINFO_HEADER_IN == type ? '<' : '>')); + memcpy(&buff[2], data, size); + S3FS_PRN_CURL("%s", buff); // no blocking + free(buff); + break; + case CURLINFO_DATA_IN: + case CURLINFO_DATA_OUT: + case CURLINFO_SSL_DATA_IN: + case CURLINFO_SSL_DATA_OUT: + // not put + break; + default: + // why + break; + } + return 0; +} + //------------------------------------------------------------------- // Methods for S3fsCurl //------------------------------------------------------------------- @@ -1293,7 +1385,7 @@ S3fsCurl::S3fsCurl(bool ahbe) : hCurl(NULL), path(""), base_path(""), saved_path(""), url(""), requestHeaders(NULL), bodydata(NULL), headdata(NULL), LastResponseCode(-1), postdata(NULL), postdata_remaining(0), is_use_ahbe(ahbe), retry_count(0), b_infile(NULL), b_postdata(NULL), b_postdata_remaining(0), b_partdata_startpos(0), b_partdata_size(0), - b_ssekey_pos(-1), b_ssekey_md5("") + b_ssekey_pos(-1), b_ssevalue(""), b_ssetype(SSE_DISABLE) { type = REQTYPE_UNSET; } @@ -1327,12 +1419,15 @@ bool S3fsCurl::ResetHandle(void) curl_easy_setopt(hCurl, CURLOPT_SHARE, S3fsCurl::hCurlShare); } if(!S3fsCurl::is_cert_check) { - DPRN("'no_check_certificate' option in effect.") - DPRN("The server certificate won't be checked against the available certificate authorities.") + S3FS_PRN_DBG("'no_check_certificate' option in effect.") + S3FS_PRN_DBG("The server certificate won't be checked against the available certificate authorities.") curl_easy_setopt(hCurl, CURLOPT_SSL_VERIFYPEER, false); } if(S3fsCurl::is_verbose){ curl_easy_setopt(hCurl, CURLOPT_VERBOSE, true); + if(!foreground){ + curl_easy_setopt(hCurl, CURLOPT_DEBUGFUNCTION, S3fsCurl::CurlDebugFunc); + } } S3fsCurl::curl_times[hCurl] = time(0); @@ -1347,18 +1442,18 @@ bool S3fsCurl::CreateCurlHandle(bool force) if(hCurl){ if(!force){ - DPRN("already create handle."); + 
S3FS_PRN_WARN("already create handle."); return false; } if(!DestroyCurlHandle()){ - DPRN("could not destroy handle."); + S3FS_PRN_ERR("could not destroy handle."); return false; } - DPRN("already has handle, so destroied it."); + S3FS_PRN_INFO3("already has handle, so destroied it."); } if(NULL == (hCurl = curl_easy_init())){ - DPRN("Failed to create handle."); + S3FS_PRN_ERR("Failed to create handle."); return false; } type = REQTYPE_UNSET; @@ -1450,7 +1545,7 @@ bool S3fsCurl::GetResponseCode(long& responseCode) // bool S3fsCurl::RemakeHandle(void) { - DPRNNN("Retry request. [type=%d][url=%s][path=%s]", type, url.c_str(), path.c_str()); + S3FS_PRN_INFO3("Retry request. [type=%d][url=%s][path=%s]", type, url.c_str(), path.c_str()); if(REQTYPE_UNSET == type){ return false; @@ -1461,7 +1556,7 @@ bool S3fsCurl::RemakeHandle(void) if(b_infile){ rewind(b_infile); if(-1 == fstat(fileno(b_infile), &st)){ - DPRNNN("Could not get file stat(fd=%d)", fileno(b_infile)); + S3FS_PRN_WARN("Could not get file stat(fd=%d)", fileno(b_infile)); return false; } } @@ -1614,7 +1709,7 @@ bool S3fsCurl::RemakeHandle(void) break; default: - DPRNNN("request type is unknown(%d)", type); + S3FS_PRN_ERR("request type is unknown(%d)", type); return false; } return true; @@ -1625,10 +1720,10 @@ bool S3fsCurl::RemakeHandle(void) // int S3fsCurl::RequestPerform(void) { - if(debug){ + if(IS_S3FS_LOG_DBG()){ char* ptr_url = NULL; curl_easy_getinfo(hCurl, CURLINFO_EFFECTIVE_URL , &ptr_url); - DPRNNN("connecting to URL %s", SAFESTRPTR(ptr_url)); + S3FS_PRN_DBG("connecting to URL %s", SAFESTRPTR(ptr_url)); } // 1 attempt + retries... @@ -1641,15 +1736,15 @@ int S3fsCurl::RequestPerform(void) case CURLE_OK: // Need to look at the HTTP response code if(0 != curl_easy_getinfo(hCurl, CURLINFO_RESPONSE_CODE, &LastResponseCode)){ - DPRNNN("curl_easy_getinfo failed while trying to retrieve HTTP response code"); + S3FS_PRN_ERR("curl_easy_getinfo failed while trying to retrieve HTTP response code"); return -EIO; } if(400 > LastResponseCode){ - DPRNNN("HTTP response code %ld", LastResponseCode); + S3FS_PRN_INFO3("HTTP response code %ld", LastResponseCode); return 0; } if(500 <= LastResponseCode){ - DPRNNN("###HTTP response=%ld", LastResponseCode); + S3FS_PRN_INFO3("HTTP response code %ld", LastResponseCode); sleep(4); break; } @@ -1657,107 +1752,107 @@ int S3fsCurl::RequestPerform(void) // Service response codes which are >= 400 && < 500 switch(LastResponseCode){ case 400: - DPRNNN("HTTP response code 400 was returned, returing EIO."); - DPRNINFO("Body Text: %s", (bodydata ? bodydata->str() : "")); + S3FS_PRN_INFO3("HTTP response code 400 was returned, returing EIO."); + S3FS_PRN_DBG("Body Text: %s", (bodydata ? bodydata->str() : "")); return -EIO; case 403: - DPRNNN("HTTP response code 403 was returned, returning EPERM"); - DPRNINFO("Body Text: %s", (bodydata ? bodydata->str() : "")); + S3FS_PRN_INFO3("HTTP response code 403 was returned, returning EPERM"); + S3FS_PRN_DBG("Body Text: %s", (bodydata ? bodydata->str() : "")); return -EPERM; case 404: - DPRNNN("HTTP response code 404 was returned, returning ENOENT"); - DPRNINFO("Body Text: %s", (bodydata ? bodydata->str() : "")); + S3FS_PRN_INFO3("HTTP response code 404 was returned, returning ENOENT"); + S3FS_PRN_DBG("Body Text: %s", (bodydata ? bodydata->str() : "")); return -ENOENT; default: - DPRNNN("HTTP response code = %ld, returning EIO", LastResponseCode); - DPRNINFO("Body Text: %s", (bodydata ? 
bodydata->str() : "")); + S3FS_PRN_INFO3("HTTP response code = %ld, returning EIO", LastResponseCode); + S3FS_PRN_DBG("Body Text: %s", (bodydata ? bodydata->str() : "")); return -EIO; } break; case CURLE_WRITE_ERROR: - DPRN("### CURLE_WRITE_ERROR"); + S3FS_PRN_ERR("### CURLE_WRITE_ERROR"); sleep(2); break; case CURLE_OPERATION_TIMEDOUT: - DPRN("### CURLE_OPERATION_TIMEDOUT"); + S3FS_PRN_ERR("### CURLE_OPERATION_TIMEDOUT"); sleep(2); break; case CURLE_COULDNT_RESOLVE_HOST: - DPRN("### CURLE_COULDNT_RESOLVE_HOST"); + S3FS_PRN_ERR("### CURLE_COULDNT_RESOLVE_HOST"); sleep(2); break; case CURLE_COULDNT_CONNECT: - DPRN("### CURLE_COULDNT_CONNECT"); + S3FS_PRN_ERR("### CURLE_COULDNT_CONNECT"); sleep(4); break; case CURLE_GOT_NOTHING: - DPRN("### CURLE_GOT_NOTHING"); + S3FS_PRN_ERR("### CURLE_GOT_NOTHING"); sleep(4); break; case CURLE_ABORTED_BY_CALLBACK: - DPRN("### CURLE_ABORTED_BY_CALLBACK"); + S3FS_PRN_ERR("### CURLE_ABORTED_BY_CALLBACK"); sleep(4); S3fsCurl::curl_times[hCurl] = time(0); break; case CURLE_PARTIAL_FILE: - DPRN("### CURLE_PARTIAL_FILE"); + S3FS_PRN_ERR("### CURLE_PARTIAL_FILE"); sleep(4); break; case CURLE_SEND_ERROR: - DPRN("### CURLE_SEND_ERROR"); + S3FS_PRN_ERR("### CURLE_SEND_ERROR"); sleep(2); break; case CURLE_RECV_ERROR: - DPRN("### CURLE_RECV_ERROR"); + S3FS_PRN_ERR("### CURLE_RECV_ERROR"); sleep(2); break; case CURLE_SSL_CONNECT_ERROR: - DPRN("### CURLE_SSL_CONNECT_ERROR"); + S3FS_PRN_ERR("### CURLE_SSL_CONNECT_ERROR"); sleep(2); break; case CURLE_SSL_CACERT: - DPRN("### CURLE_SSL_CACERT"); + S3FS_PRN_ERR("### CURLE_SSL_CACERT"); // try to locate cert, if successful, then set the // option and continue if(0 == S3fsCurl::curl_ca_bundle.size()){ if(!S3fsCurl::LocateBundle()){ - DPRNCRIT("could not get CURL_CA_BUNDLE."); + S3FS_PRN_CRIT("could not get CURL_CA_BUNDLE."); exit(EXIT_FAILURE); } break; // retry with CAINFO } - DPRNCRIT("curlCode: %d msg: %s", curlCode, curl_easy_strerror(curlCode)); + S3FS_PRN_CRIT("curlCode: %d msg: %s", curlCode, curl_easy_strerror(curlCode)); exit(EXIT_FAILURE); break; #ifdef CURLE_PEER_FAILED_VERIFICATION case CURLE_PEER_FAILED_VERIFICATION: - DPRN("### CURLE_PEER_FAILED_VERIFICATION"); + S3FS_PRN_ERR("### CURLE_PEER_FAILED_VERIFICATION"); first_pos = bucket.find_first_of("."); if(first_pos != string::npos){ - FPRNNN("curl returned a CURL_PEER_FAILED_VERIFICATION error"); - FPRNNN("security issue found: buckets with periods in their name are incompatible with http"); - FPRNNN("This check can be over-ridden by using the -o ssl_verify_hostname=0"); - FPRNNN("The certificate will still be checked but the hostname will not be verified."); - FPRNNN("A more secure method would be to use a bucket name without periods."); - }else - DPRNNN("my_curl_easy_perform: curlCode: %d -- %s", curlCode, curl_easy_strerror(curlCode)); + S3FS_PRN_INFO("curl returned a CURL_PEER_FAILED_VERIFICATION error"); + S3FS_PRN_INFO("security issue found: buckets with periods in their name are incompatible with http"); + S3FS_PRN_INFO("This check can be over-ridden by using the -o ssl_verify_hostname=0"); + S3FS_PRN_INFO("The certificate will still be checked but the hostname will not be verified."); + S3FS_PRN_INFO("A more secure method would be to use a bucket name without periods."); + }else{ + S3FS_PRN_INFO("my_curl_easy_perform: curlCode: %d -- %s", curlCode, curl_easy_strerror(curlCode)); } exit(EXIT_FAILURE); break; @@ -1765,12 +1860,12 @@ int S3fsCurl::RequestPerform(void) // This should be invalid since curl option HTTP FAILONERROR is now off case 
CURLE_HTTP_RETURNED_ERROR: - DPRN("### CURLE_HTTP_RETURNED_ERROR"); + S3FS_PRN_ERR("### CURLE_HTTP_RETURNED_ERROR"); if(0 != curl_easy_getinfo(hCurl, CURLINFO_RESPONSE_CODE, &LastResponseCode)){ return -EIO; } - DPRN("HTTP response code =%ld", LastResponseCode); + S3FS_PRN_INFO3("HTTP response code =%ld", LastResponseCode); // Let's try to retrieve the if(404 == LastResponseCode){ @@ -1783,18 +1878,19 @@ int S3fsCurl::RequestPerform(void) // Unknown CURL return code default: - DPRNCRIT("###curlCode: %d msg: %s", curlCode, curl_easy_strerror(curlCode)); + S3FS_PRN_CRIT("###curlCode: %d msg: %s", curlCode, curl_easy_strerror(curlCode)); exit(EXIT_FAILURE); break; } - DPRNNN("### retrying..."); + S3FS_PRN_INFO("### retrying..."); if(!RemakeHandle()){ - DPRNNN("Failed to reset handle and internal data for retrying."); + S3FS_PRN_INFO("Failed to reset handle and internal data for retrying."); return -EIO; } } - DPRN("### giving up"); + S3FS_PRN_ERR("### giving up"); + return -EIO; } @@ -1813,7 +1909,7 @@ string S3fsCurl::CalcSignatureV2(string method, string strMD5, string content_ty if(0 < S3fsCurl::IAM_role.size()){ if(!S3fsCurl::CheckIAMCredentialUpdate()){ - DPRN("Something error occurred in checking IAM credential."); + S3FS_PRN_ERR("Something error occurred in checking IAM credential."); return Signature; // returns empty string, then it occures error. } requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-security-token", S3fsCurl::AWSAccessToken.c_str()); @@ -1855,7 +1951,7 @@ string S3fsCurl::CalcSignature(string method, string canonical_uri, string query if(0 < S3fsCurl::IAM_role.size()){ if(!S3fsCurl::CheckIAMCredentialUpdate()){ - DPRN("Something error occurred in checking IAM credential."); + S3FS_PRN_ERR("Something error occurred in checking IAM credential."); return Signature; // returns empty string, then it occures error. 
} requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-security-token", S3fsCurl::AWSAccessToken.c_str()); @@ -1970,7 +2066,7 @@ bool S3fsCurl::GetUploadId(string& upload_id) void S3fsCurl::insertV4Headers(const string &op, const string &path, const string &query_string, const string &payload_hash) { - DPRNNN("computing signature [%s] [%s] [%s] [%s]", op.c_str(), path.c_str(), query_string.c_str(), payload_hash.c_str()); + S3FS_PRN_INFO3("computing signature [%s] [%s] [%s] [%s]", op.c_str(), path.c_str(), query_string.c_str(), payload_hash.c_str()); string strdate; string date8601; get_date_sigv3(strdate, date8601); @@ -1992,7 +2088,7 @@ void S3fsCurl::insertV4Headers(const string &op, const string &path, const strin int S3fsCurl::DeleteRequest(const char* tpath) { - FPRNNN("[tpath=%s]", SAFESTRPTR(tpath)); + S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); if(!tpath){ return -1; @@ -2037,10 +2133,10 @@ int S3fsCurl::DeleteRequest(const char* tpath) // int S3fsCurl::GetIAMCredentials(void) { - FPRNINFO("[IAM role=%s]", S3fsCurl::IAM_role.c_str()); + S3FS_PRN_INFO3("[IAM role=%s]", S3fsCurl::IAM_role.c_str()); if(0 == S3fsCurl::IAM_role.size()){ - DPRN("IAM role name is empty."); + S3FS_PRN_ERR("IAM role name is empty."); return -EIO; } // at first set type for handle @@ -2064,7 +2160,7 @@ int S3fsCurl::GetIAMCredentials(void) // analizing response if(0 == result && !S3fsCurl::SetIAMCredentials(bodydata->str())){ - DPRN("Something error occurred, could not get IAM credential."); + S3FS_PRN_ERR("Something error occurred, could not get IAM credential."); } delete bodydata; bodydata = NULL; @@ -2072,25 +2168,35 @@ int S3fsCurl::GetIAMCredentials(void) return result; } -// -// If md5 is empty, build by first(current) sse key -// -bool S3fsCurl::AddSseKeyRequestHead(string& md5, bool is_copy) +bool S3fsCurl::AddSseRequestHead(sse_type_t ssetype, string& ssevalue, bool is_only_c, bool is_copy) { - if(!S3fsCurl::IsSseCustomMode()){ - // Nothing to do - return true; - } - string sseckey; - if(S3fsCurl::GetSseKey(md5, sseckey)){ - if(is_copy){ - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-copy-source-server-side-encryption-customer-algorithm", "AES256"); - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-copy-source-server-side-encryption-customer-key", sseckey.c_str()); - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-copy-source-server-side-encryption-customer-key-md5", md5.c_str()); + if(SSE_S3 == ssetype){ + if(!is_only_c){ + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption", "AES256"); + } + }else if(SSE_C == ssetype){ + string sseckey; + if(S3fsCurl::GetSseKey(ssevalue, sseckey)){ + if(is_copy){ + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-copy-source-server-side-encryption-customer-algorithm", "AES256"); + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-copy-source-server-side-encryption-customer-key", sseckey.c_str()); + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-copy-source-server-side-encryption-customer-key-md5", ssevalue.c_str()); + }else{ + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-customer-algorithm", "AES256"); + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-customer-key", sseckey.c_str()); + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-customer-key-md5", ssevalue.c_str()); + } }else{ - requestHeaders = 
curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-customer-algorithm", "AES256"); - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-customer-key", sseckey.c_str()); - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-customer-key-md5", md5.c_str()); + S3FS_PRN_WARN("Failed to insert SSE-C header."); + } + + }else if(SSE_KMS == ssetype){ + if(!is_only_c){ + if(ssevalue.empty()){ + ssevalue = S3fsCurl::GetSseKmsId(); + } + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption", "aws:kms"); + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-aws-kms-key-id", ssevalue.c_str()); + } } return true; @@ -2100,12 +2206,12 @@ bool S3fsCurl::AddSseKeyRequestHead(string& md5, bool is_copy) // tpath : target path for head request // bpath : saved into base_path // savedpath : saved into saved_path -// ssekey_pos : -1 means "not use sse", 0 - X means "use sseckey" and "sseckey position". -// sseckey position 0 is latest key. +// ssekey_pos : -1 means "not" SSE-C type +// 0 - X means SSE-C type and position for SSE-C key(0 is latest key) // bool S3fsCurl::PreHeadRequest(const char* tpath, const char* bpath, const char* savedpath, int ssekey_pos) { - FPRNINFO("[tpath=%s][bpath=%s][save=%s]", SAFESTRPTR(tpath), SAFESTRPTR(bpath), SAFESTRPTR(savedpath)); + S3FS_PRN_INFO3("[tpath=%s][bpath=%s][save=%s][sseckeypos=%d]", SAFESTRPTR(tpath), SAFESTRPTR(bpath), SAFESTRPTR(savedpath), ssekey_pos); if(!tpath){ return false; @@ -2126,10 +2232,11 @@ bool S3fsCurl::PreHeadRequest(const char* tpath, const char* bpath, const char* responseHeaders.clear(); // requestHeaders - if(0 <= ssekey_pos && S3fsCurl::IsSseCustomMode()){ - string md5; - if(!S3fsCurl::GetSseKeyMd5(ssekey_pos, md5) || !AddSseKeyRequestHead(md5, false)){ - DPRN("Failed to set SSE-C headers for md5(%s).", md5.c_str()); + if(0 <= ssekey_pos){ + string md5(""); + if(!S3fsCurl::GetSseKeyMd5(ssekey_pos, md5) || !AddSseRequestHead(SSE_C, md5, true, false)){ + S3FS_PRN_ERR("Failed to set SSE-C headers for sse-c key pos(%d)(=md5(%s)).", ssekey_pos, md5.c_str()); + return false; } } b_ssekey_pos = ssekey_pos; @@ -2166,13 +2273,13 @@ int S3fsCurl::HeadRequest(const char* tpath, headers_t& meta) { int result = -1; - FPRNNN("[tpath=%s]", SAFESTRPTR(tpath)); + S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); - if(S3fsCurl::IsSseCustomMode()){ - // SSE-C mode, check all sse-c key at first - int pos; - for(pos = 0; static_cast<size_t>(pos) < S3fsCurl::sseckeys.size(); pos++){ - if(0 != pos && !DestroyCurlHandle()){ + // At first, try to get without SSE-C headers + if(!PreHeadRequest(tpath) || 0 != (result = RequestPerform())){ + // If has SSE-C keys, try to get with all SSE-C keys. + for(int pos = 0; static_cast<size_t>(pos) < S3fsCurl::sseckeys.size(); pos++){ + if(!DestroyCurlHandle()){ return result; } if(!PreHeadRequest(tpath, NULL, NULL, pos)){ @@ -2182,16 +2289,8 @@ int S3fsCurl::HeadRequest(const char* tpath, headers_t& meta) break; } } - if(S3fsCurl::sseckeys.size() <= static_cast<size_t>(pos)){ - // If sse-c mode is enable, s3fs fails to get head request for normal and sse object. - // So try to get head without sse-c header. - if(!DestroyCurlHandle() || !PreHeadRequest(tpath, NULL, NULL, -1) || 0 != (result = RequestPerform())){ - return result; - } - }else{ - // Not sse-c mode - if(!PreHeadRequest(tpath) || 0 != (result = RequestPerform())){ + if(0 != result){ + DestroyCurlHandle(); // not check result. 
return result; } } @@ -2219,7 +2318,7 @@ int S3fsCurl::HeadRequest(const char* tpath, headers_t& meta) int S3fsCurl::PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy) { - FPRNNN("[tpath=%s]", SAFESTRPTR(tpath)); + S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); if(!tpath){ return -1; @@ -2252,28 +2351,37 @@ int S3fsCurl::PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy) }else if(key == "x-amz-copy-source"){ requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); }else if(key == "x-amz-server-side-encryption"){ - // skip this header, because this header is specified after logic. - }else if(key == "x-amz-server-side-encryption-customer-algorithm"){ - // skip this header, because this header is specified with "x-amz-...-customer-key-md5". - }else if(is_copy && key == "x-amz-server-side-encryption-customer-key-md5"){ // Only copy mode. - if(!AddSseKeyRequestHead(value, is_copy)){ - DPRNNN("Failed to insert sse(-c) header."); + if(is_copy && !AddSseRequestHead(SSE_S3, value, false, true)){ + S3FS_PRN_WARN("Failed to insert SSE-S3 header."); + } + }else if(key == "x-amz-server-side-encryption-customer-algorithm"){ + // Only copy mode. + if(is_copy && !value.empty() && !AddSseRequestHead(SSE_KMS, value, false, true)){ + S3FS_PRN_WARN("Failed to insert SSE-KMS header."); + } + }else if(key == "x-amz-server-side-encryption-customer-key-md5"){ + // Only copy mode. + if(is_copy){ + if(!AddSseRequestHead(SSE_C, value, true, true) || !AddSseRequestHead(SSE_C, value, true, false)){ + S3FS_PRN_WARN("Failed to insert SSE-C header."); + } } } } - // "x-amz-acl", rrs, sse + // "x-amz-acl", storage class, sse requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-acl", S3fsCurl::default_acl.c_str()); - if(S3fsCurl::is_use_rrs){ + if(REDUCED_REDUNDANCY == GetStorageClass()){ requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "REDUCED_REDUNDANCY"); + } else if(STANDARD_IA == GetStorageClass()){ + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "STANDARD_IA"); } - if(S3fsCurl::is_use_sse){ - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption", "AES256"); - }else if(S3fsCurl::IsSseCustomMode()){ - string md5; - if(!AddSseKeyRequestHead(md5, false)){ - DPRNNN("Failed to insert sse(-c) header."); + // SSE + if(!is_copy){ + string ssevalue(""); + if(!AddSseRequestHead(S3fsCurl::GetSseType(), ssevalue, false, false)){ + S3FS_PRN_WARN("Failed to set SSE header, but continue..."); } } if(is_use_ahbe){ @@ -2304,7 +2412,7 @@ int S3fsCurl::PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy) type = REQTYPE_PUTHEAD; - DPRNNN("copying... [path=%s]", tpath); + S3FS_PRN_INFO3("copying... 
[path=%s]", tpath); int result = RequestPerform(); delete bodydata; @@ -2319,7 +2427,7 @@ int S3fsCurl::PutRequest(const char* tpath, headers_t& meta, int fd) FILE* file = NULL; int fd2; - FPRNNN("[tpath=%s]", SAFESTRPTR(tpath)); + S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); if(!tpath){ return -1; @@ -2327,13 +2435,16 @@ int S3fsCurl::PutRequest(const char* tpath, headers_t& meta, int fd) if(-1 != fd){ // duplicate fd if(-1 == (fd2 = dup(fd)) || -1 == fstat(fd2, &st) || 0 != lseek(fd2, 0, SEEK_SET) || NULL == (file = fdopen(fd2, "rb"))){ - DPRN("Could not duplicate file discriptor(errno=%d)", errno); + S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno); + if(-1 != fd2){ + close(fd2); + } return -errno; } b_infile = file; }else{ // This case is creating zero byte obejct.(calling by create_file_object()) - DPRNNN("create zero byte file object."); + S3FS_PRN_INFO3("create zero byte file object."); } if(!CreateCurlHandle(true)){ @@ -2378,18 +2489,17 @@ int S3fsCurl::PutRequest(const char* tpath, headers_t& meta, int fd) // skip this header, because this header is specified after logic. } } - // "x-amz-acl", rrs, sse + // "x-amz-acl", storage class, sse requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-acl", S3fsCurl::default_acl.c_str()); - if(S3fsCurl::is_use_rrs){ + if(REDUCED_REDUNDANCY == GetStorageClass()){ requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "REDUCED_REDUNDANCY"); + } else if(STANDARD_IA == GetStorageClass()){ + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "STANDARD_IA"); } - if(S3fsCurl::is_use_sse){ - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption", "AES256"); - }else if(S3fsCurl::IsSseCustomMode()){ - string md5; - if(!AddSseKeyRequestHead(md5, false)){ - DPRNNN("Failed to insert sse(-c) header."); - } + // SSE + string ssevalue(""); + if(!AddSseRequestHead(S3fsCurl::GetSseType(), ssevalue, false, false)){ + S3FS_PRN_WARN("Failed to set SSE header, but continue..."); } if(is_use_ahbe){ // set additional header by ahbe conf @@ -2425,7 +2535,7 @@ int S3fsCurl::PutRequest(const char* tpath, headers_t& meta, int fd) type = REQTYPE_PUT; - DPRNNN("uploading... [path=%s][fd=%d][size=%jd]", tpath, fd, (intmax_t)(-1 != fd ? st.st_size : 0)); + S3FS_PRN_INFO3("uploading... [path=%s][fd=%d][size=%jd]", tpath, fd, (intmax_t)(-1 != fd ? 
st.st_size : 0)); int result = RequestPerform(); delete bodydata; @@ -2437,9 +2547,9 @@ int S3fsCurl::PutRequest(const char* tpath, headers_t& meta, int fd) return result; } -int S3fsCurl::PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size, string& ssekeymd5) +int S3fsCurl::PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size, sse_type_t ssetype, string& ssevalue) { - FPRNNN("[tpath=%s][start=%jd][size=%zd]", SAFESTRPTR(tpath), (intmax_t)start, size); + S3FS_PRN_INFO3("[tpath=%s][start=%jd][size=%zd]", SAFESTRPTR(tpath), (intmax_t)start, size); if(!tpath || -1 == fd || 0 > start || 0 >= size){ return -1; @@ -2464,12 +2574,10 @@ int S3fsCurl::PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_ range += str(start + size - 1); requestHeaders = curl_slist_sort_insert(requestHeaders, "Range", range.c_str()); } - if(0 < ssekeymd5.length()){ - if(!AddSseKeyRequestHead(ssekeymd5, false)){ - DPRNNN("Failed to insert sse(-c) header."); - } + // SSE + if(!AddSseRequestHead(ssetype, ssevalue, true, false)){ + S3FS_PRN_WARN("Failed to set SSE header, but continue..."); } - if(!S3fsCurl::is_sigv4){ string date = get_date_rfc850(); requestHeaders = curl_slist_sort_insert(requestHeaders, "Date", date.c_str()); @@ -2498,7 +2606,9 @@ int S3fsCurl::PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_ partdata.size = size; b_partdata_startpos = start; b_partdata_size = size; - b_ssekey_md5 = ssekeymd5; + b_ssetype = ssetype; + b_ssevalue = ssevalue; + b_ssekey_pos = -1; // not use this value for get object. type = REQTYPE_GET; @@ -2509,22 +2619,22 @@ int S3fsCurl::GetObjectRequest(const char* tpath, int fd, off_t start, ssize_t s { int result; - FPRNNN("[tpath=%s][start=%jd][size=%zd]", SAFESTRPTR(tpath), (intmax_t)start, size); + S3FS_PRN_INFO3("[tpath=%s][start=%jd][size=%zd]", SAFESTRPTR(tpath), (intmax_t)start, size); if(!tpath){ return -1; } - string sseckeymd5(""); - char* psseckeymd5; - if(NULL != (psseckeymd5 = get_object_sseckey_md5(tpath))){ - sseckeymd5 = psseckeymd5; - free(psseckeymd5); + sse_type_t ssetype; + string ssevalue; + if(!get_object_sse_type(tpath, ssetype, ssevalue)){ + S3FS_PRN_WARN("Failed to get SSE type for file(%s).", SAFESTRPTR(tpath)); } - if(0 != (result = PreGetObjectRequest(tpath, fd, start, size, sseckeymd5))){ + + if(0 != (result = PreGetObjectRequest(tpath, fd, start, size, ssetype, ssevalue))){ return result; } - DPRNNN("downloading... [path=%s][fd=%d]", tpath, fd); + S3FS_PRN_INFO3("downloading... [path=%s][fd=%d]", tpath, fd); result = RequestPerform(); partdata.clear(); @@ -2534,7 +2644,7 @@ int S3fsCurl::GetObjectRequest(const char* tpath, int fd, off_t start, ssize_t s int S3fsCurl::CheckBucket(void) { - FPRNNN("check a bucket."); + S3FS_PRN_INFO3("check a bucket."); if(!CreateCurlHandle(true)){ return -1; @@ -2572,14 +2682,14 @@ int S3fsCurl::CheckBucket(void) int result = RequestPerform(); if (result != 0) { - DPRN("Check bucket failed, S3 response: %s", (bodydata ? bodydata->str() : "")); + S3FS_PRN_ERR("Check bucket failed, S3 response: %s", (bodydata ? 
bodydata->str() : "")); } return result; } int S3fsCurl::ListBucketRequest(const char* tpath, const char* query) { - FPRNNN("[tpath=%s]", SAFESTRPTR(tpath)); + S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); if(!tpath){ return -1; @@ -2637,7 +2747,7 @@ int S3fsCurl::ListBucketRequest(const char* tpath, const char* query) // int S3fsCurl::PreMultipartPostRequest(const char* tpath, headers_t& meta, string& upload_id, bool is_copy) { - FPRNNN("[tpath=%s]", SAFESTRPTR(tpath)); + S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); if(!tpath){ return -1; @@ -2671,27 +2781,36 @@ int S3fsCurl::PreMultipartPostRequest(const char* tpath, headers_t& meta, string }else if(key.substr(0, 10) == "x-amz-meta"){ requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str()); }else if(key == "x-amz-server-side-encryption"){ - // skip this header, because this header is specified after logic. - }else if(key == "x-amz-server-side-encryption-customer-algorithm"){ - // skip this header, because this header is specified with "x-amz-...-customer-key-md5". - }else if(is_copy && key == "x-amz-server-side-encryption-customer-key-md5"){ // Only copy mode. - if(!AddSseKeyRequestHead(value, is_copy)){ - DPRNNN("Failed to insert sse(-c) header."); + if(is_copy && !AddSseRequestHead(SSE_S3, value, false, true)){ + S3FS_PRN_WARN("Failed to insert SSE-S3 header."); + } + }else if(key == "x-amz-server-side-encryption-customer-algorithm"){ + // Only copy mode. + if(is_copy && !value.empty() && !AddSseRequestHead(SSE_KMS, value, false, true)){ + S3FS_PRN_WARN("Failed to insert SSE-KMS header."); + } + }else if(key == "x-amz-server-side-encryption-customer-key-md5"){ + // Only copy mode. + if(is_copy){ + if(!AddSseRequestHead(SSE_C, value, true, true) || !AddSseRequestHead(SSE_C, value, true, false)){ + S3FS_PRN_WARN("Failed to insert SSE-C header."); + } } } } - // "x-amz-acl", rrs, sse + // "x-amz-acl", storage class, sse requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-acl", S3fsCurl::default_acl.c_str()); - if(S3fsCurl::is_use_rrs){ + if(REDUCED_REDUNDANCY == GetStorageClass()){ requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "REDUCED_REDUNDANCY"); + } else if(STANDARD_IA == GetStorageClass()){ + requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "STANDARD_IA"); } - if(S3fsCurl::is_use_sse){ - requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption", "AES256"); - }else if(S3fsCurl::IsSseCustomMode()){ - string md5; - if(!AddSseKeyRequestHead(md5, false)){ - DPRNNN("Failed to insert sse(-c) header."); + // SSE + if(!is_copy){ + string ssevalue(""); + if(!AddSseRequestHead(S3fsCurl::GetSseType(), ssevalue, false, false)){ + S3FS_PRN_WARN("Failed to set SSE header, but continue..."); } } if(is_use_ahbe){ @@ -2750,7 +2869,7 @@ int S3fsCurl::PreMultipartPostRequest(const char* tpath, headers_t& meta, string int S3fsCurl::CompleteMultipartPostRequest(const char* tpath, string& upload_id, etaglist_t& parts) { - FPRNNN("[tpath=%s][parts=%zu]", SAFESTRPTR(tpath), parts.size()); + S3FS_PRN_INFO3("[tpath=%s][parts=%zu]", SAFESTRPTR(tpath), parts.size()); if(!tpath){ return -1; @@ -2761,12 +2880,12 @@ int S3fsCurl::CompleteMultipartPostRequest(const char* tpath, string& upload_id, postContent += "\n"; for(int cnt = 0; cnt < (int)parts.size(); cnt++){ if(0 == parts[cnt].length()){ - DPRN("%d file part is not finished uploading.", cnt + 1); + S3FS_PRN_ERR("%d file part is not finished uploading.", cnt + 1); 
@@ -2761,12 +2880,12 @@ int S3fsCurl::CompleteMultipartPostRequest(const char* tpath, string& upload_id, postContent += "<CompleteMultipartUpload>\n"; for(int cnt = 0; cnt < (int)parts.size(); cnt++){ if(0 == parts[cnt].length()){ - DPRN("%d file part is not finished uploading.", cnt + 1); + S3FS_PRN_ERR("%d file part is not finished uploading.", cnt + 1); return -1; } postContent += "<Part>\n"; - postContent += "  <PartNumber>" + IntToStr(cnt + 1) + "</PartNumber>\n"; - postContent += "  <ETag>\"" + parts[cnt] + "\"</ETag>\n"; + postContent += "  <PartNumber>" + str(cnt + 1) + "</PartNumber>\n"; + postContent += "  <ETag>\"" + parts[cnt] + "\"</ETag>\n"; postContent += "</Part>\n"; } postContent += "</CompleteMultipartUpload>\n"; @@ -2818,6 +2937,7 @@ int S3fsCurl::CompleteMultipartPostRequest(const char* tpath, string& upload_id, for(cnt = 0; cnt < sRequest_len; cnt++){ sprintf(&hexsRequest[cnt * 2], "%02x", sRequest[cnt]); } + free(sRequest); payload_hash.assign(hexsRequest, &hexsRequest[sRequest_len * 2]); requestHeaders = curl_slist_sort_insert(requestHeaders, "Date", get_date_rfc850().c_str()); @@ -2850,7 +2970,7 @@ int S3fsCurl::CompleteMultipartPostRequest(const char* tpath, string& upload_id, int S3fsCurl::MultipartListRequest(string& body) { - FPRNNN("list request(multipart)"); + S3FS_PRN_INFO3("list request(multipart)"); if(!CreateCurlHandle(true)){ return -1; @@ -2903,7 +3023,7 @@ int S3fsCurl::MultipartListRequest(string& body) int S3fsCurl::AbortMultipartUpload(const char* tpath, string& upload_id) { - FPRNNN("[tpath=%s]", SAFESTRPTR(tpath)); + S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); if(!tpath){ return -1; @@ -2961,18 +3081,23 @@ int S3fsCurl::AbortMultipartUpload(const char* tpath, string& upload_id) int S3fsCurl::UploadMultipartPostSetup(const char* tpath, int part_num, string& upload_id) { - FPRNNN("[tpath=%s][start=%jd][size=%zd][part=%d]", SAFESTRPTR(tpath), (intmax_t)(partdata.startpos), partdata.size, part_num); + S3FS_PRN_INFO3("[tpath=%s][start=%jd][size=%zd][part=%d]", SAFESTRPTR(tpath), (intmax_t)(partdata.startpos), partdata.size, part_num); if(-1 == partdata.fd || -1 == partdata.startpos || -1 == partdata.size){ return -1; } // make md5 and file pointer - partdata.etag = s3fs_md5sum(partdata.fd, partdata.startpos, partdata.size); - if(partdata.etag.empty()){ - DPRN("Could not make md5 for file(part %d)", part_num); + unsigned char *md5raw = s3fs_md5hexsum(partdata.fd, partdata.startpos, partdata.size); + if(md5raw == NULL){ + S3FS_PRN_ERR("Could not make md5 for file(part %d)", part_num); return -1; } + partdata.etag = s3fs_hex(md5raw, get_md5_digest_length()); + char* md5base64p = s3fs_base64(md5raw, get_md5_digest_length()); + std::string md5base64 = md5base64p; + free(md5base64p); + free(md5raw); // create handle if(!CreateCurlHandle(true)){ @@ -2980,7 +3105,7 @@ int S3fsCurl::UploadMultipartPostSetup(const char* tpath, int part_num, string& } // make request - string request_uri = "partNumber=" + IntToStr(part_num) + "&uploadId=" + upload_id; + string request_uri = "partNumber=" + str(part_num) + "&uploadId=" + upload_id; string urlargs = "?" 
+ request_uri; string resource; string turl; @@ -3000,8 +3125,14 @@ int S3fsCurl::UploadMultipartPostSetup(const char* tpath, int part_num, string& requestHeaders = curl_slist_sort_insert(requestHeaders, "Date", date.c_str()); requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept", NULL); + string strMD5; + if(S3fsCurl::is_content_md5){ + strMD5 = md5base64; + requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-MD5", strMD5.c_str()); + } + if(!S3fsCurl::IsPublicBucket()){ - string Signature = CalcSignatureV2("PUT", "", "", date, resource); + string Signature = CalcSignatureV2("PUT", strMD5, "", date, resource); requestHeaders = curl_slist_sort_insert(requestHeaders, "Authorization", string("AWS " + AWSAccessKeyId + ":" + Signature).c_str()); } @@ -3031,7 +3162,7 @@ int S3fsCurl::UploadMultipartPostRequest(const char* tpath, int part_num, string { int result; - FPRNNN("[tpath=%s][start=%jd][size=%zd][part=%d]", SAFESTRPTR(tpath), (intmax_t)(partdata.startpos), partdata.size, part_num); + S3FS_PRN_INFO3("[tpath=%s][start=%jd][size=%zd][part=%d]", SAFESTRPTR(tpath), (intmax_t)(partdata.startpos), partdata.size, part_num); // setup if(0 != (result = S3fsCurl::UploadMultipartPostSetup(tpath, part_num, upload_id))){ @@ -3059,7 +3190,7 @@ int S3fsCurl::UploadMultipartPostRequest(const char* tpath, int part_num, string int S3fsCurl::CopyMultipartPostRequest(const char* from, const char* to, int part_num, string& upload_id, headers_t& meta) { - FPRNNN("[from=%s][to=%s][part=%d]", SAFESTRPTR(from), SAFESTRPTR(to), part_num); + S3FS_PRN_INFO3("[from=%s][to=%s][part=%d]", SAFESTRPTR(from), SAFESTRPTR(to), part_num); if(!from || !to){ return -1; @@ -3067,7 +3198,7 @@ int S3fsCurl::CopyMultipartPostRequest(const char* from, const char* to, int par if(!CreateCurlHandle(true)){ return -1; } - string urlargs = "?partNumber=" + IntToStr(part_num) + "&uploadId=" + upload_id; + string urlargs = "?partNumber=" + str(part_num) + "&uploadId=" + upload_id; string resource; string turl; MakeUrlResource(get_realpath(to).c_str(), resource, turl); @@ -3123,16 +3254,39 @@ int S3fsCurl::CopyMultipartPostRequest(const char* from, const char* to, int par type = REQTYPE_COPYMULTIPOST; // request - DPRNNN("copying... [from=%s][to=%s][part=%d]", from, to, part_num); + S3FS_PRN_INFO3("copying... 
[from=%s][to=%s][part=%d]", from, to, part_num); int result = RequestPerform(); if(0 == result){ - const char* start_etag= strstr(bodydata->str(), "ETag"); - const char* end_etag = strstr(bodydata->str(), "/ETag>"); - - partdata.etag.assign((start_etag + 11), (size_t)(end_etag - (start_etag + 11) - 7)); - partdata.uploaded = true; + // parse ETag from response + xmlDocPtr doc; + if(NULL == (doc = xmlReadMemory(bodydata->str(), bodydata->size(), "", NULL, 0))){ + return result; + } + if(NULL == doc->children){ + S3FS_XMLFREEDOC(doc); + return result; + } + for(xmlNodePtr cur_node = doc->children->children; NULL != cur_node; cur_node = cur_node->next){ + if(XML_ELEMENT_NODE == cur_node->type){ + string elementName = reinterpret_cast(cur_node->name); + if(cur_node->children){ + if(XML_TEXT_NODE == cur_node->children->type){ + if(elementName == "ETag") { + string etag = reinterpret_cast(cur_node->children->content); + if(etag.size() >= 2 && *etag.begin() == '"' && *etag.rbegin() == '"'){ + etag.assign(etag.substr(1, etag.size() - 2)); + } + partdata.etag.assign(etag); + partdata.uploaded = true; + } + } + } + } + } + S3FS_XMLFREEDOC(doc); } + delete bodydata; bodydata = NULL; delete headdata; @@ -3150,7 +3304,7 @@ int S3fsCurl::MultipartHeadRequest(const char* tpath, off_t size, headers_t& met etaglist_t list; stringstream strrange; - FPRNNN("[tpath=%s]", SAFESTRPTR(tpath)); + S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath)); if(0 != (result = PreMultipartPostRequest(tpath, meta, upload_id, is_copy))){ return result; @@ -3188,18 +3342,18 @@ int S3fsCurl::MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, off_t remaining_bytes; off_t chunk; - FPRNNN("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd); + S3FS_PRN_INFO3("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd); // duplicate fd if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){ - DPRN("Could not duplicate file descriptor(errno=%d)", errno); + S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno); if(-1 != fd2){ close(fd2); } return -errno; } if(-1 == fstat(fd2, &st)){ - DPRN("Invalid file descriptor(errno=%d)", errno); + S3FS_PRN_ERR("Invalid file descriptor(errno=%d)", errno); close(fd2); return -errno; } @@ -3224,7 +3378,7 @@ int S3fsCurl::MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, // upload part if(0 != (result = UploadMultipartPostRequest(tpath, (list.size() + 1), upload_id))){ - DPRN("failed uploading part(%d)", result); + S3FS_PRN_ERR("failed uploading part(%d)", result); close(fd2); return result; } @@ -3239,6 +3393,41 @@ int S3fsCurl::MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, return 0; } +int S3fsCurl::MultipartUploadRequest(string upload_id, const char* tpath, int fd, off_t offset, size_t size, etaglist_t& list) +{ + S3FS_PRN_INFO3("[upload_id=%s][tpath=%s][fd=%d][offset=%jd][size=%jd]", upload_id.c_str(), SAFESTRPTR(tpath), fd, (intmax_t)offset, (intmax_t)size); + + // duplicate fd + int fd2; + if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){ + S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno); + if(-1 != fd2){ + close(fd2); + } + return -errno; + } + + // set + partdata.fd = fd2; + partdata.startpos = offset; + partdata.size = size; + b_partdata_startpos = partdata.startpos; + b_partdata_size = partdata.size; + + // upload part + int result; + if(0 != (result = UploadMultipartPostRequest(tpath, (list.size() + 1), upload_id))){ + S3FS_PRN_ERR("failed uploading part(%d)", result); + close(fd2); + return result; + } + 
list.push_back(partdata.etag); + DestroyCurlHandle(); + close(fd2); + + return 0; +} + int S3fsCurl::MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size) { int result; @@ -3248,7 +3437,7 @@ int S3fsCurl::MultipartRenameRequest(const char* from, const char* to, headers_t etaglist_t list; stringstream strrange; - FPRNNN("[from=%s][to=%s]", SAFESTRPTR(from), SAFESTRPTR(to)); + S3FS_PRN_INFO3("[from=%s][to=%s]", SAFESTRPTR(from), SAFESTRPTR(to)); string srcresource; string srcurl; @@ -3361,7 +3550,7 @@ S3fsMultiRetryCallback S3fsMultiCurl::SetRetryCallback(S3fsMultiRetryCallback fu bool S3fsMultiCurl::SetS3fsCurlObject(S3fsCurl* s3fscurl) { if(hMulti){ - DPRN("Internal error: hMulti is not null"); + S3FS_PRN_ERR("Internal error: hMulti is not null"); return false; } if(!s3fscurl){ @@ -3392,7 +3581,7 @@ int S3fsMultiCurl::MultiPerform(void) } while(curlm_code == CURLM_CALL_MULTI_PERFORM); if(curlm_code != CURLM_OK) { - DPRNNN("curl_multi_perform code: %d msg: %s", curlm_code, curl_multi_strerror(curlm_code)); + S3FS_PRN_DBG("curl_multi_perform code: %d msg: %s", curlm_code, curl_multi_strerror(curlm_code)); } // Set timer when still running @@ -3406,7 +3595,7 @@ int S3fsMultiCurl::MultiPerform(void) FD_ZERO(&e_fd); if(CURLM_OK != (curlm_code = curl_multi_timeout(hMulti, &milliseconds))){ - DPRNNN("curl_multi_timeout code: %d msg: %s", curlm_code, curl_multi_strerror(curlm_code)); + S3FS_PRN_DBG("curl_multi_timeout code: %d msg: %s", curlm_code, curl_multi_strerror(curlm_code)); } if(milliseconds < 0){ milliseconds = 50; @@ -3418,11 +3607,11 @@ int S3fsMultiCurl::MultiPerform(void) timeout.tv_usec = 1000 * milliseconds % 1000000; if(CURLM_OK != (curlm_code = curl_multi_fdset(hMulti, &r_fd, &w_fd, &e_fd, &max_fd))){ - DPRN("curl_multi_fdset code: %d msg: %s", curlm_code, curl_multi_strerror(curlm_code)); + S3FS_PRN_ERR("curl_multi_fdset code: %d msg: %s", curlm_code, curl_multi_strerror(curlm_code)); return -EIO; } if(-1 == select(max_fd + 1, &r_fd, &w_fd, &e_fd, &timeout)){ - DPRN("failed select - errno(%d)", errno); + S3FS_PRN_ERR("failed select - errno(%d)", errno); return -errno; } } @@ -3442,12 +3631,13 @@ int S3fsMultiCurl::MultiRead(void) while(NULL != (msg = curl_multi_info_read(hMulti, &remaining_messages))){ if(CURLMSG_DONE != msg->msg){ - DPRN("curl_multi_info_read code: %d", msg->msg); + S3FS_PRN_ERR("curl_multi_info_read code: %d", msg->msg); return -EIO; } hCurl = msg->easy_handle; - if(cMap_req.end() != cMap_req.find(hCurl)){ - s3fscurl = cMap_req[hCurl]; + s3fscurlmap_t::iterator iter; + if(cMap_req.end() != (iter = cMap_req.find(hCurl))){ + s3fscurl = iter->second; }else{ s3fscurl = NULL; } @@ -3461,31 +3651,31 @@ int S3fsMultiCurl::MultiRead(void) if(400 > responseCode){ // add into stat cache if(SuccessCallback && !SuccessCallback(s3fscurl)){ - DPRN("error from callback function(%s).", s3fscurl->url.c_str()); + S3FS_PRN_WARN("error from callback function(%s).", s3fscurl->url.c_str()); } }else if(400 == responseCode){ // as possibly in multipart - DPRN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str()); + S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str()); isRetry = true; }else if(404 == responseCode){ // not found - DPRN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str()); + S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str()); }else if(500 == responseCode){ // case of all other result, do retry.(11/13/2013) // because it was found that s3fs got 500 error from 
S3, but could success // to retry it. - DPRN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str()); + S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str()); isRetry = true; }else{ // Retry in other case. - DPRN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str()); + S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str()); isRetry = true; } }else{ - DPRN("failed a request(Unknown respons code: %s)", s3fscurl->url.c_str()); + S3FS_PRN_ERR("failed a request(Unknown response code: %s)", s3fscurl->url.c_str()); } }else{ - DPRN("failed to read(remaining: %d code: %d msg: %s), so retry this.", + S3FS_PRN_WARN("failed to read(remaining: %d code: %d msg: %s), so retry this.", remaining_messages, msg->data.result, curl_easy_strerror(msg->data.result)); isRetry = true; } @@ -3527,10 +3717,10 @@ int S3fsMultiCurl::Request(void) int result; CURLMcode curlm_code; - FPRNNN("[count=%zu]", cMap_all.size()); + S3FS_PRN_INFO3("[count=%zu]", cMap_all.size()); if(hMulti){ - DPRNNN("Warning: hMulti is not null, thus clear itself."); + S3FS_PRN_DBG("Warning: hMulti is not null, thus clear itself."); ClearEx(false); } @@ -3539,7 +3729,7 @@ int S3fsMultiCurl::Request(void) // Send multi request loop( with retry ) // (When many request is sends, sometimes gets "Couldn't connect to server") // - while(0 < cMap_all.size()){ + while(!cMap_all.empty()){ // populate the multi interface with an initial set of requests if(NULL == (hMulti = curl_multi_init())){ Clear(); @@ -3554,7 +3744,7 @@ int S3fsMultiCurl::Request(void) S3fsCurl* s3fscurl = (*iter).second; if(CURLM_OK != (curlm_code = curl_multi_add_handle(hMulti, hCurl))){ - DPRN("curl_multi_add_handle code: %d msg: %s", curlm_code, curl_multi_strerror(curlm_code)); + S3FS_PRN_ERR("curl_multi_add_handle code: %d msg: %s", curlm_code, curl_multi_strerror(curlm_code)); Clear(); return -EIO; } @@ -3608,14 +3798,14 @@ AdditionalHeader::~AdditionalHeader() bool AdditionalHeader::Load(const char* file) { if(!file){ - DPRNNN("file is NULL."); + S3FS_PRN_WARN("file is NULL."); return false; } Unload(); ifstream AH(file); if(!AH.good()){ - DPRNNN("Could not open file(%s).", file); + S3FS_PRN_WARN("Could not open file(%s).", file); return false; } @@ -3648,7 +3838,7 @@ bool AdditionalHeader::Load(const char* file) if(0 == key.size()){ continue; } - DPRNNN("file format error: %s key(suffix) is no HTTP header value.", key.c_str()); + S3FS_PRN_ERR("file format error: %s key(suffix) is no HTTP header value.", key.c_str()); Unload(); return false; } @@ -3665,12 +3855,13 @@ bool AdditionalHeader::Load(const char* file) charcntlist.push_back(keylen); } // set addheader - if(addheader.end() == addheader.find(key)){ + addheader_t::iterator aiter; + if(addheader.end() == (aiter = addheader.find(key))){ headerpair_t hpair; hpair[head] = value; addheader[key] = hpair; }else{ - (addheader[key])[head] = value; + aiter->second[head] = value; } // set flag if(!is_enable){
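The AddHeader changes in the next hunk reuse the iterator returned by find() instead of performing a second lookup with at(). The underlying structure is a map from filename suffixes to header key/value pairs; a minimal, self-contained sketch of the same lookup pattern (types simplified, names illustrative):

#include <map>
#include <string>
#include <cstring>

typedef std::map<std::string, std::string> headerpair_t;      // header name -> value
typedef std::map<std::string, headerpair_t> addheader_t;      // path suffix -> headers

// Attach the extra headers registered for a matching path suffix.
// One map lookup via the saved iterator, instead of find() followed by at().
bool add_header(const addheader_t& addheader, const char* path, headerpair_t& meta, size_t suffix_len)
{
  size_t path_len = strlen(path);
  if(path_len < suffix_len){
    return false;
  }
  std::string suffix(&path[path_len - suffix_len]);
  addheader_t::const_iterator aiter = addheader.find(suffix);
  if(addheader.end() == aiter){
    return false;
  }
  for(headerpair_t::const_iterator piter = aiter->second.begin(); piter != aiter->second.end(); ++piter){
    meta[piter->first] = piter->second;   // adding header
  }
  return true;
}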
@@ -3693,7 +3884,7 @@ bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const return true; } if(!path){ - DPRNNN("path is NULL."); + S3FS_PRN_WARN("path is NULL."); return false; } int nPathLen = strlen(path); @@ -3704,10 +3895,11 @@ bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const } // make target suffix(same character count) & find string suffix(&path[nPathLen - (*iter)]); - if(addheader.end() == addheader.find(suffix)){ + addheader_t::const_iterator aiter; + if(addheader.end() == (aiter = addheader.find(suffix))){ continue; } - for(headerpair_t::const_iterator piter = addheader.at(suffix).begin(); piter != addheader.at(suffix).end(); ++piter){ + for(headerpair_t::const_iterator piter = aiter->second.begin(); piter != aiter->second.end(); ++piter){ // Adding header meta[(*piter).first] = (*piter).second; } @@ -3733,7 +3925,7 @@ struct curl_slist* AdditionalHeader::AddHeader(struct curl_slist* list, const ch bool AdditionalHeader::Dump(void) const { - if(!foreground2){ + if(!IS_S3FS_LOG_DBG()){ return true; } // character count list @@ -3758,7 +3950,7 @@ bool AdditionalHeader::Dump(void) const ssdbg << "}"; // print all - FPRNINFO("%s", ssdbg.str().c_str()); + S3FS_PRN_DBG("%s", ssdbg.str().c_str()); return true; } @@ -3945,7 +4137,7 @@ bool MakeUrlResource(const char* realpath, string& resourcepath, string& url) string prepare_url(const char* url) { - FPRNINFO("URL is %s", url); + S3FS_PRN_INFO3("URL is %s", url); string uri; string host; @@ -3977,7 +4169,7 @@ string prepare_url(const char* url) url_str = uri + host + path; - FPRNINFO("URL changed is %s", url_str.c_str()); + S3FS_PRN_INFO3("URL changed is %s", url_str.c_str()); return str(url_str); } diff --git a/src/curl.h b/src/curl.h index 6726c7e..0da7ccb 100644 --- a/src/curl.h +++ b/src/curl.h @@ -20,6 +20,11 @@ #ifndef S3FS_CURL_H_ #define S3FS_CURL_H_ +//---------------------------------------------- +// Symbols +//---------------------------------------------- +#define MIN_MULTIPART_SIZE 5242880 // 5MB + //---------------------------------------------- // class BodyData //---------------------------------------------- @@ -122,6 +127,21 @@ typedef std::map<std::string, std::string> iamcredmap_t; typedef std::map<std::string, std::string> sseckeymap_t; typedef std::list<sseckeymap_t> sseckeylist_t; +// storage class(rrs) +enum storage_class_t { + STANDARD, + STANDARD_IA, + REDUCED_REDUNDANCY, +}; + +// sse type +enum sse_type_t { + SSE_DISABLE = 0, // not use server side encrypting + SSE_S3, // server side encrypting by S3 key + SSE_C, // server side encrypting by custom key + SSE_KMS // server side encrypting by kms id +}; + // share #define SHARE_MUTEX_DNS 0 #define SHARE_MUTEX_SSL_SESSION 1 @@ -165,9 +185,10 @@ class S3fsCurl static int retries; static bool is_public_bucket; static std::string default_acl; // TODO: to enum - static bool is_use_rrs; + static storage_class_t storage_class; static sseckeylist_t sseckeys; - static bool is_use_sse; + static std::string ssekmsid; + static sse_type_t ssetype; static bool is_content_md5; static bool is_verbose; static std::string AWSAccessKeyId; @@ -206,12 +227,13 @@ class S3fsCurl int b_postdata_remaining; // backup for retrying off_t b_partdata_startpos; // backup for retrying ssize_t b_partdata_size; // backup for retrying - bool b_ssekey_pos; // backup for retrying - std::string b_ssekey_md5; // backup for retrying + int b_ssekey_pos; // backup for retrying + std::string b_ssevalue; // backup for retrying + sse_type_t b_ssetype; // backup for retrying public: // constructor/destructor - S3fsCurl(bool ahbe = false); + explicit S3fsCurl(bool ahbe = false); ~S3fsCurl(); private: @@ -240,8 +262,12 @@ class S3fsCurl static bool ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval); static bool SetIAMCredentials(const char* response); + static bool LoadEnvSseCKeys(void); + static bool LoadEnvSseKmsid(void); static bool PushbackSseKeys(std::string& onekey); + static int CurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr); + // methods bool ResetHandle(void); bool RemakeHandle(void); @@ 
-252,10 +278,7 @@ class S3fsCurl bool GetUploadId(std::string& upload_id); int GetIAMCredentials(void); - int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy); - int CompleteMultipartPostRequest(const char* tpath, std::string& upload_id, etaglist_t& parts); int UploadMultipartPostSetup(const char* tpath, int part_num, std::string& upload_id); - int UploadMultipartPostRequest(const char* tpath, int part_num, std::string& upload_id); int CopyMultipartPostRequest(const char* from, const char* to, int part_num, std::string& upload_id, headers_t& meta); public: @@ -278,16 +301,23 @@ class S3fsCurl static bool SetPublicBucket(bool flag); static bool IsPublicBucket(void) { return S3fsCurl::is_public_bucket; } static std::string SetDefaultAcl(const char* acl); - static bool SetUseRrs(bool flag); - static bool GetUseRrs(void) { return S3fsCurl::is_use_rrs; } - static bool SetSseKeys(const char* filepath); - static bool LoadEnvSseKeys(void); + static storage_class_t SetStorageClass(storage_class_t storage_class); + static storage_class_t GetStorageClass() { return S3fsCurl::storage_class; } + static bool LoadEnvSse(void) { return (S3fsCurl::LoadEnvSseCKeys() && S3fsCurl::LoadEnvSseKmsid()); } + static sse_type_t SetSseType(sse_type_t type); + static sse_type_t GetSseType(void) { return S3fsCurl::ssetype; } + static bool IsSseDisable(void) { return (SSE_DISABLE == S3fsCurl::ssetype); } + static bool IsSseS3Type(void) { return (SSE_S3 == S3fsCurl::ssetype); } + static bool IsSseCType(void) { return (SSE_C == S3fsCurl::ssetype); } + static bool IsSseKmsType(void) { return (SSE_KMS == S3fsCurl::ssetype); } + static bool FinalCheckSse(void); + static bool SetSseCKeys(const char* filepath); + static bool SetSseKmsid(const char* kmsid); + static bool IsSetSseKmsId(void) { return !S3fsCurl::ssekmsid.empty(); } + static const char* GetSseKmsId(void) { return S3fsCurl::ssekmsid.c_str(); } static bool GetSseKey(std::string& md5, std::string& ssekey); static bool GetSseKeyMd5(int pos, std::string& md5); static int GetSseKeyCount(void); - static bool IsSseCustomMode(void); - static bool SetUseSse(bool flag); - static bool GetUseSse(void) { return S3fsCurl::is_use_sse; } static bool SetContentMd5(bool flag); static bool SetVerbose(bool flag); static bool GetVerbose(void) { return S3fsCurl::is_verbose; } @@ -310,7 +340,7 @@ class S3fsCurl bool CreateCurlHandle(bool force = false); bool DestroyCurlHandle(void); - bool AddSseKeyRequestHead(std::string& md5, bool is_copy); + bool AddSseRequestHead(sse_type_t ssetype, std::string& ssevalue, bool is_only_c, bool is_copy); bool GetResponseCode(long& responseCode); int RequestPerform(void); int DeleteRequest(const char* tpath); @@ -321,14 +351,18 @@ class S3fsCurl int HeadRequest(const char* tpath, headers_t& meta); int PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy); int PutRequest(const char* tpath, headers_t& meta, int fd); - int PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size, std::string& ssekeymd5); + int PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size, sse_type_t ssetype, std::string& ssevalue); int GetObjectRequest(const char* tpath, int fd, off_t start = -1, ssize_t size = -1); int CheckBucket(void); int ListBucketRequest(const char* tpath, const char* query); + int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy); + int CompleteMultipartPostRequest(const char* tpath, std::string& upload_id, 
etaglist_t& parts); + int UploadMultipartPostRequest(const char* tpath, int part_num, std::string& upload_id); int MultipartListRequest(std::string& body); int AbortMultipartUpload(const char* tpath, std::string& upload_id); int MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool is_copy); int MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool is_copy); + int MultipartUploadRequest(std::string upload_id, const char* tpath, int fd, off_t offset, size_t size, etaglist_t& list); int MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size); // methods(valiables) @@ -433,6 +467,7 @@ std::string get_sorted_header_keys(const struct curl_slist* list); std::string get_canonical_headers(const struct curl_slist* list, bool only_amz = false); bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url); std::string prepare_url(const char* url); +bool get_object_sse_type(const char* path, sse_type_t& ssetype, std::string& ssevalue); // implement in s3fs.cpp #endif // S3FS_CURL_H_ diff --git a/src/fdcache.cpp b/src/fdcache.cpp index c2e9975..19786f4 100644 --- a/src/fdcache.cpp +++ b/src/fdcache.cpp @@ -52,7 +52,15 @@ using namespace std; // Symbols //------------------------------------------------ #define MAX_MULTIPART_CNT 10000 // S3 multipart max count -#define FDPAGE_SIZE (50 * 1024 * 1024) // 50MB(parallel uploading is 5 parallel(default) * 10 MB) + +// +// For cache directory top path +// +#if defined(P_tmpdir) +#define TMPFILE_DIR_0PATH P_tmpdir +#else +#define TMPFILE_DIR_0PATH "/tmp" +#endif //------------------------------------------------ // CacheFileStat class methods //------------------------------------------------ @@ -66,7 +74,11 @@ bool CacheFileStat::MakeCacheFileStatPath(const char* path, string& sfile_path, top_path += ".stat"; if(is_create_dir){ - mkdirp(top_path + mydirname(path), 0777); + int result; + if(0 != (result = mkdirp(top_path + mydirname(path), 0777))){ + S3FS_PRN_ERR("failed to create dir(%s) by errno(%d).", path, result); + return false; + } } if(!path || '\0' == path[0]){ sfile_path = top_path; @@ -76,6 +88,20 @@ bool CacheFileStat::MakeCacheFileStatPath(const char* path, string& sfile_path, return true; } +bool CacheFileStat::CheckCacheFileStatTopDir(void) +{ + if(!FdManager::IsCacheDir()){ + return true; + } + // make stat dir top path( "<cache_dir>/.<bucket_name>.stat" ) + string top_path = FdManager::GetCacheDir(); + top_path += "/."; + top_path += bucket; + top_path += ".stat"; + + return check_exist_dir_permission(top_path.c_str()); +} + bool CacheFileStat::DeleteCacheFileStat(const char* path) { if(!path || '\0' == path[0]){ @@ -84,11 +110,15 @@ bool CacheFileStat::DeleteCacheFileStat(const char* path) // stat path string sfile_path; if(!CacheFileStat::MakeCacheFileStatPath(path, sfile_path, false)){ - DPRNINFO("failed to create cache stat file path(%s)", path); + S3FS_PRN_ERR("failed to create cache stat file path(%s)", path); return false; } if(0 != unlink(sfile_path.c_str())){ - DPRNINFO("failed to delete file(%s): errno=%d", path, errno); + if(ENOENT == errno){ + S3FS_PRN_DBG("failed to delete file(%s): errno=%d", path, errno); + }else{ + S3FS_PRN_ERR("failed to delete file(%s): errno=%d", path, errno); + } return false; } return true; }
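The new CheckCacheFileStatTopDir above derives the stat directory from the cache root and the bucket name. A toy illustration of the path layout it checks — all values here are hypothetical:

#include <string>
#include <iostream>

// Illustrative only: compose the stat-directory path the way the code above does.
std::string stat_top_dir(const std::string& cache_dir, const std::string& bucket)
{
  return cache_dir + "/." + bucket + ".stat";   // "<cache_dir>/.<bucket_name>.stat"
}

int main()
{
  // e.g. a cache dir of "/var/cache/s3fs" and a bucket named "mybucket"
  std::cout << stat_top_dir("/var/cache/s3fs", "mybucket") << std::endl;
  // prints: /var/cache/s3fs/.mybucket.stat
  return 0;
}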
@@ -139,30 +169,30 @@ bool CacheFileStat::Open(void) // stat path string sfile_path; if(!CacheFileStat::MakeCacheFileStatPath(path.c_str(), sfile_path, true)){ - DPRN("failed to create cache stat file path(%s)", path.c_str()); + S3FS_PRN_ERR("failed to create cache stat file path(%s)", path.c_str()); return false; } // open if(-1 == (fd = open(sfile_path.c_str(), O_CREAT|O_RDWR, 0600))){ - DPRNINFO("failed to open cache stat file path(%s) - errno(%d)", path.c_str(), errno); + S3FS_PRN_ERR("failed to open cache stat file path(%s) - errno(%d)", path.c_str(), errno); return false; } // lock if(-1 == flock(fd, LOCK_EX)){ - DPRN("failed to lock cache stat file(%s) - errno(%d)", path.c_str(), errno); + S3FS_PRN_ERR("failed to lock cache stat file(%s) - errno(%d)", path.c_str(), errno); close(fd); fd = -1; return false; } // seek top if(0 != lseek(fd, 0, SEEK_SET)){ - DPRN("failed to lseek cache stat file(%s) - errno(%d)", path.c_str(), errno); + S3FS_PRN_ERR("failed to lseek cache stat file(%s) - errno(%d)", path.c_str(), errno); flock(fd, LOCK_UN); close(fd); fd = -1; return false; } - DPRNINFO("file locked(%s - %s)", path.c_str(), sfile_path.c_str()); + S3FS_PRN_DBG("file locked(%s - %s)", path.c_str(), sfile_path.c_str()); return true; } @@ -175,13 +205,13 @@ bool CacheFileStat::Release(void) } // unlock if(-1 == flock(fd, LOCK_UN)){ - DPRN("failed to unlock cache stat file(%s) - errno(%d)", path.c_str(), errno); + S3FS_PRN_ERR("failed to unlock cache stat file(%s) - errno(%d)", path.c_str(), errno); return false; } - DPRNINFO("file unlocked(%s)", path.c_str()); + S3FS_PRN_DBG("file unlocked(%s)", path.c_str()); if(-1 == close(fd)){ - DPRN("failed to close cache stat file(%s) - errno(%d)", path.c_str(), errno); + S3FS_PRN_ERR("failed to close cache stat file(%s) - errno(%d)", path.c_str(), errno); return false; } fd = -1; @@ -200,9 +230,9 @@ void PageList::FreeList(fdpage_list_t& list) list.clear(); } -PageList::PageList(off_t size, bool is_init) +PageList::PageList(size_t size, bool is_loaded) { - Init(size, is_init); + Init(size, is_loaded); } PageList::~PageList() { Clear(); } @@ -210,132 +240,160 @@ -off_t PageList::Size(void) const -{ - if(0 == pages.size()){ - return 0; - } - fdpage_list_t::const_reverse_iterator riter = pages.rbegin(); - return ((*riter)->offset + (*riter)->bytes); -} - -int PageList::Resize(off_t size, bool is_init) -{ - off_t total = Size(); - - if(0 == total){ - Init(size, is_init); - - }else if(total < size){ - off_t remain = size - total; // remaining bytes - fdpage_list_t::reverse_iterator riter = pages.rbegin(); - - if((*riter)->bytes < FdManager::GetPageSize()){ - // resize last area - remain += (*riter)->bytes; // remaining bytes(without last page) - (*riter)->bytes = remain > static_cast<off_t>(FdManager::GetPageSize()) ? FdManager::GetPageSize() : static_cast<size_t>(remain); // reset page size - remain -= (*riter)->bytes; // remaining bytes(after last page) - (*riter)->init = is_init; - } - - // add new area - for(off_t next = (*riter)->next(); 0 < remain; remain -= size, next += size){ - size = remain > static_cast<off_t>(FdManager::GetPageSize()) ? static_cast<off_t>(FdManager::GetPageSize()) : remain; - fdpage* page = new fdpage(next, size, is_init); - pages.push_back(page); - } - - }else if(total > size){ - for(fdpage_list_t::reverse_iterator riter = pages.rbegin(); riter != pages.rend(); riter++){ - if((*riter)->offset < size){ - (*riter)->bytes = static_cast<size_t>(size - (*riter)->offset); - break; - } - } - } - return true; -} - void PageList::Clear(void) { PageList::FreeList(pages); } -int PageList::Init(off_t size, bool is_init) +bool PageList::Init(size_t size, bool is_loaded) { Clear(); - for(off_t total = 0; total < size; total += FdManager::GetPageSize()){ - size_t areasize = (total + static_cast<off_t>(FdManager::GetPageSize())) < size ? 
FdManager::GetPageSize() : static_cast<size_t>(size - total); - fdpage* page = new fdpage(total, areasize, is_init); - pages.push_back(page); - } - return pages.size(); + fdpage* page = new fdpage(0, size, is_loaded); + pages.push_back(page); + return true; } -bool PageList::IsInit(off_t start, off_t size) +size_t PageList::Size(void) const { - off_t next = start + size; + if(pages.empty()){ + return 0; + } + fdpage_list_t::const_reverse_iterator riter = pages.rbegin(); + return static_cast<size_t>((*riter)->next()); +} - if(0 == pages.size()){ - return false; - } - // check end - fdpage_list_t::reverse_iterator riter = pages.rbegin(); - if((*riter)->next() < next){ - // size is over end of page list. - return false; - } - for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); iter++){ - if(next <= (*iter)->offset){ - break; - } - if((start <= (*iter)->offset && (*iter)->offset < next) || // start < iter-start < end - (start <= (*iter)->end() && (*iter)->end() < next) || // start < iter-end < end - ((*iter)->offset <= start && next <= (*iter)->end()) ) // iter-start < start < end < iter-end - { - if(!(*iter)->init){ - return false; +bool PageList::Compress(void) +{ + bool is_first = true; + bool is_last_loaded = false; + for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ){ + if(is_first){ + is_first = false; + is_last_loaded = (*iter)->loaded; + ++iter; + }else{ + if(is_last_loaded == (*iter)->loaded){ + fdpage_list_t::iterator biter = iter; + --biter; + (*biter)->bytes += (*iter)->bytes; + iter = pages.erase(iter); + }else{ + is_last_loaded = (*iter)->loaded; + ++iter; } } } return true; } -bool PageList::SetInit(off_t start, off_t size, bool is_init) +bool PageList::Parse(off_t new_pos) { - // check size & resize - if(Size() < (start + size)){ - Resize(start + size, false); - } - - off_t next = start + size; - for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); iter++){ - if((*iter)->end() < start){ - // out of area - // iter:start < iter:end < start < end - continue; - }else if(next <= (*iter)->offset){ - // out of area - // start < end < iter:start < iter:end - break; + for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){ + if(new_pos == (*iter)->offset){ + // nothing to do + return true; + }else if((*iter)->offset < new_pos && new_pos < (*iter)->next()){ + fdpage* page = new fdpage((*iter)->offset, static_cast<size_t>(new_pos - (*iter)->offset), (*iter)->loaded); + (*iter)->bytes -= (new_pos - (*iter)->offset); + (*iter)->offset = new_pos; + pages.insert(iter, page); + return true; } - // area of target overlaps with iter area - // iter:start < start < iter:end < end - // iter:start < start < end < iter:end - // start < iter:start < iter:end < end - // start < iter:start < end < iter:end - if((*iter)->init != is_init){ - (*iter)->init = is_init; + } + return false; +} +
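Compress and Parse above are the two halves of the new page-tracking scheme: Parse splits the page containing a position so that position becomes a page boundary, and Compress re-merges neighboring pages whose loaded flag matches. A simplified, self-contained model of the same split/merge idea — standalone types, illustrative only, not the patch's fdpage:

#include <list>
#include <cstddef>
#include <sys/types.h>

// Simplified stand-in for fdpage: the range [offset, offset + bytes) plus a loaded flag.
struct page { off_t offset; size_t bytes; bool loaded; };
typedef std::list<page> pagelist_t;

// Split the page containing pos so that pos becomes a page boundary (what Parse does).
void split_at(pagelist_t& pages, off_t pos)
{
  for(pagelist_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){
    if(iter->offset < pos && pos < iter->offset + static_cast<off_t>(iter->bytes)){
      page head = { iter->offset, static_cast<size_t>(pos - iter->offset), iter->loaded };
      iter->bytes -= head.bytes;     // the original node keeps the tail half
      iter->offset = pos;
      pages.insert(iter, head);      // the head half is linked in front of it
      return;
    }
  }
}

// Merge neighboring pages whose loaded flag matches (what Compress does).
void compress(pagelist_t& pages)
{
  for(pagelist_t::iterator iter = pages.begin(); iter != pages.end(); ){
    pagelist_t::iterator next = iter;
    if(++next != pages.end() && iter->loaded == next->loaded){
      iter->bytes += next->bytes;    // absorb the neighbor and stay put,
      pages.erase(next);             // so runs longer than two also collapse
    }else{
      iter = next;
    }
  }
}

Keeping the list compressed is what lets the new single-page Init replace the old fixed FDPAGE_SIZE chunking: page boundaries now exist only where the loaded status actually changes.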
+ return Compress(); +} + +bool PageList::IsPageLoaded(off_t start, size_t size) const +{ + for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + if((*iter)->end() < start){ + continue; + } + if(!(*iter)->loaded){ + return false; + } + if(0 != size && static_cast(start + size) <= static_cast((*iter)->next())){ + break; } } return true; } -bool PageList::FindUninitPage(off_t start, off_t& resstart, size_t& ressize) +bool PageList::SetPageLoadedStatus(off_t start, size_t size, bool is_loaded, bool is_compress) { - for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); iter++){ + size_t now_size = Size(); + + if(now_size <= static_cast(start)){ + if(now_size < static_cast(start)){ + // add + Resize(static_cast(start), false); + } + Resize(static_cast(start + size), is_loaded); + + }else if(now_size <= static_cast(start + size)){ + // cut + Resize(static_cast(start), false); + // add + Resize(static_cast(start + size), is_loaded); + + }else{ + // start-size are inner pages area + // parse "start", and "start + size" position + Parse(start); + Parse(start + size); + + // set loaded flag + for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){ + if((*iter)->end() < start){ + continue; + }else if(static_cast(start + size) <= (*iter)->offset){ + break; + }else{ + (*iter)->loaded = is_loaded; + } + } + } + // compress area + return (is_compress ? Compress() : true); +} + +bool PageList::FindUnloadedPage(off_t start, off_t& resstart, size_t& ressize) const +{ + for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ if(start <= (*iter)->end()){ - if(!(*iter)->init){ + if(!(*iter)->loaded){ resstart = (*iter)->offset; ressize = (*iter)->bytes; return true; @@ -345,29 +403,76 @@ bool PageList::FindUninitPage(off_t start, off_t& resstart, size_t& ressize) return false; } -int PageList::GetUninitPages(fdpage_list_t& uninit_list, off_t start, off_t size) +size_t PageList::GetTotalUnloadedPageSize(off_t start, size_t size) const { - for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); iter++){ - if(start <= (*iter)->end()){ - if((start + size) <= (*iter)->offset){ - // reach to end - break; + size_t restsize = 0; + off_t next = static_cast(start + size); + for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + if((*iter)->next() <= start){ + continue; + } + if(next <= (*iter)->offset){ + break; + } + if((*iter)->loaded){ + continue; + } + size_t tmpsize; + if((*iter)->offset <= start){ + if((*iter)->next() <= next){ + tmpsize = static_cast((*iter)->next() - start); + }else{ + tmpsize = static_cast(next - start); // = size } - // after start pos - if(!(*iter)->init){ - // found uninitialized area - fdpage_list_t::reverse_iterator riter = uninit_list.rbegin(); - if(riter != uninit_list.rend() && (*riter)->next() == (*iter)->offset){ - // merge to before page - (*riter)->bytes += (*iter)->bytes; - }else{ - fdpage* page = new fdpage((*iter)->offset, (*iter)->bytes, false); - uninit_list.push_back(page); - } + }else{ + if((*iter)->next() <= next){ + tmpsize = static_cast((*iter)->next() - (*iter)->offset); // = (*iter)->bytes + }else{ + tmpsize = static_cast(next - (*iter)->offset); } } + restsize += tmpsize; } - return uninit_list.size(); + return restsize; +} + +int PageList::GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start, size_t size) const +{ + // If size is 0, it means loading to end. 
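+  // (e.g. for pages {0:100,loaded}{100:100,unloaded}{200:100,loaded}
+  //  {300:100,unloaded}, a request with start=50 and size=300 collects the
+  //  intersections {100:100} and {300:50} into unloaded_list)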
+ if(0 == size){ + if(static_cast(start) < Size()){ + size = static_cast(Size() - start); + } + } + off_t next = static_cast(start + size); + + for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + if((*iter)->next() <= start){ + continue; + } + if(next <= (*iter)->offset){ + break; + } + if((*iter)->loaded){ + continue; // already loaded + } + + // page area + off_t page_start = max((*iter)->offset, start); + off_t page_next = min((*iter)->next(), next); + size_t page_size = static_cast(page_next - page_start); + + // add list + fdpage_list_t::reverse_iterator riter = unloaded_list.rbegin(); + if(riter != unloaded_list.rend() && (*riter)->next() == page_start){ + // merge to before page + (*riter)->bytes += page_size; + }else{ + fdpage* page = new fdpage(page_start, page_size, false); + unloaded_list.push_back(page); + } + } + return unloaded_list.size(); } bool PageList::Serialize(CacheFileStat& file, bool is_output) @@ -382,13 +487,13 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output) stringstream ssall; ssall << Size(); - for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); iter++){ - ssall << "\n" << (*iter)->offset << ":" << (*iter)->bytes << ":" << ((*iter)->init ? "1" : "0"); + for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){ + ssall << "\n" << (*iter)->offset << ":" << (*iter)->bytes << ":" << ((*iter)->loaded ? "1" : "0"); } string strall = ssall.str(); if(0 >= pwrite(file.GetFd(), strall.c_str(), strall.length(), 0)){ - DPRN("failed to write stats(%d)", errno); + S3FS_PRN_ERR("failed to write stats(%d)", errno); return false; } @@ -399,7 +504,7 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output) struct stat st; memset(&st, 0, sizeof(struct stat)); if(-1 == fstat(file.GetFd(), &st)){ - DPRN("fstat is failed. errno(%d)", errno); + S3FS_PRN_ERR("fstat is failed. errno(%d)", errno); return false; } if(0 >= st.st_size){ @@ -409,29 +514,29 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output) } char* ptmp; if(NULL == (ptmp = (char*)calloc(st.st_size + 1, sizeof(char)))){ - DPRNCRIT("could not allocate memory."); + S3FS_PRN_CRIT("could not allocate memory."); S3FS_FUSE_EXIT(); return false; } // read from file if(0 >= pread(file.GetFd(), ptmp, st.st_size, 0)){ - DPRN("failed to read stats(%d)", errno); + S3FS_PRN_ERR("failed to read stats(%d)", errno); free(ptmp); return false; } string oneline; stringstream ssall(ptmp); - // init + // loaded Clear(); // load(size) if(!getline(ssall, oneline, '\n')){ - DPRN("failed to parse stats."); + S3FS_PRN_ERR("failed to parse stats."); free(ptmp); return false; } - off_t total = s3fs_strtoofft(oneline.c_str()); + size_t total = s3fs_strtoofft(oneline.c_str()); // load each part bool is_err = false; @@ -450,25 +555,25 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output) break; } off_t size = s3fs_strtoofft(part.c_str()); - // init + // loaded if(!getline(ssparts, part, ':')){ is_err = true; break; } - bool is_init = (1 == s3fs_strtoofft(part.c_str()) ? true : false); + bool is_loaded = (1 == s3fs_strtoofft(part.c_str()) ? 
true : false); // add new area - SetInit(offset, size, is_init); + SetPageLoadedStatus(offset, size, is_loaded); } free(ptmp); if(is_err){ - DPRN("failed to parse stats."); + S3FS_PRN_ERR("failed to parse stats."); Clear(); return false; } // check size if(total != Size()){ - DPRN("different size(%jd - %jd).", (intmax_t)total, (intmax_t)Size()); + S3FS_PRN_ERR("different size(%jd - %jd).", (intmax_t)total, (intmax_t)Size()); Clear(); return false; } @@ -480,24 +585,45 @@ void PageList::Dump(void) { int cnt = 0; - DPRNINFO("pages = {"); - for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); iter++, cnt++){ - DPRNINFO(" [%08d] -> {%014jd - %014zu : %s}", cnt, (intmax_t)((*iter)->offset), (*iter)->bytes, (*iter)->init ? "true" : "false"); + S3FS_PRN_DBG("pages = {"); + for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter, ++cnt){ + S3FS_PRN_DBG(" [%08d] -> {%014jd - %014zu : %s}", cnt, (intmax_t)((*iter)->offset), (*iter)->bytes, (*iter)->loaded ? "true" : "false"); } - DPRNINFO("}"); + S3FS_PRN_DBG("}"); +} + +//------------------------------------------------ +// FdEntity class methods +//------------------------------------------------ +int FdEntity::FillFile(int fd, unsigned char byte, size_t size, off_t start) +{ + unsigned char bytes[1024 * 32]; // 32kb + memset(bytes, byte, min(sizeof(bytes), size)); + + for(ssize_t total = 0, onewrote = 0; static_cast(total) < size; total += onewrote){ + if(-1 == (onewrote = pwrite(fd, bytes, min(sizeof(bytes), (size - static_cast(total))), start + total))){ + S3FS_PRN_ERR("pwrite failed. errno(%d)", errno); + return -errno; + } + } + return 0; } //------------------------------------------------ // FdEntity methods //------------------------------------------------ FdEntity::FdEntity(const char* tpath, const char* cpath) - : is_lock_init(false), path(SAFESTRPTR(tpath)), cachepath(SAFESTRPTR(cpath)), fd(-1), file(NULL), is_modify(false) + : is_lock_init(false), refcnt(0), path(SAFESTRPTR(tpath)), cachepath(SAFESTRPTR(cpath)), + fd(-1), pfile(NULL), is_modify(false), size_orgmeta(0), upload_id(""), mp_start(0), mp_size(0) { try{ - pthread_mutex_init(&fdent_lock, NULL); + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE_NP); // recursive mutex + pthread_mutex_init(&fdent_lock, &attr); is_lock_init = true; }catch(exception& e){ - DPRNCRIT("failed to init mutex"); + S3FS_PRN_CRIT("failed to init mutex"); } } @@ -509,7 +635,7 @@ FdEntity::~FdEntity() try{ pthread_mutex_destroy(&fdent_lock); }catch(exception& e){ - DPRNCRIT("failed to destroy mutex"); + S3FS_PRN_CRIT("failed to destroy mutex"); } is_lock_init = false; } @@ -519,16 +645,16 @@ void FdEntity::Clear(void) { AutoLock auto_lock(&fdent_lock); - if(file){ + if(pfile){ if(0 != cachepath.size()){ CacheFileStat cfstat(path.c_str()); if(!pagelist.Serialize(cfstat, true)){ - DPRN("failed to save cache stat file(%s).", path.c_str()); + S3FS_PRN_WARN("failed to save cache stat file(%s).", path.c_str()); } } - fclose(file); - file = NULL; - fd = -1; + fclose(pfile); + pfile = NULL; + fd = -1; } pagelist.Init(0, false); refcnt = 0; @@ -539,7 +665,7 @@ void FdEntity::Clear(void) void FdEntity::Close(void) { - FPRNINFO("[path=%s][fd=%d][refcnt=%d]", path.c_str(), fd, (-1 != fd ? refcnt - 1 : refcnt)); + S3FS_PRN_DBG("[path=%s][fd=%d][refcnt=%d]", path.c_str(), fd, (-1 != fd ? 
refcnt - 1 : refcnt)); if(-1 != fd){ AutoLock auto_lock(&fdent_lock); @@ -551,19 +677,19 @@ void FdEntity::Close(void) if(0 != cachepath.size()){ CacheFileStat cfstat(path.c_str()); if(!pagelist.Serialize(cfstat, true)){ - DPRN("failed to save cache stat file(%s).", path.c_str()); + S3FS_PRN_WARN("failed to save cache stat file(%s).", path.c_str()); } } - fclose(file); - file = NULL; - fd = -1; + fclose(pfile); + pfile = NULL; + fd = -1; } } } int FdEntity::Dup(void) { - FPRNINFO("[path=%s][fd=%d][refcnt=%d]", path.c_str(), fd, (-1 != fd ? refcnt + 1 : refcnt)); + S3FS_PRN_DBG("[path=%s][fd=%d][refcnt=%d]", path.c_str(), fd, (-1 != fd ? refcnt + 1 : refcnt)); if(-1 != fd){ AutoLock auto_lock(&fdent_lock); @@ -572,97 +698,106 @@ int FdEntity::Dup(void) return fd; } -int FdEntity::Open(off_t size, time_t time) +// [NOTE] +// This method does not lock fdent_lock, because FdManager::fd_manager_lock +// is locked before calling. +// +int FdEntity::Open(headers_t* pmeta, ssize_t size, time_t time) { - bool already_opened = false; // already opened fd - bool is_csf_loaded = false; // loaded by cache stat file - bool is_truncate = false; // need to truncate - bool init_value = false; // value for pagelist - - FPRNINFO("[path=%s][fd=%d][size=%jd][time=%jd]", path.c_str(), fd, (intmax_t)size, (intmax_t)time); + S3FS_PRN_DBG("[path=%s][fd=%d][size=%jd][time=%jd]", path.c_str(), fd, (intmax_t)size, (intmax_t)time); if(-1 != fd){ // already opened, needs to increment refcnt. - already_opened = true; + Dup(); + return 0; + } - }else{ - // open - if(0 != cachepath.size()){ - // At first, open & flock stat file. - { - CacheFileStat cfstat(path.c_str()); - is_csf_loaded = pagelist.Serialize(cfstat, false); - } + bool need_save_csf = false; // need to save(reset) cache stat file + bool is_truncate = false; // need to truncate - // open cache file - if(is_csf_loaded && -1 != (fd = open(cachepath.c_str(), O_RDWR))){ - // file exists - struct stat st; - memset(&st, 0, sizeof(struct stat)); - if(-1 == fstat(fd, &st)){ - DPRN("fstat is failed. errno(%d)", errno); - fclose(file); - file = NULL; - fd = -1; - return (0 == errno ? -EIO : -errno); - } - if((-1 != size && size != pagelist.Size()) || st.st_size != pagelist.Size()){ - is_csf_loaded = false; // reinitializing - if(-1 == size){ - size = st.st_size; - }else{ - is_truncate = true; - } - }else{ - // size OK! --> no initialize after this line. - } + if(0 != cachepath.size()){ + // using cache - }else{ - // file does not exist -> create & open - if(-1 == (fd = open(cachepath.c_str(), O_CREAT|O_RDWR|O_TRUNC, 0600))){ - DPRN("failed to open file(%s). errno(%d)", cachepath.c_str(), errno); - return (0 == errno ? -EIO : -errno); - } - if(-1 == size){ - size = 0; - }else{ - is_truncate = true; - } - is_csf_loaded = false; - } - // make file pointer(for being same tmpfile) - if(NULL == (file = fdopen(fd, "wb"))){ - DPRN("failed to get fileno(%s). errno(%d)", cachepath.c_str(), errno); - close(fd); + // open cache and cache stat file, load page info. + CacheFileStat cfstat(path.c_str()); + + if(pagelist.Serialize(cfstat, false) && -1 != (fd = open(cachepath.c_str(), O_RDWR))){ + // success to open cache file + struct stat st; + memset(&st, 0, sizeof(struct stat)); + if(-1 == fstat(fd, &st)){ + S3FS_PRN_ERR("fstat is failed. errno(%d)", errno); fd = -1; return (0 == errno ? -EIO : -errno); } - - }else{ - // open temporary file - if(NULL == (file = tmpfile()) || -1 ==(fd = fileno(file))){ - DPRN("failed to open tmp file. 
err(%d)", errno); - if(file){ - fclose(file); - file = NULL; + // check size, st_size, loading stat file + if(-1 == size){ + if(static_cast(st.st_size) != pagelist.Size()){ + pagelist.Resize(st.st_size, false); + need_save_csf = true; // need to update page info } + size = static_cast(st.st_size); + }else{ + if(static_cast(size) != pagelist.Size()){ + pagelist.Resize(static_cast(size), false); + need_save_csf = true; // need to update page info + } + if(static_cast(size) != static_cast(st.st_size)){ + is_truncate = true; + } + } + }else{ + // could not load stat file or open file + if(-1 == (fd = open(cachepath.c_str(), O_CREAT|O_RDWR|O_TRUNC, 0600))){ + S3FS_PRN_ERR("failed to open file(%s). errno(%d)", cachepath.c_str(), errno); return (0 == errno ? -EIO : -errno); } + need_save_csf = true; // need to update page info if(-1 == size){ size = 0; + pagelist.Init(0, false); }else{ + pagelist.Resize(static_cast(size), false); is_truncate = true; } } + + // make file pointer(for being same tmpfile) + if(NULL == (pfile = fdopen(fd, "wb"))){ + S3FS_PRN_ERR("failed to get fileno(%s). errno(%d)", cachepath.c_str(), errno); + close(fd); + fd = -1; + return (0 == errno ? -EIO : -errno); + } + + }else{ + // not using cache + + // open temporary file + if(NULL == (pfile = tmpfile()) || -1 ==(fd = fileno(pfile))){ + S3FS_PRN_ERR("failed to open tmp file. err(%d)", errno); + if(pfile){ + fclose(pfile); + pfile = NULL; + } + return (0 == errno ? -EIO : -errno); + } + if(-1 == size){ + size = 0; + pagelist.Init(0, false); + }else{ + pagelist.Resize(static_cast(size), false); + is_truncate = true; + } } - // truncate + // truncate cache(tmp) file if(is_truncate){ - if(0 != ftruncate(fd, size) || 0 != fsync(fd)){ - DPRN("ftruncate(%s) or fsync returned err(%d)", cachepath.c_str(), errno); - fclose(file); - file = NULL; - fd = -1; + if(0 != ftruncate(fd, static_cast(size)) || 0 != fsync(fd)){ + S3FS_PRN_ERR("ftruncate(%s) or fsync returned err(%d)", cachepath.c_str(), errno); + fclose(pfile); + pfile = NULL; + fd = -1; return (0 == errno ? -EIO : -errno); } } @@ -670,30 +805,93 @@ int FdEntity::Open(off_t size, time_t time) // set mtime if(-1 != time){ if(0 != SetMtime(time)){ - DPRN("failed to set mtime. errno(%d)", errno); - fclose(file); - file = NULL; - fd = -1; + S3FS_PRN_ERR("failed to set mtime. errno(%d)", errno); + fclose(pfile); + pfile = NULL; + fd = -1; return (0 == errno ? -EIO : -errno); } } - // set internal data - if(already_opened){ - Dup(); - }else{ - if(!is_csf_loaded){ - pagelist.Init(size, init_value); + // reset cache stat file + if(need_save_csf){ + CacheFileStat cfstat(path.c_str()); + if(!pagelist.Serialize(cfstat, true)){ + S3FS_PRN_WARN("failed to save cache stat file(%s), but continue...", path.c_str()); } - refcnt = 1; + } + + // init internal data + refcnt = 1; + is_modify = false; + + // set original headers and size in it. + if(pmeta){ + orgmeta = *pmeta; + size_orgmeta = static_cast(get_size(orgmeta)); + }else{ + orgmeta.clear(); + size_orgmeta = 0; + } + + return 0; +} + +// [NOTE] +// This method is called from olny nocopapi functions. +// So we do not check disk space for this option mode, if there is no enough +// disk space this method will be failed. 
+// +bool FdEntity::OpenAndLoadAll(headers_t* pmeta, size_t* size, bool force_load) +{ + int result; + + S3FS_PRN_INFO3("[path=%s][fd=%d]", path.c_str(), fd); + + if(-1 == fd){ + if(0 != Open(pmeta)){ + return false; + } + } + AutoLock auto_lock(&fdent_lock); + + if(force_load){ + SetAllStatusUnloaded(); + } + // + // TODO: possibly do background for delay loading + // + if(0 != (result = Load())){ + S3FS_PRN_ERR("could not download, result(%d)", result); + return false; + } + if(is_modify){ is_modify = false; } - return 0; + if(size){ + *size = pagelist.Size(); + } + return true; +} + +bool FdEntity::GetStats(struct stat& st) +{ + if(-1 == fd){ + return false; + } + AutoLock auto_lock(&fdent_lock); + + memset(&st, 0, sizeof(struct stat)); + if(-1 == fstat(fd, &st)){ + S3FS_PRN_ERR("fstat failed. errno(%d)", errno); + return false; + } + return true; } int FdEntity::SetMtime(time_t time) { - FPRNINFO("[path=%s][fd=%d][time=%jd]", path.c_str(), fd, (intmax_t)time); + S3FS_PRN_INFO3("[path=%s][fd=%d][time=%jd]", path.c_str(), fd, (intmax_t)time); if(-1 == time){ return 0; @@ -707,7 +905,7 @@ int FdEntity::SetMtime(time_t time) tv[1].tv_sec = tv[0].tv_sec; tv[1].tv_usec= 0L; if(-1 == futimes(fd, tv)){ - DPRN("futimes failed. errno(%d)", errno); + S3FS_PRN_ERR("futimes failed. errno(%d)", errno); return -errno; } }else if(0 < cachepath.size()){ @@ -716,14 +914,26 @@ int FdEntity::SetMtime(time_t time) n_mtime.modtime = time; n_mtime.actime = time; if(-1 == utime(cachepath.c_str(), &n_mtime)){ - DPRNINFO("utime failed. errno(%d)", errno); + S3FS_PRN_ERR("utime failed. errno(%d)", errno); return -errno; } } + orgmeta["x-amz-meta-mtime"] = str(time); + return 0; } -bool FdEntity::GetSize(off_t& size) +bool FdEntity::UpdateMtime(void) +{ + struct stat st; + if(!GetStats(st)){ + return false; + } + orgmeta["x-amz-meta-mtime"] = str(st.st_mtime); + return true; +} + +bool FdEntity::GetSize(size_t& size) { if(-1 == fd){ return false; @@ -734,137 +944,359 @@ bool FdEntity::GetSize(off_t& size) return true; } -bool FdEntity::GetMtime(time_t& time) +bool FdEntity::SetMode(mode_t mode) { - struct stat st; - - if(!GetStats(st)){ - return false; - } - time = st.st_mtime; + orgmeta["x-amz-meta-mode"] = str(mode); return true; } -bool FdEntity::GetStats(struct stat& st) +bool FdEntity::SetUId(uid_t uid) { - if(-1 == fd){ - return false; - } - AutoLock auto_lock(&fdent_lock); - - memset(&st, 0, sizeof(struct stat)); - if(-1 == fstat(fd, &st)){ - DPRN("fstat failed. errno(%d)", errno); - return false; - } + orgmeta["x-amz-meta-uid"] = str(uid); return true; } -bool FdEntity::SetAllStatus(bool is_enable) +bool FdEntity::SetGId(gid_t gid) { - FPRNINFO("[path=%s][fd=%d][%s]", path.c_str(), fd, is_enable ? "enable" : "disable"); + orgmeta["x-amz-meta-gid"] = str(gid); + return true; +} + +bool FdEntity::SetContentType(const char* path) +{ + if(!path){ + return false; + } + orgmeta["Content-Type"] = S3fsCurl::LookupMimeType(string(path)); + return true; +} + +bool FdEntity::SetAllStatus(bool is_loaded) +{ + S3FS_PRN_INFO3("[path=%s][fd=%d][%s]", path.c_str(), fd, is_loaded ? "loaded" : "unloaded"); if(-1 == fd){ return false; } - AutoLock auto_lock(&fdent_lock); + // [NOTE] + // this method is only internal use, and calling after locking. + // so do not lock now. + // + //AutoLock auto_lock(&fdent_lock); // get file size struct stat st; memset(&st, 0, sizeof(struct stat)); if(-1 == fstat(fd, &st)){ - DPRN("fstat is failed. errno(%d)", errno); + S3FS_PRN_ERR("fstat is failed. 
errno(%d)", errno); return false; } // Reinit - pagelist.Init(st.st_size, is_enable); + pagelist.Init(st.st_size, is_loaded); return true; } -int FdEntity::Load(off_t start, off_t size) +int FdEntity::Load(off_t start, size_t size) { - int result = 0; - - FPRNINFO("[path=%s][fd=%d][offset=%jd][size=%jd]", path.c_str(), fd, (intmax_t)start, (intmax_t)size); + S3FS_PRN_DBG("[path=%s][fd=%d][offset=%jd][size=%jd]", path.c_str(), fd, (intmax_t)start, (intmax_t)size); if(-1 == fd){ return -EBADF; } AutoLock auto_lock(&fdent_lock); + int result = 0; + // check loaded area & load - fdpage_list_t uninit_list; - if(0 < pagelist.GetUninitPages(uninit_list, start, size)){ - for(fdpage_list_t::iterator iter = uninit_list.begin(); iter != uninit_list.end(); iter++){ - if(-1 != size && (start + size) <= (*iter)->offset){ + fdpage_list_t unloaded_list; + if(0 < pagelist.GetUnloadedPages(unloaded_list, start, size)){ + for(fdpage_list_t::iterator iter = unloaded_list.begin(); iter != unloaded_list.end(); ++iter){ + if(0 != size && static_cast(start + size) <= static_cast((*iter)->offset)){ + // reached end break; } + // check loading size + size_t need_load_size = 0; + if(static_cast((*iter)->offset) < size_orgmeta){ + // original file size(on S3) is smaller than request. + need_load_size = (static_cast((*iter)->next()) <= size_orgmeta ? (*iter)->bytes : (size_orgmeta - (*iter)->offset)); + } + size_t over_size = (*iter)->bytes - need_load_size; + // download - if((*iter)->bytes >= static_cast(2 * S3fsCurl::GetMultipartSize()) && !nomultipart){ // default 20MB + if(static_cast(2 * S3fsCurl::GetMultipartSize()) < need_load_size && !nomultipart){ // default 20MB // parallel request // Additional time is needed for large files time_t backup = 0; if(120 > S3fsCurl::GetReadwriteTimeout()){ backup = S3fsCurl::SetReadwriteTimeout(120); } - result = S3fsCurl::ParallelGetObjectRequest(path.c_str(), fd, (*iter)->offset, (*iter)->bytes); + result = S3fsCurl::ParallelGetObjectRequest(path.c_str(), fd, (*iter)->offset, need_load_size); if(0 != backup){ S3fsCurl::SetReadwriteTimeout(backup); } }else{ // single request S3fsCurl s3fscurl; - result = s3fscurl.GetObjectRequest(path.c_str(), fd, (*iter)->offset, (*iter)->bytes); + result = s3fscurl.GetObjectRequest(path.c_str(), fd, (*iter)->offset, need_load_size); } if(0 != result){ break; } - // Set init flag - pagelist.SetInit((*iter)->offset, static_cast((*iter)->bytes), true); + // initialize for the area of over original size + if(0 < over_size){ + if(0 != (result = FdEntity::FillFile(fd, 0, over_size, (*iter)->offset + need_load_size))){ + S3FS_PRN_ERR("failed to fill rest bytes for fd(%d). errno(%d)", fd, result); + break; + } + // set modify flag + is_modify = false; + } + + // Set loaded flag + pagelist.SetPageLoadedStatus((*iter)->offset, static_cast((*iter)->bytes), true); } - PageList::FreeList(uninit_list); + PageList::FreeList(unloaded_list); } return result; } -bool FdEntity::LoadFull(off_t* size, bool force_load) +// [NOTE] +// At no disk space for caching object. +// This method is downloading by dividing an object of the specified range +// and uploading by multipart after finishing downloading it. +// +// [NOTICE] +// Need to lock before calling this method. 
+//
+int FdEntity::NoCacheLoadAndPost(off_t start, size_t size)
 {
-  int result;
+  int result = 0;
 
-  FPRNINFO("[path=%s][fd=%d]", path.c_str(), fd);
+  S3FS_PRN_INFO3("[path=%s][fd=%d][offset=%jd][size=%jd]", path.c_str(), fd, (intmax_t)start, (intmax_t)size);
 
   if(-1 == fd){
-    if(0 != Open()){
-      return false;
+    return -EBADF;
+  }
+
+  // [NOTE]
+  // Calling this method means that the cache file is never used again.
+  //
+  if(0 != cachepath.size()){
+    // remove cache files(and cache stat file)
+    FdManager::DeleteCacheFile(path.c_str());
+    // the cache file path is no longer used.
+    cachepath.erase();
+  }
+
+  // Change entity key in manager mapping
+  FdManager::get()->ChangeEntityToTempPath(this, path.c_str());
+
+  // open temporary file
+  FILE* ptmpfp;
+  int   tmpfd;
+  if(NULL == (ptmpfp = tmpfile()) || -1 == (tmpfd = fileno(ptmpfp))){
+    S3FS_PRN_ERR("failed to open tmp file. err(%d)", errno);
+    if(ptmpfp){
+      fclose(ptmpfp);
+    }
+    return (0 == errno ? -EIO : -errno);
+  }
+
+  // loop uploading by multipart
+  for(fdpage_list_t::iterator iter = pagelist.pages.begin(); iter != pagelist.pages.end(); ++iter){
+    if((*iter)->end() < start){
+      continue;
+    }
+    if(0 != size && static_cast<size_t>(start + size) <= static_cast<size_t>((*iter)->offset)){
+      break;
+    }
+    // download in units of one multipart size(default 10MB)
+    for(size_t oneread = 0, totalread = ((*iter)->offset < start ? start : 0); totalread < (*iter)->bytes; totalread += oneread){
+      int   upload_fd = fd;
+      off_t offset    = (*iter)->offset + totalread;
+      oneread         = min(((*iter)->bytes - totalread), static_cast<size_t>(S3fsCurl::GetMultipartSize()));
+
+      // check that the rest size is over the minimum part size
+      //
+      // [NOTE]
+      // If the final part size is smaller than 5MB, it is not allowed by the S3 API.
+      // In that case, if the part before the final part is not over 5GB,
+      // we fold the final part into it. If that part would exceed 5GB,
+      // we even out the last part and the part before it instead.
+      //
+      if(((*iter)->bytes - totalread - oneread) < MIN_MULTIPART_SIZE){
+        if(FIVE_GB < ((*iter)->bytes - totalread)){
+          oneread = ((*iter)->bytes - totalread) / 2;
+        }else{
+          oneread = ((*iter)->bytes - totalread);
+        }
+      }
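+      // (e.g., with the default 10MB part size, a 24MB remainder is not
+      //  split 10+10+4 but 10+14, folding the undersized tail into the
+      //  previous part; the FIVE_GB branch can only trigger when
+      //  multipart_size is configured very large, and then halves the
+      //  remainder so neither of the last two parts exceeds the 5GB
+      //  per-part limit)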
errno(%d)", tmpfd, result); + break; + } + // set modify flag + is_modify = false; + } + + }else{ + // already loaded area + } + + // single area upload by multipart post + if(0 != (result = NoCacheMultipartPost(upload_fd, offset, oneread))){ + S3FS_PRN_ERR("failed to multipart post(start=%zd, size=%zu) for file(%d).", offset, oneread, upload_fd); + break; + } + } + if(0 != result){ + break; + } + + // set loaded flag + if(!(*iter)->loaded){ + if((*iter)->offset < start){ + fdpage* page = new fdpage((*iter)->offset, static_cast(start - (*iter)->offset), (*iter)->loaded); + (*iter)->bytes -= (start - (*iter)->offset); + (*iter)->offset = start; + pagelist.pages.insert(iter, page); + } + if(0 != size && static_cast(start + size) < static_cast((*iter)->next())){ + fdpage* page = new fdpage((*iter)->offset, static_cast((start + size) - (*iter)->offset), true); + (*iter)->bytes -= static_cast((start + size) - (*iter)->offset); + (*iter)->offset = start + size; + pagelist.pages.insert(iter, page); + }else{ + (*iter)->loaded = true; + } } } - if(force_load){ - SetAllDisable(); + if(0 == result){ + // compress pagelist + pagelist.Compress(); + + // fd data do empty + if(-1 == ftruncate(fd, 0)){ + S3FS_PRN_ERR("failed to truncate file(%d), but continue...", fd); + } } - // - // TODO: possibly do background for delay loading - // - if(0 != (result = Load(0, pagelist.Size()))){ - DPRN("could not download, result(%d)", result); - return false; - } - if(is_modify){ - AutoLock auto_lock(&fdent_lock); - is_modify = false; - } - if(size){ - *size = pagelist.Size(); - } - return true; + + // close temporary + fclose(ptmpfp); + + return result; } -int FdEntity::RowFlush(const char* tpath, headers_t& meta, bool force_sync) +// [NOTE] +// At no disk space for caching object. +// This method is starting multipart uploading. +// +int FdEntity::NoCachePreMultipartPost(void) +{ + // initialize multipart upload values + upload_id.erase(); + etaglist.clear(); + + S3fsCurl s3fscurl(true); + int result; + if(0 != (result = s3fscurl.PreMultipartPostRequest(path.c_str(), orgmeta, upload_id, false))){ + return result; + } + s3fscurl.DestroyCurlHandle(); + return 0; +} + +// [NOTE] +// At no disk space for caching object. +// This method is uploading one part of multipart. +// +int FdEntity::NoCacheMultipartPost(int tgfd, off_t start, size_t size) +{ + if(-1 == tgfd || upload_id.empty()){ + S3FS_PRN_ERR("Need to initialize for multipart post."); + return -EIO; + } + S3fsCurl s3fscurl(true); + return s3fscurl.MultipartUploadRequest(upload_id, path.c_str(), tgfd, start, size, etaglist); +} + +// [NOTE] +// At no disk space for caching object. +// This method is finishing multipart uploading. 
+// +int FdEntity::NoCacheCompleteMultipartPost(void) +{ + if(upload_id.empty() || etaglist.empty()){ + S3FS_PRN_ERR("There is no upload id or etag list."); + return -EIO; + } + + S3fsCurl s3fscurl(true); + int result; + if(0 != (result = s3fscurl.CompleteMultipartPostRequest(path.c_str(), upload_id, etaglist))){ + return result; + } + s3fscurl.DestroyCurlHandle(); + + // reset values + upload_id.erase(); + etaglist.clear(); + mp_start = 0; + mp_size = 0; + + return 0; +} + +int FdEntity::RowFlush(const char* tpath, bool force_sync) { int result; - FPRNINFO("[tpath=%s][path=%s][fd=%d]", SAFESTRPTR(tpath), path.c_str(), fd); + S3FS_PRN_INFO3("[tpath=%s][path=%s][fd=%d]", SAFESTRPTR(tpath), path.c_str(), fd); if(-1 == fd){ return -EBADF; @@ -876,50 +1308,100 @@ int FdEntity::RowFlush(const char* tpath, headers_t& meta, bool force_sync) return 0; } - /* - * Make decision to do multi upload (or not) based upon file size - * - * According to the AWS spec: - * - 1 to 10,000 parts are allowed - * - minimum size of parts is 5MB (expect for the last part) - * - * For our application, we will define minimum part size to be 10MB (10 * 2^20 Bytes) - * minimum file size will be 64 GB - 2 ** 36 - * - * Initially uploads will be done serially - * - * If file is > 20MB, then multipart will kick in - */ - if(pagelist.Size() > (MAX_MULTIPART_CNT * S3fsCurl::GetMultipartSize())){ - // close f ? - return -ENOTSUP; + // If there is no loading all of the area, loading all area. + size_t restsize = pagelist.GetTotalUnloadedPageSize(); + if(0 < restsize){ + if(0 == upload_id.length()){ + // check disk space + if(FdManager::IsSafeDiskSpace(NULL, restsize)){ + // enough disk space + // Load all unitialized area + if(0 != (result = Load())){ + S3FS_PRN_ERR("failed to upload all area(errno=%d)", result); + return static_cast(result); + } + }else{ + // no enough disk space + // upload all by multipart uploading + if(0 != (result = NoCacheLoadAndPost())){ + S3FS_PRN_ERR("failed to upload all area by multipart uploading(errno=%d)", result); + return static_cast(result); + } + } + }else{ + // alreay start miltipart uploading + } } - // seek to head of file. - if(0 != lseek(fd, 0, SEEK_SET)){ - DPRN("lseek error(%d)", errno); - return -errno; - } + if(0 == upload_id.length()){ + // normal uploading - if(pagelist.Size() >= (2 * S3fsCurl::GetMultipartSize()) && !nomultipart){ // default 20MB - // Additional time is needed for large files - time_t backup = 0; - if(120 > S3fsCurl::GetReadwriteTimeout()){ - backup = S3fsCurl::SetReadwriteTimeout(120); + /* + * Make decision to do multi upload (or not) based upon file size + * + * According to the AWS spec: + * - 1 to 10,000 parts are allowed + * - minimum size of parts is 5MB (expect for the last part) + * + * For our application, we will define minimum part size to be 10MB (10 * 2^20 Bytes) + * minimum file size will be 64 GB - 2 ** 36 + * + * Initially uploads will be done serially + * + * If file is > 20MB, then multipart will kick in + */ + if(pagelist.Size() > static_cast(MAX_MULTIPART_CNT * S3fsCurl::GetMultipartSize())){ + // close f ? + return -ENOTSUP; } - result = S3fsCurl::ParallelMultipartUploadRequest(tpath ? tpath : path.c_str(), meta, fd); - if(0 != backup){ - S3fsCurl::SetReadwriteTimeout(backup); + + // seek to head of file. 
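+    // (the upload requests below consume this descriptor directly, so it is
+    //  rewound to offset 0 before the upload and again after it succeeds)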
+ if(0 != lseek(fd, 0, SEEK_SET)){ + S3FS_PRN_ERR("lseek error(%d)", errno); + return -errno; } + + if(pagelist.Size() >= static_cast(2 * S3fsCurl::GetMultipartSize()) && !nomultipart){ // default 20MB + // Additional time is needed for large files + time_t backup = 0; + if(120 > S3fsCurl::GetReadwriteTimeout()){ + backup = S3fsCurl::SetReadwriteTimeout(120); + } + result = S3fsCurl::ParallelMultipartUploadRequest(tpath ? tpath : path.c_str(), orgmeta, fd); + if(0 != backup){ + S3fsCurl::SetReadwriteTimeout(backup); + } + }else{ + S3fsCurl s3fscurl(true); + result = s3fscurl.PutRequest(tpath ? tpath : path.c_str(), orgmeta, fd); + } + + // seek to head of file. + if(0 == result && 0 != lseek(fd, 0, SEEK_SET)){ + S3FS_PRN_ERR("lseek error(%d)", errno); + return -errno; + } + }else{ - S3fsCurl s3fscurl(true); - result = s3fscurl.PutRequest(tpath ? tpath : path.c_str(), meta, fd); - } - - // seek to head of file. - if(0 == result && 0 != lseek(fd, 0, SEEK_SET)){ - DPRN("lseek error(%d)", errno); - return -errno; + // upload rest data + if(0 < mp_size){ + if(0 != (result = NoCacheMultipartPost(fd, mp_start, mp_size))){ + S3FS_PRN_ERR("failed to multipart post(start=%zd, size=%zu) for file(%d).", mp_start, mp_size, fd); + return result; + } + mp_start = 0; + mp_size = 0; + } + // complete multipart uploading. + if(0 != (result = NoCacheCompleteMultipartPost())){ + S3FS_PRN_ERR("failed to complete(finish) multipart post for file(%d).", fd); + return result; + } + // truncate file to zero + if(-1 == ftruncate(fd, 0)){ + // So the file has already been removed, skip error. + S3FS_PRN_ERR("failed to truncate file(%d) to zero, but continue...", fd); + } } if(0 == result){ @@ -930,65 +1412,136 @@ int FdEntity::RowFlush(const char* tpath, headers_t& meta, bool force_sync) ssize_t FdEntity::Read(char* bytes, off_t start, size_t size, bool force_load) { - int result; - ssize_t rsize; - - FPRNINFO("[path=%s][fd=%d][offset=%jd][size=%zu]", path.c_str(), fd, (intmax_t)start, size); + S3FS_PRN_DBG("[path=%s][fd=%d][offset=%jd][size=%zu]", path.c_str(), fd, (intmax_t)start, size); if(-1 == fd){ return -EBADF; } + AutoLock auto_lock(&fdent_lock); + if(force_load){ - AutoLock auto_lock(&fdent_lock); - pagelist.SetInit(start, static_cast(size), false); + pagelist.SetPageLoadedStatus(start, size, false); } - // Loading - if(0 != (result = Load(start, size))){ - DPRN("could not download. start(%jd), size(%zu), errno(%d)", (intmax_t)start, size, result); - return -EIO; + + int result; + ssize_t rsize; + + // check disk space + if(0 < pagelist.GetTotalUnloadedPageSize(start, size)){ + if(!FdManager::IsSafeDiskSpace(NULL, size)){ + // [NOTE] + // If the area of this entity fd used can be released, try to do it. + // But If file data is updated, we can not even release of fd. + // Fundamentally, this method will fail as long as the disk capacity + // is not ensured. + // + if(!is_modify){ + // try to clear all cache for this fd. 
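+        // (ftruncate to 0 frees the cached blocks, and the second ftruncate
+        //  restores the logical length as a sparse file, so the pagelist can
+        //  be reset to all-unloaded without losing the file size)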
+ pagelist.Init(pagelist.Size(), false); + if(-1 == ftruncate(fd, 0) || -1 == ftruncate(fd, pagelist.Size())){ + S3FS_PRN_ERR("failed to truncate temporary file(%d).", fd); + return -ENOSPC; + } + } + } + + // load size(for prefetch) + size_t load_size = size; + if(static_cast(start + size) < pagelist.Size()){ + size_t prefetch_max_size = max(size, static_cast(S3fsCurl::GetMultipartSize())); + + if(static_cast(start + prefetch_max_size) < pagelist.Size()){ + load_size = prefetch_max_size; + }else{ + load_size = static_cast(pagelist.Size() - start); + } + } + // Loading + if(0 < size && 0 != (result = Load(start, load_size))){ + S3FS_PRN_ERR("could not download. start(%jd), size(%zu), errno(%d)", (intmax_t)start, size, result); + return -EIO; + } } // Reading - { - AutoLock auto_lock(&fdent_lock); - - if(-1 == (rsize = pread(fd, bytes, size, start))){ - DPRN("pread failed. errno(%d)", errno); - return -errno; - } + if(-1 == (rsize = pread(fd, bytes, size, start))){ + S3FS_PRN_ERR("pread failed. errno(%d)", errno); + return -errno; } return rsize; } ssize_t FdEntity::Write(const char* bytes, off_t start, size_t size) { - int result; - ssize_t wsize; - - FPRNINFO("[path=%s][fd=%d][offset=%jd][size=%zu]", path.c_str(), fd, (intmax_t)start, size); + S3FS_PRN_DBG("[path=%s][fd=%d][offset=%jd][size=%zu]", path.c_str(), fd, (intmax_t)start, size); if(-1 == fd){ return -EBADF; } + AutoLock auto_lock(&fdent_lock); - // Load unitialized area which starts from 0 to (start + size) before writing. - if(0 != (result = Load(0, start))){ - DPRN("failed to load uninitialized area before writing(errno=%d)", result); - return static_cast(result); + int result; + ssize_t wsize; + + if(0 == upload_id.length()){ + // check disk space + size_t restsize = pagelist.GetTotalUnloadedPageSize(0, start) + size; + if(FdManager::IsSafeDiskSpace(NULL, restsize)){ + // enough disk space + + // Load unitialized area which starts from 0 to (start + size) before writing. + if(0 < start && 0 != (result = Load(0, static_cast(start)))){ + S3FS_PRN_ERR("failed to load uninitialized area before writing(errno=%d)", result); + return static_cast(result); + } + }else{ + // no enough disk space + if(0 != (result = NoCachePreMultipartPost())){ + S3FS_PRN_ERR("failed to switch multipart uploading with no cache(errno=%d)", result); + return static_cast(result); + } + // start multipart uploading + if(0 != (result = NoCacheLoadAndPost(0, start))){ + S3FS_PRN_ERR("failed to load uninitialized area and multipart uploading it(errno=%d)", result); + return static_cast(result); + } + mp_start = start; + mp_size = 0; + } + }else{ + // alreay start miltipart uploading } // Writing - { - AutoLock auto_lock(&fdent_lock); + if(-1 == (wsize = pwrite(fd, bytes, size, start))){ + S3FS_PRN_ERR("pwrite failed. errno(%d)", errno); + return -errno; + } + if(!is_modify){ + is_modify = true; + } + if(0 < wsize){ + pagelist.SetPageLoadedStatus(start, static_cast(wsize), true); + } - if(-1 == (wsize = pwrite(fd, bytes, size, start))){ - DPRN("pwrite failed. 
errno(%d)", errno); - return -errno; - } - if(!is_modify){ - is_modify = true; - } - if(0 < wsize){ - pagelist.SetInit(start, static_cast(wsize), true); + // check multipart uploading + if(0 < upload_id.length()){ + mp_size += static_cast(wsize); + if(static_cast(S3fsCurl::GetMultipartSize()) <= mp_size){ + // over one multipart size + if(0 != (result = NoCacheMultipartPost(fd, mp_start, mp_size))){ + S3FS_PRN_ERR("failed to multipart post(start=%zd, size=%zu) for file(%d).", mp_start, mp_size, fd); + return result; + } + // [NOTE] + // truncate file to zero and set length to part offset + size + // after this, file length is (offset + size), but file does not use any disk space. + // + if(-1 == ftruncate(fd, 0) || -1 == ftruncate(fd, (mp_start + mp_size))){ + S3FS_PRN_ERR("failed to truncate file(%d).", fd); + return -EIO; + } + mp_start += mp_size; + mp_size = 0; } } return wsize; @@ -1019,7 +1572,7 @@ FdManager FdManager::singleton; pthread_mutex_t FdManager::fd_manager_lock; bool FdManager::is_lock_init(false); string FdManager::cache_dir(""); -size_t FdManager::page_size(FDPAGE_SIZE); +size_t FdManager::free_disk_space = 0; //------------------------------------------------ // FdManager class methods @@ -1034,17 +1587,6 @@ bool FdManager::SetCacheDir(const char* dir) return true; } -size_t FdManager::SetPageSize(size_t size) -{ - // If already has entries, this function is failed. - if(0 < FdManager::get()->fent.size()){ - return -1; - } - size_t old = FdManager::page_size; - FdManager::page_size = size; - return old; -} - bool FdManager::DeleteCacheDirectory(void) { if(0 == FdManager::cache_dir.size()){ @@ -1059,7 +1601,7 @@ bool FdManager::DeleteCacheDirectory(void) int FdManager::DeleteCacheFile(const char* path) { - FPRNINFO("[path=%s]", SAFESTRPTR(path)); + S3FS_PRN_INFO3("[path=%s]", SAFESTRPTR(path)); if(!path){ return -EIO; @@ -1073,11 +1615,19 @@ int FdManager::DeleteCacheFile(const char* path) } int result = 0; if(0 != unlink(cache_path.c_str())){ - DPRNINFO("failed to delete file(%s): errno=%d", path, errno); + if(ENOENT == errno){ + S3FS_PRN_DBG("failed to delete file(%s): errno=%d", path, errno); + }else{ + S3FS_PRN_ERR("failed to delete file(%s): errno=%d", path, errno); + } result = -errno; } if(!CacheFileStat::DeleteCacheFileStat(path)){ - DPRNINFO("failed to delete stat file(%s): errno=%d", path, errno); + if(ENOENT == errno){ + S3FS_PRN_DBG("failed to delete stat file(%s): errno=%d", path, errno); + }else{ + S3FS_PRN_ERR("failed to delete stat file(%s): errno=%d", path, errno); + } if(0 != errno){ result = -errno; }else{ @@ -1095,7 +1645,11 @@ bool FdManager::MakeCachePath(const char* path, string& cache_path, bool is_crea } string resolved_path(FdManager::cache_dir + "/" + bucket); if(is_create_dir){ - mkdirp(resolved_path + mydirname(path), 0777); + int result; + if(0 != (result = mkdirp(resolved_path + mydirname(path), 0777))){ + S3FS_PRN_ERR("failed to create dir(%s) by errno(%d).", path, result); + return false; + } } if(!path || '\0' == path[0]){ cache_path = resolved_path; @@ -1105,6 +1659,16 @@ bool FdManager::MakeCachePath(const char* path, string& cache_path, bool is_crea return true; } +bool FdManager::CheckCacheTopDir(void) +{ + if(0 == FdManager::cache_dir.size()){ + return true; + } + string toppath(FdManager::cache_dir + "/" + bucket); + + return check_exist_dir_permission(toppath.c_str()); +} + bool FdManager::MakeRandomTempPath(const char* path, string& tmppath) { char szBuff[64]; @@ -1115,6 +1679,52 @@ bool FdManager::MakeRandomTempPath(const char* 
path, string& tmppath) return true; } +size_t FdManager::SetEnsureFreeDiskSpace(size_t size) +{ + size_t old = FdManager::free_disk_space; + if(0 == size){ + if(0 == FdManager::free_disk_space){ + FdManager::free_disk_space = static_cast(S3fsCurl::GetMultipartSize()); + } + }else{ + if(0 == FdManager::free_disk_space){ + FdManager::free_disk_space = max(size, static_cast(S3fsCurl::GetMultipartSize())); + }else{ + if(static_cast(S3fsCurl::GetMultipartSize()) <= size){ + FdManager::free_disk_space = size; + } + } + } + return old; +} + +fsblkcnt_t FdManager::GetFreeDiskSpace(const char* path) +{ + struct statvfs vfsbuf; + string ctoppath; + if(0 < FdManager::cache_dir.size()){ + ctoppath = FdManager::cache_dir + "/"; + }else{ + ctoppath = TMPFILE_DIR_0PATH "/"; + } + if(path && '\0' != *path){ + ctoppath += path; + }else{ + ctoppath += "."; + } + if(-1 == statvfs(ctoppath.c_str(), &vfsbuf)){ + S3FS_PRN_ERR("could not get vfs stat by errno(%d)", errno); + return 0; + } + return (vfsbuf.f_bavail * vfsbuf.f_bsize); +} + +bool FdManager::IsSafeDiskSpace(const char* path, size_t size) +{ + fsblkcnt_t fsize = FdManager::GetFreeDiskSpace(path); + return ((size + FdManager::GetEnsureFreeDiskSpace()) <= fsize); +} + //------------------------------------------------ // FdManager methods //------------------------------------------------ @@ -1126,7 +1736,7 @@ FdManager::FdManager() FdManager::is_lock_init = true; }catch(exception& e){ FdManager::is_lock_init = false; - DPRNCRIT("failed to init mutex"); + S3FS_PRN_CRIT("failed to init mutex"); } }else{ assert(false); @@ -1136,7 +1746,7 @@ FdManager::FdManager() FdManager::~FdManager() { if(this == FdManager::get()){ - for(fdent_map_t::iterator iter = fent.begin(); fent.end() != iter; iter++){ + for(fdent_map_t::iterator iter = fent.begin(); fent.end() != iter; ++iter){ FdEntity* ent = (*iter).second; delete ent; } @@ -1146,7 +1756,7 @@ FdManager::~FdManager() try{ pthread_mutex_destroy(&FdManager::fd_manager_lock); }catch(exception& e){ - DPRNCRIT("failed to init mutex"); + S3FS_PRN_CRIT("failed to init mutex"); } FdManager::is_lock_init = false; } @@ -1157,7 +1767,7 @@ FdManager::~FdManager() FdEntity* FdManager::GetFdEntity(const char* path, int existfd) { - FPRNINFO("[path=%s][fd=%d]", SAFESTRPTR(path), existfd); + S3FS_PRN_INFO3("[path=%s][fd=%d]", SAFESTRPTR(path), existfd); if(!path || '\0' == path[0]){ return NULL; @@ -1170,7 +1780,7 @@ FdEntity* FdManager::GetFdEntity(const char* path, int existfd) } if(-1 != existfd){ - for(iter = fent.begin(); iter != fent.end(); iter++){ + for(iter = fent.begin(); iter != fent.end(); ++iter){ if((*iter).second && (*iter).second->GetFd() == existfd){ // found opend fd in map if(0 == strcmp((*iter).second->GetPath(), path)){ @@ -1185,19 +1795,17 @@ FdEntity* FdManager::GetFdEntity(const char* path, int existfd) return NULL; } -FdEntity* FdManager::Open(const char* path, off_t size, time_t time, bool force_tmpfile, bool is_create) +FdEntity* FdManager::Open(const char* path, headers_t* pmeta, ssize_t size, time_t time, bool force_tmpfile, bool is_create) { - FdEntity* ent; - - FPRNINFO("[path=%s][size=%jd][time=%jd]", SAFESTRPTR(path), (intmax_t)size, (intmax_t)time); + S3FS_PRN_DBG("[path=%s][size=%jd][time=%jd]", SAFESTRPTR(path), (intmax_t)size, (intmax_t)time); if(!path || '\0' == path[0]){ return NULL; } - AutoLock auto_lock(&FdManager::fd_manager_lock); fdent_map_t::iterator iter = fent.find(string(path)); + FdEntity* ent; if(fent.end() != iter){ // found ent = (*iter).second; @@ -1206,7 +1814,7 @@ 
FdEntity* FdManager::Open(const char* path, off_t size, time_t time, bool force_ // not found string cache_path = ""; if(!force_tmpfile && !FdManager::MakeCachePath(path, cache_path, true)){ - DPRN("failed to make cache path for object(%s).", path); + S3FS_PRN_ERR("failed to make cache path for object(%s).", path); return NULL; } // make new obj @@ -1232,7 +1840,7 @@ FdEntity* FdManager::Open(const char* path, off_t size, time_t time, bool force_ } // open - if(-1 == ent->Open(size, time)){ + if(-1 == ent->Open(pmeta, size, time)){ return NULL; } return ent; @@ -1240,24 +1848,21 @@ FdEntity* FdManager::Open(const char* path, off_t size, time_t time, bool force_ FdEntity* FdManager::ExistOpen(const char* path, int existfd) { - FPRNINFO("[path=%s][fd=%d]", SAFESTRPTR(path), existfd); + S3FS_PRN_DBG("[path=%s][fd=%d]", SAFESTRPTR(path), existfd); // search by real path - FdEntity* ent = Open(path, -1, -1, false, false); + FdEntity* ent = Open(path, NULL, -1, -1, false, false); if(!ent && -1 != existfd){ // search from all fdentity because of not using cache. AutoLock auto_lock(&FdManager::fd_manager_lock); - for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); iter++){ + for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){ if((*iter).second && (*iter).second->GetFd() == existfd && (*iter).second->IsOpen()){ // found opend fd in map if(0 == strcmp((*iter).second->GetPath(), path)){ ent = (*iter).second; - // open - if(-1 == ent->Open(-1, -1)){ - return NULL; - } + ent->Dup(); }else{ // found fd, but it is used another file(file descriptor is recycled) // so returns NULL. @@ -1274,7 +1879,7 @@ void FdManager::Rename(const std::string &from, const std::string &to) fdent_map_t::iterator iter = fent.find(from); if(fent.end() != iter){ // found - FPRNINFO("[from=%s][to=%s]", from.c_str(), to.c_str()); + S3FS_PRN_DBG("[from=%s][to=%s]", from.c_str(), to.c_str()); FdEntity* ent = (*iter).second; fent.erase(iter); ent->SetPath(to); @@ -1284,11 +1889,11 @@ void FdManager::Rename(const std::string &from, const std::string &to) bool FdManager::Close(FdEntity* ent) { - FPRNINFO("[ent->file=%s][ent->fd=%d]", ent ? ent->GetPath() : "", ent ? ent->GetFd() : -1); + S3FS_PRN_DBG("[ent->file=%s][ent->fd=%d]", ent ? ent->GetPath() : "", ent ? 
ent->GetFd() : -1); AutoLock auto_lock(&FdManager::fd_manager_lock); - for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); iter++){ + for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){ if((*iter).second == ent){ ent->Close(); if(!ent->IsOpen()){ @@ -1301,6 +1906,24 @@ bool FdManager::Close(FdEntity* ent) return false; } +bool FdManager::ChangeEntityToTempPath(FdEntity* ent, const char* path) +{ + AutoLock auto_lock(&FdManager::fd_manager_lock); + + for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ){ + if((*iter).second == ent){ + fent.erase(iter++); + + string tmppath(""); + FdManager::MakeRandomTempPath(path, tmppath); + fent[tmppath] = ent; + }else{ + ++iter; + } + } + return false; +} + /* * Local variables: * tab-width: 4 diff --git a/src/fdcache.h b/src/fdcache.h index d5ebd97..932f1cb 100644 --- a/src/fdcache.h +++ b/src/fdcache.h @@ -20,6 +20,9 @@ #ifndef FD_CACHE_H_ #define FD_CACHE_H_ +#include +#include "curl.h" + //------------------------------------------------ // CacheFileStat //------------------------------------------------ @@ -34,8 +37,9 @@ class CacheFileStat public: static bool DeleteCacheFileStat(const char* path); + static bool CheckCacheFileStatTopDir(void); - CacheFileStat(const char* tpath = NULL); + explicit CacheFileStat(const char* tpath = NULL); ~CacheFileStat(); bool Open(void); @@ -52,40 +56,49 @@ struct fdpage { off_t offset; size_t bytes; - bool init; + bool loaded; - fdpage(off_t start = 0, size_t size = 0, bool is_init = false) - : offset(start), bytes(size), init(is_init) {} + fdpage(off_t start = 0, size_t size = 0, bool is_loaded = false) + : offset(start), bytes(size), loaded(is_loaded) {} off_t next(void) const { return (offset + bytes); } off_t end(void) const { return (0 < bytes ? offset + bytes - 1 : 0); } }; typedef std::list fdpage_list_t; +class FdEntity; + // // Management of loading area/modifying // class PageList { + friend class FdEntity; // only one method access directly pages. 
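+                            // (that one method is NoCacheLoadAndPost(), which
+                            //  splits and marks pages in place while uploading)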
+ private: fdpage_list_t pages; private: void Clear(void); + bool Compress(void); + bool Parse(off_t new_pos); public: static void FreeList(fdpage_list_t& list); - PageList(off_t size = 0, bool is_init = false); + explicit PageList(size_t size = 0, bool is_loaded = false); ~PageList(); - off_t Size(void) const; - int Resize(off_t size, bool is_init); - int Init(off_t size, bool is_init); - bool IsInit(off_t start, off_t size); - bool SetInit(off_t start, off_t size, bool is_init = true); - bool FindUninitPage(off_t start, off_t& resstart, size_t& ressize); - int GetUninitPages(fdpage_list_t& uninit_list, off_t start = 0, off_t size = -1); + bool Init(size_t size, bool is_loaded); + size_t Size(void) const; + bool Resize(size_t size, bool is_loaded); + + bool IsPageLoaded(off_t start = 0, size_t size = 0) const; // size=0 is checking to end of list + bool SetPageLoadedStatus(off_t start, size_t size, bool is_loaded = true, bool is_compress = true); + bool FindUnloadedPage(off_t start, off_t& resstart, size_t& ressize) const; + size_t GetTotalUnloadedPageSize(off_t start = 0, size_t size = 0) const; // size=0 is checking to end of list + int GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, size_t size = 0) const; // size=0 is checking to end of list + bool Serialize(CacheFileStat& file, bool is_output); void Dump(void); }; @@ -99,39 +112,61 @@ class FdEntity pthread_mutex_t fdent_lock; bool is_lock_init; PageList pagelist; - int refcnt; // reference count - std::string path; // object path - std::string cachepath; // local cache file path - int fd; // file descriptor(tmp file or cache file) - FILE* file; // file pointer(tmp file or cache file) - bool is_modify; // if file is changed, this flag is true + int refcnt; // reference count + std::string path; // object path + std::string cachepath; // local cache file path + // (if this is empty, does not load/save pagelist.) 
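+                            //  NoCacheLoadAndPost() erases this path when the
+                            //  entity falls back to a temporary file, which
+                            //  permanently disables the local cache for it.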
+ int fd; // file descriptor(tmp file or cache file) + FILE* pfile; // file pointer(tmp file or cache file) + bool is_modify; // if file is changed, this flag is true + headers_t orgmeta; // original headers at opening + size_t size_orgmeta; // original file size in original headers + + std::string upload_id; // for no cached multipart uploading when no disk space + etaglist_t etaglist; // for no cached multipart uploading when no disk space + off_t mp_start; // start position for no cached multipart(write method only) + size_t mp_size; // size for no cached multipart(write method only) private: + static int FillFile(int fd, unsigned char byte, size_t size, off_t start); + void Clear(void); - int Dup(void); - bool SetAllStatus(bool is_enable); + bool SetAllStatus(bool is_loaded); // [NOTE] not locking + //bool SetAllStatusLoaded(void) { return SetAllStatus(true); } + bool SetAllStatusUnloaded(void) { return SetAllStatus(false); } public: - FdEntity(const char* tpath = NULL, const char* cpath = NULL); + explicit FdEntity(const char* tpath = NULL, const char* cpath = NULL); ~FdEntity(); void Close(void); bool IsOpen(void) const { return (-1 != fd); } - int Open(off_t size = -1, time_t time = -1); + int Open(headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1); + bool OpenAndLoadAll(headers_t* pmeta = NULL, size_t* size = NULL, bool force_load = false); + int Dup(void); + const char* GetPath(void) const { return path.c_str(); } void SetPath(const std::string &newpath) { path = newpath; } int GetFd(void) const { return fd; } - int SetMtime(time_t time); - bool GetSize(off_t& size); - bool GetMtime(time_t& time); - bool GetStats(struct stat& st); - bool SetAllEnable(void) { return SetAllStatus(true); } - bool SetAllDisable(void) { return SetAllStatus(false); } - bool LoadFull(off_t* size = NULL, bool force_load = false); - int Load(off_t start, off_t size); - int RowFlush(const char* tpath, headers_t& meta, bool force_sync = false); - int Flush(headers_t& meta, bool force_sync = false) { return RowFlush(NULL, meta, force_sync); } + bool GetStats(struct stat& st); + int SetMtime(time_t time); + bool UpdateMtime(void); + bool GetSize(size_t& size); + bool SetMode(mode_t mode); + bool SetUId(uid_t uid); + bool SetGId(gid_t gid); + bool SetContentType(const char* path); + + int Load(off_t start = 0, size_t size = 0); // size=0 means loading to end + int NoCacheLoadAndPost(off_t start = 0, size_t size = 0); // size=0 means loading to end + int NoCachePreMultipartPost(void); + int NoCacheMultipartPost(int tgfd, off_t start, size_t size); + int NoCacheCompleteMultipartPost(void); + + int RowFlush(const char* tpath, bool force_sync = false); + int Flush(bool force_sync = false) { return RowFlush(NULL, force_sync); } + ssize_t Read(char* bytes, off_t start, size_t size, bool force_load = false); ssize_t Write(const char* bytes, off_t start, size_t size); }; @@ -147,9 +182,12 @@ class FdManager static pthread_mutex_t fd_manager_lock; static bool is_lock_init; static std::string cache_dir; - static size_t page_size; + static size_t free_disk_space; // limit free disk space - fdent_map_t fent; + fdent_map_t fent; + + private: + static fsblkcnt_t GetFreeDiskSpace(const char* path); public: FdManager(); @@ -163,16 +201,21 @@ class FdManager static bool SetCacheDir(const char* dir); static bool IsCacheDir(void) { return (0 < FdManager::cache_dir.size()); } static const char* GetCacheDir(void) { return FdManager::cache_dir.c_str(); } - static size_t SetPageSize(size_t size); - static size_t 
diff --git a/src/gnutls_auth.cpp b/src/gnutls_auth.cpp
index 9ffe958..dd6ec59 100644
--- a/src/gnutls_auth.cpp
+++ b/src/gnutls_auth.cpp
@@ -217,7 +217,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
       break;
     }else if(-1 == bytes){
       // error
-      DPRNNN("file read error(%d)", errno);
+      S3FS_PRN_ERR("file read error(%d)", errno);
       return NULL;
     }
     md5_update(&ctx_md5, bytes, buf);
@@ -261,7 +261,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
   memset(buf, 0, 512);

   if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_md5, GCRY_MD_MD5, 0))){
-    DPRNN("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
+    S3FS_PRN_ERR("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
     return NULL;
   }
@@ -273,7 +273,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
       break;
     }else if(-1 == bytes){
       // error
-      DPRNNN("file read error(%d)", errno);
+      S3FS_PRN_ERR("file read error(%d)", errno);
       return NULL;
     }
     gcry_md_write(ctx_md5, buf, bytes);
@@ -344,7 +344,7 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
       break;
     }else if(-1 == bytes){
       // error
-      DPRNNN("file read error(%d)", errno);
+      S3FS_PRN_ERR("file read error(%d)", errno);
       return NULL;
     }
     sha256_update(&ctx_sha256, bytes, buf);
@@ -375,7 +375,7 @@ bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char*
   gcry_md_hd_t ctx_sha256;
   gcry_error_t err;

   if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
-    DPRNN("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
+    S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
     free(*digest);
     return false;
   }
@@ -409,7 +409,7 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
   memset(buf, 0, 512);

   if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
-    DPRNN("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
+    S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
     return NULL;
   }
@@ -421,7 +421,7 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
       break;
     }else if(-1 == bytes){
       // error
-      DPRNNN("file read error(%d)", errno);
+      S3FS_PRN_ERR("file read error(%d)", errno);
       return NULL;
     }
     gcry_md_write(ctx_sha256, buf, bytes);
diff --git a/src/nss_auth.cpp b/src/nss_auth.cpp
index 4fb11a5..5a3e744 100644
--- a/src/nss_auth.cpp
+++ b/src/nss_auth.cpp
@@ -182,7 +182,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
       break;
     }else if(-1 == bytes){
       // error
-      DPRNNN("file read error(%d)", errno);
+      S3FS_PRN_ERR("file read error(%d)", errno);
       return NULL;
     }
     PK11_DigestOp(md5ctx, buf, bytes);
@@ -262,7 +262,8 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
       break;
     }else if(-1 == bytes){
       // error
-      DPRNNN("file read error(%d)", errno);
+      S3FS_PRN_ERR("file read error(%d)", errno);
+      PK11_DestroyContext(sha256ctx, PR_TRUE);
       return NULL;
     }
     PK11_DigestOp(sha256ctx, buf, bytes);
diff --git a/src/openssl_auth.cpp b/src/openssl_auth.cpp
index 2fbe8a0..80c8283 100644
--- a/src/openssl_auth.cpp
+++ b/src/openssl_auth.cpp
@@ -105,7 +105,7 @@ static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line)
   struct CRYPTO_dynlock_value* dyndata;

   if(NULL == (dyndata = static_cast<struct CRYPTO_dynlock_value*>(malloc(sizeof(struct CRYPTO_dynlock_value))))){
-    DPRNCRIT("Could not allocate memory for CRYPTO_dynlock_value");
+    S3FS_PRN_CRIT("Could not allocate memory for CRYPTO_dynlock_value");
     return NULL;
   }
   pthread_mutex_init(&(dyndata->dyn_mutex), NULL);
@@ -134,14 +134,14 @@ static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, const char* file, int line)
 bool s3fs_init_crypt_mutex(void)
 {
   if(s3fs_crypt_mutex){
-    FPRNNN("s3fs_crypt_mutex is not NULL, destroy it.");
+    S3FS_PRN_DBG("s3fs_crypt_mutex is not NULL, destroy it.");
     if(!s3fs_destroy_crypt_mutex()){
-      DPRN("Failed to s3fs_crypt_mutex");
+      S3FS_PRN_ERR("Failed to s3fs_crypt_mutex");
       return false;
     }
   }
   if(NULL == (s3fs_crypt_mutex = static_cast<pthread_mutex_t*>(malloc(CRYPTO_num_locks() * sizeof(pthread_mutex_t))))){
-    DPRNCRIT("Could not allocate memory for s3fs_crypt_mutex");
+    S3FS_PRN_CRIT("Could not allocate memory for s3fs_crypt_mutex");
     return false;
   }
   for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
@@ -250,7 +250,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
       break;
     }else if(-1 == bytes){
       // error
-      DPRNNN("file read error(%d)", errno);
+      S3FS_PRN_ERR("file read error(%d)", errno);
       return NULL;
     }
     MD5_Update(&md5ctx, buf, bytes);
@@ -297,10 +297,8 @@ bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char*
 unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
 {
-  const EVP_MD*  md = EVP_get_digestbyname("sha256");
-  EVP_MD_CTX*    sha256ctx = EVP_MD_CTX_create();
-  EVP_DigestInit_ex(sha256ctx, md, NULL);
-
+  const EVP_MD*  md = EVP_get_digestbyname("sha256");
+  EVP_MD_CTX*    sha256ctx;
   char           buf[512];
   ssize_t        bytes;
   unsigned char* result;
@@ -318,6 +316,9 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
     return NULL;
   }

+  sha256ctx = EVP_MD_CTX_create();
+  EVP_DigestInit_ex(sha256ctx, md, NULL);
+
   memset(buf, 0, 512);
   for(ssize_t total = 0; total < size; total += bytes){
     bytes = 512 < (size - total) ? 512 : (size - total);
@@ -327,13 +328,15 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
       break;
     }else if(-1 == bytes){
       // error
-      DPRNNN("file read error(%d)", errno);
+      S3FS_PRN_ERR("file read error(%d)", errno);
+      EVP_MD_CTX_destroy(sha256ctx);
       return NULL;
     }
     EVP_DigestUpdate(sha256ctx, buf, bytes);
     memset(buf, 0, 512);
   }
   if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
+    EVP_MD_CTX_destroy(sha256ctx);
     return NULL;
   }
   EVP_DigestFinal_ex(sha256ctx, result, NULL);
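The openssl_auth.cpp hunks above fix a context leak: EVP_MD_CTX_create() used to run before the early-return error paths, so the rewrite defers creation until after those checks and adds EVP_MD_CTX_destroy() on each failure branch. A scoped guard makes this class of leak structurally impossible; the sketch below is a hypothetical alternative for illustration, not code from this change (s3fs targets pre-C++11, hence the manual non-copyable idiom).

    #include <openssl/evp.h>

    // Hypothetical RAII wrapper: the context is destroyed on every path out
    // of the enclosing scope, including early returns.
    class ScopedMdCtx
    {
      public:
        ScopedMdCtx() : ctx(EVP_MD_CTX_create()) {}
        ~ScopedMdCtx() { if(ctx){ EVP_MD_CTX_destroy(ctx); } }
        EVP_MD_CTX* get() const { return ctx; }
      private:
        EVP_MD_CTX* ctx;
        ScopedMdCtx(const ScopedMdCtx&);             // non-copyable,
        ScopedMdCtx& operator=(const ScopedMdCtx&);  // C++03 style
    };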
diff --git a/src/s3fs.cpp b/src/s3fs.cpp
index 87a6e9b..62ba947 100644
--- a/src/s3fs.cpp
+++ b/src/s3fs.cpp
@@ -35,6 +35,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -84,9 +85,7 @@ typedef std::list uncomp_mp_list_t;
 //-------------------------------------------------------------------
 // Global valiables
 //-------------------------------------------------------------------
-bool debug = false;
 bool foreground = false;
-bool foreground2 = false;
 bool nomultipart = false;
 bool pathrequeststyle = false;
 bool is_specified_endpoint = false;
@@ -95,6 +94,8 @@ std::string service_path = "/";
 std::string host = "http://s3.amazonaws.com";
 std::string bucket = "";
 std::string endpoint = "us-east-1";
+s3fs_log_level debug_level = S3FS_LOG_CRIT;
+const char* s3fs_log_nest[S3FS_LOG_NEST_MAX] = {"", "  ", "    ", "      "};

 //-------------------------------------------------------------------
 // Static valiables
@@ -120,10 +121,15 @@ static bool is_s3fs_gid = false;// default does not set.
 static bool is_s3fs_umask = false;// default does not set.
 static bool is_remove_cache = false;
 static bool create_bucket = false;
+static int64_t singlepart_copy_limit = FIVE_GB;

 //-------------------------------------------------------------------
 // Static functions : prototype
 //-------------------------------------------------------------------
+static void s3fs_usr2_handler(int sig);
+static bool set_s3fs_usr2_handler(void);
+static s3fs_log_level set_s3fs_log_level(s3fs_log_level level);
+static s3fs_log_level bumpup_s3fs_log_level(void);
 static bool is_special_name_folder_object(const char* path);
 static int chk_dir_object_type(const char* path, string& newpath, string& nowpath, string& nowcache, headers_t* pmeta = NULL, int* pDirType = NULL);
 static int get_object_attribute(const char* path, struct stat* pstbuf, headers_t* pmeta = NULL, bool overcheck = true, bool* pisforce = NULL);
@@ -214,16 +220,59 @@ static int s3fs_removexattr(const char* path, const char* name);
 //-------------------------------------------------------------------
 // Functions
 //-------------------------------------------------------------------
+static void s3fs_usr2_handler(int sig)
+{
+  if(SIGUSR2 == sig){
+    bumpup_s3fs_log_level();
+  }
+}
+
+static bool set_s3fs_usr2_handler(void)
+{
+  struct sigaction sa;
+
+  memset(&sa, 0, sizeof(struct sigaction));
+  sa.sa_handler = s3fs_usr2_handler;
+  sa.sa_flags   = SA_RESTART;
+  if(0 != sigaction(SIGUSR2, &sa, NULL)){
+    return false;
+  }
+  return true;
+}
+
+static s3fs_log_level set_s3fs_log_level(s3fs_log_level level)
+{
+  if(level == debug_level){
+    return debug_level;
+  }
+  s3fs_log_level old = debug_level;
+  debug_level        = level;
+  setlogmask(LOG_UPTO(S3FS_LOG_LEVEL_TO_SYSLOG(debug_level)));
+  S3FS_PRN_CRIT("change debug level from %sto %s", S3FS_LOG_LEVEL_STRING(old), S3FS_LOG_LEVEL_STRING(debug_level));
+  return old;
+}
+
+static s3fs_log_level bumpup_s3fs_log_level(void)
+{
+  s3fs_log_level old = debug_level;
+  debug_level        = ( S3FS_LOG_CRIT == debug_level ? S3FS_LOG_ERR :
+                         S3FS_LOG_ERR  == debug_level ? S3FS_LOG_WARN :
+                         S3FS_LOG_WARN == debug_level ? S3FS_LOG_INFO :
+                         S3FS_LOG_INFO == debug_level ? S3FS_LOG_DBG :
+                         S3FS_LOG_CRIT );
+  setlogmask(LOG_UPTO(S3FS_LOG_LEVEL_TO_SYSLOG(debug_level)));
+  S3FS_PRN_CRIT("change debug level from %sto %s", S3FS_LOG_LEVEL_STRING(old), S3FS_LOG_LEVEL_STRING(debug_level));
+  return old;
+}
+
 static bool is_special_name_folder_object(const char* path)
 {
-  string strpath = path;
-  headers_t header;
-
   if(!path || '\0' == path[0]){
     return false;
   }
-  strpath = path;
+
+  string strpath = path;
+  headers_t header;
+
   if(string::npos == strpath.find("_$folder$", 0)){
     if('/' == strpath[strpath.length() - 1]){
       strpath = strpath.substr(0, strpath.length() - 1);
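With the handler above installed, the effective log level can be raised on a live mount: each SIGUSR2 advances one step through CRIT, ERR, WARN, INFO, DBG and then wraps back to CRIT. Assuming set_s3fs_usr2_handler() is wired up at startup (the call site is outside this excerpt), `kill -USR2 <pid of s3fs>` triggers one step. The rotation can be modeled as a small pure function; this stand-alone sketch uses its own enum purely for illustration, while s3fs implements the cycle inline in bumpup_s3fs_log_level() using s3fs_log_level from common.h.

    // Illustrative stand-alone model of the log-level rotation.
    enum log_level { LVL_CRIT, LVL_ERR, LVL_WARN, LVL_INFO, LVL_DBG };

    static log_level next_log_level(log_level cur)
    {
      switch(cur){
        case LVL_CRIT: return LVL_ERR;
        case LVL_ERR:  return LVL_WARN;
        case LVL_WARN: return LVL_INFO;
        case LVL_INFO: return LVL_DBG;
        default:       return LVL_CRIT;  // wrap around from the most verbose level
      }
    }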
@@ -350,7 +399,7 @@ static int get_object_attribute(const char* path, struct stat* pstbuf, headers_t* pmeta, bool overcheck, bool* pisforce)
   bool forcedir = false;
   string::size_type Pos;

-  FPRNINFO("[path=%s]", path);
+  S3FS_PRN_DBG("[path=%s]", path);

   if(!path || '\0' == path[0]){
     return -ENOENT;
@@ -459,20 +508,20 @@ static int get_object_attribute(const char* path, struct stat* pstbuf, headers_t* pmeta, bool overcheck, bool* pisforce)
   if(0 != StatCache::getStatCacheData()->GetCacheSize()){
     // add into stat cache
     if(!StatCache::getStatCacheData()->AddStat(strpath, (*pheader), forcedir)){
-      DPRN("failed adding stat cache [path=%s]", strpath.c_str());
+      S3FS_PRN_ERR("failed adding stat cache [path=%s]", strpath.c_str());
       return -ENOENT;
     }
     if(!StatCache::getStatCacheData()->GetStat(strpath, pstat, pheader, overcheck, pisforce)){
       // There is not in cache.(why?) -> retry to convert.
       if(!convert_header_to_stat(strpath.c_str(), (*pheader), pstat, forcedir)){
-        DPRN("failed convert headers to stat[path=%s]", strpath.c_str());
+        S3FS_PRN_ERR("failed convert headers to stat[path=%s]", strpath.c_str());
         return -ENOENT;
       }
     }
   }else{
     // cache size is Zero -> only convert.
     if(!convert_header_to_stat(strpath.c_str(), (*pheader), pstat, forcedir)){
-      DPRN("failed convert headers to stat[path=%s]", strpath.c_str());
+      S3FS_PRN_ERR("failed convert headers to stat[path=%s]", strpath.c_str());
       return -ENOENT;
     }
   }
@@ -496,7 +545,7 @@ static int check_object_access(const char* path, int mask, struct stat* pstbuf)
   struct stat* pst = (pstbuf ? pstbuf : &st);
   struct fuse_context* pcxt;

-  FPRNINFO("[path=%s]", path);
+  S3FS_PRN_DBG("[path=%s]", path);

   if(NULL == (pcxt = fuse_get_context())){
     return -EIO;
@@ -570,7 +619,7 @@ static int check_object_owner(const char* path, struct stat* pstbuf)
   struct stat* pst = (pstbuf ? pstbuf : &st);
   struct fuse_context* pcxt;

-  FPRNINFO("[path=%s]", path);
+  S3FS_PRN_DBG("[path=%s]", path);

   if(NULL == (pcxt = fuse_get_context())){
     return -EIO;
@@ -602,14 +651,14 @@ static int check_parent_object_access(const char* path, int mask)
   string parent;
   int result;

-  FPRNINFO("[path=%s]", path);
+  S3FS_PRN_DBG("[path=%s]", path);

   if(0 == strcmp(path, "/") || 0 == strcmp(path, ".")){
     // path is mount point.
     return 0;
   }
   if(X_OK == (mask & X_OK)){
-    for(parent = mydirname(path); 0 < parent.size(); parent = mydirname(parent.c_str())){
+    for(parent = mydirname(path); 0 < parent.size(); parent = mydirname(parent)){
       if(parent == "."){
         parent = "/";
       }
@@ -635,37 +684,46 @@ static int check_parent_object_access(const char* path, int mask)
 }

 //
-// This function is global, is called fom curl class(GetObject).
+// ssevalue is MD5 for SSE-C type, or KMS id for SSE-KMS
 //
-char* get_object_sseckey_md5(const char* path)
+bool get_object_sse_type(const char* path, sse_type_t& ssetype, string& ssevalue)
 {
   if(!path){
-    return NULL;
+    return false;
   }
+
   headers_t meta;
   if(0 != get_object_attribute(path, NULL, &meta)){
-    DPRNNN("Failed to get object(%s) headers", path);
-    return NULL;
+    S3FS_PRN_ERR("Failed to get object(%s) headers", path);
+    return false;
   }
+
+  ssetype = SSE_DISABLE;
+  ssevalue.erase();
   for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
-    string key = (*iter).first;
-    if(0 == strcasecmp(key.c_str(), "x-amz-server-side-encryption-customer-key-md5")){
-      return strdup((*iter).second.c_str());
+    string key = (*iter).first;
+    if(0 == strcasecmp(key.c_str(), "x-amz-server-side-encryption") && 0 == strcasecmp((*iter).second.c_str(), "AES256")){
+      ssetype = SSE_S3;
+    }else if(0 == strcasecmp(key.c_str(), "x-amz-server-side-encryption-aws-kms-key-id")){
+      ssetype  = SSE_KMS;
+      ssevalue = (*iter).second;
+    }else if(0 == strcasecmp(key.c_str(), "x-amz-server-side-encryption-customer-key-md5")){
+      ssetype  = SSE_C;
+      ssevalue = (*iter).second;
    }
   }
-  return NULL;
+  return true;
 }

 static FdEntity* get_local_fent(const char* path, bool is_load)
 {
   struct stat stobj;
   FdEntity*   ent;
+  headers_t   meta;

-  FPRNNN("[path=%s]", path);
+  S3FS_PRN_INFO2("[path=%s]", path);

-  if(0 != get_object_attribute(path, &stobj)){
+  if(0 != get_object_attribute(path, &stobj, &meta)){
     return NULL;
   }
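get_object_sse_type() now reports which of the three server-side encryption flavors an existing object uses, instead of only returning the SSE-C key MD5. A caller is expected to branch on the returned sse_type_t; the hypothetical sketch below only illustrates that shape (the enum names follow the diff, the comments stand in for real header handling that is outside this excerpt).

    // Hypothetical caller: keep an object's existing SSE settings when it
    // is rewritten, by choosing request headers from its current type.
    void prepare_sse_headers(const char* path)
    {
      sse_type_t  ssetype;
      std::string ssevalue;
      if(!get_object_sse_type(path, ssetype, ssevalue)){
        return;  // could not read the object's headers
      }
      switch(ssetype){
        case SSE_DISABLE: /* nothing to add */                                break;
        case SSE_S3:      /* x-amz-server-side-encryption: AES256 */          break;
        case SSE_KMS:     /* kms key id carried in ssevalue */                break;
        case SSE_C:       /* customer key whose MD5 matches ssevalue */       break;
      }
    }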
@@ -673,17 +731,16 @@ static FdEntity* get_local_fent(const char* path, bool is_load)
   time_t mtime = (!S_ISREG(stobj.st_mode) || S_ISLNK(stobj.st_mode)) ? -1 : stobj.st_mtime;
   bool force_tmpfile = S_ISREG(stobj.st_mode) ? false : true;

-  if(NULL == (ent = FdManager::get()->Open(path, stobj.st_size, mtime, force_tmpfile, true))){
-    DPRN("Coult not open file. errno(%d)", errno);
+  if(NULL == (ent = FdManager::get()->Open(path, &meta, static_cast<ssize_t>(stobj.st_size), mtime, force_tmpfile, true))){
+    S3FS_PRN_ERR("Could not open file. errno(%d)", errno);
     return NULL;
   }
   // load
-  if(is_load && !ent->LoadFull()){
-    DPRN("Coult not load file. errno(%d)", errno);
+  if(is_load && !ent->OpenAndLoadAll(&meta)){
+    S3FS_PRN_ERR("Could not load file. errno(%d)", errno);
     FdManager::get()->Close(ent);
     return NULL;
   }
-
   return ent;
 }
@@ -698,12 +755,12 @@ static int put_headers(const char* path, headers_t& meta, bool is_copy)
   S3fsCurl s3fscurl(true);
   struct stat buf;

-  FPRNNN("[path=%s]", path);
+  S3FS_PRN_INFO2("[path=%s]", path);

   // files larger than 5GB must be modified via the multipart interface
   // *** If there is not target object(a case of move command),
   //     get_object_attribute() returns error with initilizing buf.
-  get_object_attribute(path, &buf);
+  (void)get_object_attribute(path, &buf);

   if(buf.st_size >= FIVE_GB){
     // multipart
@@ -721,7 +778,7 @@ static int put_headers(const char* path, headers_t& meta, bool is_copy)
     // no opened fd
     if(FdManager::get()->IsCacheDir()){
       // create cache file if be needed
-      ent = FdManager::get()->Open(path, buf.st_size, -1, false, true);
+      ent = FdManager::get()->Open(path, &meta, static_cast<ssize_t>(buf.st_size), -1, false, true);
     }
   }
   if(ent){
@@ -737,7 +794,7 @@ static int s3fs_getattr(const char* path, struct stat* stbuf)
 {
   int result;

-  FPRN("[path=%s]", path);
+  S3FS_PRN_INFO("[path=%s]", path);

   // check parent directory attribute.
   if(0 != (result = check_parent_object_access(path, X_OK))){
@@ -750,6 +807,7 @@ static int s3fs_getattr(const char* path, struct stat* stbuf)
   // (See: Issue 241)
   if(stbuf){
     FdEntity* ent;
+
     if(NULL != (ent = FdManager::get()->ExistOpen(path))){
       struct stat tmpstbuf;
       if(ent->GetStats(tmpstbuf)){
@@ -760,7 +818,7 @@ static int s3fs_getattr(const char* path, struct stat* stbuf)
     stbuf->st_blksize = 4096;
     stbuf->st_blocks  = get_blocks(stbuf->st_size);
   }
-  FPRNINFO("[path=%s] uid=%u, gid=%u, mode=%04o", path, (unsigned int)(stbuf->st_uid), (unsigned int)(stbuf->st_gid), stbuf->st_mode);
+  S3FS_PRN_DBG("[path=%s] uid=%u, gid=%u, mode=%04o", path, (unsigned int)(stbuf->st_uid), (unsigned int)(stbuf->st_gid), stbuf->st_mode);
   S3FS_MALLOCTRIM(0);

   return result;
@@ -774,23 +832,23 @@ static int s3fs_readlink(const char* path, char* buf, size_t size)
   // Open
   FdEntity* ent;
   if(NULL == (ent = get_local_fent(path))){
-    DPRN("could not get fent(file=%s)", path);
+    S3FS_PRN_ERR("could not get fent(file=%s)", path);
     return -EIO;
   }
   // Get size
-  off_t readsize;
+  size_t readsize;
   if(!ent->GetSize(readsize)){
-    DPRN("could not get file size(file=%s)", path);
+    S3FS_PRN_ERR("could not get file size(file=%s)", path);
     FdManager::get()->Close(ent);
     return -EIO;
   }
-  if(static_cast<off_t>(size) <= readsize){
+  if(size <= readsize){
     readsize = size - 1;
   }
   // Read
   ssize_t ressize;
-  if(0 > (ressize = ent->Read(buf, 0, static_cast<size_t>(readsize)))){
-    DPRN("could not read file(file=%s, errno=%zd)", path, ressize);
+  if(0 > (ressize = ent->Read(buf, 0, readsize))){
+    S3FS_PRN_ERR("could not read file(file=%s, errno=%zd)", path, ressize);
     FdManager::get()->Close(ent);
     return static_cast<int>(ressize);
   }
@@ -804,7 +862,7 @@ static int s3fs_readlink(const char* path, char* buf, size_t size)

 static int do_create_bucket(void)
 {
-  FPRNNN("/");
+  S3FS_PRN_INFO2("/");

   headers_t meta;
@@ -813,8 +871,7 @@ static int do_create_bucket(void)
   if(res < 0){    // fd=-1 means for creating zero byte object.
     long responseCode = s3fscurl.GetLastResponseCode();
     if((responseCode == 400 || responseCode == 403) && S3fsCurl::IsSignatureV4()){
-      LOWSYSLOGPRINT(LOG_ERR, "Could not connect, so retry to connect by signature version 2.");
-      FPRN("Could not connect, so retry to connect by signature version 2.");
+      S3FS_PRN_ERR("Could not connect, so retry to connect by signature version 2.");
       S3fsCurl::SetSignatureV4(false);

       // retry to check
@@ -828,7 +885,7 @@ static int do_create_bucket(void)
 // common function for creation of a plain object
 static int create_file_object(const char* path, mode_t mode, uid_t uid, gid_t gid)
 {
-  FPRNNN("[path=%s][mode=%04o]", path, mode);
+  S3FS_PRN_INFO2("[path=%s][mode=%04o]", path, mode);

   headers_t meta;
   meta["Content-Type"] = S3fsCurl::LookupMimeType(string(path));
@@ -844,17 +901,16 @@ static int create_file_object(const char* path, mode_t mode, uid_t uid, gid_t gid)
 static int s3fs_mknod(const char *path, mode_t mode, dev_t rdev)
 {
   int result;
-  headers_t meta;
   struct fuse_context* pcxt;

-  FPRN("[path=%s][mode=%04o][dev=%ju]", path, mode, (uintmax_t)rdev);
+  S3FS_PRN_INFO("[path=%s][mode=%04o][dev=%ju]", path, mode, (uintmax_t)rdev);

   if(NULL == (pcxt = fuse_get_context())){
     return -EIO;
   }

   if(0 != (result = create_file_object(path, mode, pcxt->uid, pcxt->gid))){
-    DPRN("could not create object for special file(result=%d)", result);
+    S3FS_PRN_ERR("could not create object for special file(result=%d)", result);
     return result;
   }
   StatCache::getStatCacheData()->DelStat(path);
@@ -866,10 +922,9 @@ static int s3fs_mknod(const char *path, mode_t mode, dev_t rdev)
 static int s3fs_create(const char* path, mode_t mode, struct fuse_file_info* fi)
 {
   int result;
-  headers_t meta;
   struct fuse_context* pcxt;

-  FPRN("[path=%s][mode=%04o][flags=%d]", path, mode, fi->flags);
+  S3FS_PRN_INFO("[path=%s][mode=%04o][flags=%d]", path, mode, fi->flags);

   if(NULL == (pcxt = fuse_get_context())){
     return -EIO;
@@ -893,8 +948,11 @@ static int s3fs_create(const char* path, mode_t mode, struct fuse_file_info* fi)
     return result;
   }
-  FdEntity* ent;
-  if(NULL == (ent = FdManager::get()->Open(path, 0, -1, false, true))){
+
+  FdEntity*  ent;
+  headers_t  meta;
+  get_object_attribute(path, NULL, &meta);
+  if(NULL == (ent = FdManager::get()->Open(path, &meta, 0, -1, false, true))){
     return -EIO;
   }
   fi->fh = ent->GetFd();
@@ -905,7 +963,7 @@ static int s3fs_create(const char* path, mode_t mode, struct fuse_file_info* fi)

 static int create_directory_object(const char* path, mode_t mode, time_t time, uid_t uid, gid_t gid)
 {
-  FPRNN("[path=%s][mode=%04o][time=%jd][uid=%u][gid=%u]", path, mode, (intmax_t)time, (unsigned int)uid, (unsigned int)gid);
+  S3FS_PRN_INFO1("[path=%s][mode=%04o][time=%jd][uid=%u][gid=%u]", path, mode, (intmax_t)time, (unsigned int)uid, (unsigned int)gid);

   if(!path || '\0' == path[0]){
     return -1;
@@ -931,7 +989,7 @@ static int s3fs_mkdir(const char* path, mode_t mode)
   int result;
   struct fuse_context* pcxt;

-  FPRN("[path=%s][mode=%04o]", path, mode);
+  S3FS_PRN_INFO("[path=%s][mode=%04o]", path, mode);

   if(NULL == (pcxt = fuse_get_context())){
     return -EIO;
@@ -959,7 +1017,7 @@ static int s3fs_unlink(const char* path)
 {
   int result;

-  FPRN("[path=%s]", path);
+  S3FS_PRN_INFO("[path=%s]", path);

   if(0 != (result = check_parent_object_access(path, W_OK | X_OK))){
     return result;
@@ -979,7 +1037,7 @@ static int directory_empty(const char* path)
   S3ObjList head;

   if((result = list_bucket(path, head, "/", true)) != 0){
-    DPRNNN("list_bucket returns error.");
+    S3FS_PRN_ERR("list_bucket returns error.");
     return result;
   }
   if(!head.IsEmpty()){
@@ -994,7 +1052,7 @@ static int s3fs_rmdir(const char* path)
   string strpath;
   struct stat stbuf;

-  FPRN("[path=%s]", path);
+  S3FS_PRN_INFO("[path=%s]", path);

   if(0 != (result = check_parent_object_access(path, W_OK | X_OK))){
     return result;
@@ -1049,7 +1107,7 @@ static int s3fs_symlink(const char* from, const char* to)
   int result;
   struct fuse_context* pcxt;

-  FPRN("[from=%s][to=%s]", from, to);
+  S3FS_PRN_INFO("[from=%s][to=%s]", from, to);

   if(NULL == (pcxt = fuse_get_context())){
     return -EIO;
@@ -1073,20 +1131,20 @@ static int s3fs_symlink(const char* from, const char* to)

   // open tmpfile
   FdEntity* ent;
-  if(NULL == (ent = FdManager::get()->Open(to, 0, -1, true, true))){
-    DPRN("could not open tmpfile(errno=%d)", errno);
+  if(NULL == (ent = FdManager::get()->Open(to, &headers, 0, -1, true, true))){
+    S3FS_PRN_ERR("could not open tmpfile(errno=%d)", errno);
     return -errno;
   }
   // write
   ssize_t from_size = strlen(from);
   if(from_size != ent->Write(from, 0, from_size)){
-    DPRN("could not write tmpfile(errno=%d)", errno);
+    S3FS_PRN_ERR("could not write tmpfile(errno=%d)", errno);
     FdManager::get()->Close(ent);
     return -errno;
   }
   // upload
-  if(0 != (result = ent->Flush(headers, true))){
-    DPRN("could not upload tmpfile(result=%d)", result);
+  if(0 != (result = ent->Flush(true))){
+    S3FS_PRN_WARN("could not upload tmpfile(result=%d)", result);
   }
   FdManager::get()->Close(ent);
@@ -1102,7 +1160,7 @@ static int rename_object(const char* from, const char* to)
   string s3_realpath;
   headers_t meta;

-  FPRNN("[from=%s][to=%s]", from , to);
+  S3FS_PRN_INFO1("[from=%s][to=%s]", from , to);

   if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){
     // not permmit writing "to" object parent dir.
@@ -1135,10 +1193,9 @@ static int rename_object(const char* from, const char* to)

 static int rename_object_nocopy(const char* from, const char* to)
 {
-  int result;
-  headers_t meta;
+  int result;

-  FPRNN("[from=%s][to=%s]", from , to);
+  S3FS_PRN_INFO1("[from=%s][to=%s]", from , to);

   if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){
     // not permmit writing "to" object parent dir.
@@ -1149,24 +1206,22 @@ static int rename_object_nocopy(const char* from, const char* to)
     return result;
   }

-  // Get attributes
-  if(0 != (result = get_object_attribute(from, NULL, &meta))){
-    return result;
-  }
-
-  // Set header
-  meta["Content-Type"] = S3fsCurl::LookupMimeType(string(to));
-
   // open & load
   FdEntity* ent;
   if(NULL == (ent = get_local_fent(from, true))){
-    DPRN("could not open and read file(%s)", from);
+    S3FS_PRN_ERR("could not open and read file(%s)", from);
+    return -EIO;
+  }
+
+  // Set header
+  if(!ent->SetContentType(to)){
+    S3FS_PRN_ERR("could not set content-type for %s", to);
     return -EIO;
   }

   // upload
-  if(0 != (result = ent->RowFlush(to, meta, true))){
-    DPRN("could not upload file(%s): result=%d", to, result);
+  if(0 != (result = ent->RowFlush(to, true))){
+    S3FS_PRN_ERR("could not upload file(%s): result=%d", to, result);
     FdManager::get()->Close(ent);
     return result;
   }
@@ -1188,7 +1243,7 @@ static int rename_large_object(const char* from, const char* to)
   struct stat buf;
   headers_t meta;

-  FPRNN("[from=%s][to=%s]", from , to);
+  S3FS_PRN_INFO1("[from=%s][to=%s]", from , to);

   if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){
     // not permmit writing "to" object parent dir.
@@ -1217,7 +1272,7 @@ static int clone_directory_object(const char* from, const char* to)
   int result = -1;
   struct stat stbuf;

-  FPRNN("[from=%s][to=%s]", from, to);
+  S3FS_PRN_INFO1("[from=%s][to=%s]", from, to);

   // get target's attributes
   if(0 != (result = get_object_attribute(from, &stbuf))){
@@ -1247,7 +1302,7 @@ static int rename_directory(const char* from, const char* to)
   int result;
   bool is_dir;

-  FPRNN("[from=%s][to=%s]", from, to);
+  S3FS_PRN_INFO1("[from=%s][to=%s]", from, to);

   //
   // Initiate and Add base directory into MVNODE struct.
@@ -1273,14 +1328,14 @@ static int rename_directory(const char* from, const char* to)
   // No delimiter is specified, the result(head) is all object keys.
   // (CommonPrefixes is empty, but all object is listed in Key.)
   if(0 != (result = list_bucket(basepath.c_str(), head, NULL))){
-    DPRNNN("list_bucket returns error.");
+    S3FS_PRN_ERR("list_bucket returns error.");
     return result;
   }
   head.GetNameList(headlist);                       // get name without "/".
   S3ObjList::MakeHierarchizedList(headlist, false); // add hierarchized dir.

   s3obj_list_t::const_iterator liter;
-  for(liter = headlist.begin(); headlist.end() != liter; liter++){
+  for(liter = headlist.begin(); headlist.end() != liter; ++liter){
     // make "from" and "to" object name.
     string from_name = basepath + (*liter);
     string to_name   = strto + (*liter);
@@ -1289,13 +1344,13 @@ static int rename_directory(const char* from, const char* to)
     // Check subdirectory.
     StatCache::getStatCacheData()->HasStat(from_name, etag.c_str()); // Check ETag
     if(0 != get_object_attribute(from_name.c_str(), &stbuf, NULL)){
-      DPRNNN("failed to get %s object attribute.", from_name.c_str());
+      S3FS_PRN_WARN("failed to get %s object attribute.", from_name.c_str());
       continue;
     }
     if(S_ISDIR(stbuf.st_mode)){
       is_dir = true;
       if(0 != chk_dir_object_type(from_name.c_str(), newpath, from_name, nowcache, NULL, &DirType) || DIRTYPE_UNKNOWN == DirType){
-        DPRNNN("failed to get %s%s object directory type.", basepath.c_str(), (*liter).c_str());
+        S3FS_PRN_WARN("failed to get %s%s object directory type.", basepath.c_str(), (*liter).c_str());
         continue;
       }
       if(DIRTYPE_NOOBJ != DirType){
@@ -1322,7 +1377,7 @@ static int rename_directory(const char* from, const char* to)
   for(mn_cur = mn_head; mn_cur; mn_cur = mn_cur->next){
     if(mn_cur->is_dir && mn_cur->old_path && '\0' != mn_cur->old_path[0]){
       if(0 != (result = clone_directory_object(mn_cur->old_path, mn_cur->new_path))){
-        DPRN("clone_directory_object returned an error(%d)", result);
+        S3FS_PRN_ERR("clone_directory_object returned an error(%d)", result);
         free_mvnodes(mn_head);
         return -EIO;
       }
@@ -1339,7 +1394,7 @@ static int rename_directory(const char* from, const char* to)
       result = rename_object_nocopy(mn_cur->old_path, mn_cur->new_path);
     }
     if(0 != result){
-      DPRN("rename_object returned an error(%d)", result);
+      S3FS_PRN_ERR("rename_object returned an error(%d)", result);
       free_mvnodes(mn_head);
       return -EIO;
     }
@@ -1351,7 +1406,7 @@ static int rename_directory(const char* from, const char* to)
     if(mn_cur->is_dir && mn_cur->old_path && '\0' != mn_cur->old_path[0]){
       if(!(mn_cur->is_normdir)){
         if(0 != (result = s3fs_rmdir(mn_cur->old_path))){
-          DPRN("s3fs_rmdir returned an error(%d)", result);
+          S3FS_PRN_ERR("s3fs_rmdir returned an error(%d)", result);
           free_mvnodes(mn_head);
           return -EIO;
         }
@@ -1371,7 +1426,7 @@ static int s3fs_rename(const char* from, const char* to)
   struct stat buf;
   int result;

-  FPRN("[from=%s][to=%s]", from, to);
+  S3FS_PRN_INFO("[from=%s][to=%s]", from, to);

   if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){
     // not permmit writing "to" object parent dir.
@@ -1388,7 +1443,7 @@ static int s3fs_rename(const char* from, const char* to)
   // files larger than 5GB must be modified via the multipart interface
   if(S_ISDIR(buf.st_mode)){
     result = rename_directory(from, to);
-  }else if(!nomultipart && buf.st_size >= FIVE_GB){
+  }else if(!nomultipart && buf.st_size >= singlepart_copy_limit){
    result = rename_large_object(from, to);
   }else{
     if(!nocopyapi && !norenameapi){
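The rename path above now compares against singlepart_copy_limit instead of the hard-coded FIVE_GB, making the threshold at which a rename falls back to a multipart copy tunable while still defaulting to FIVE_GB (see the new static variable earlier in this diff). The option plumbing lives in the argument parser outside this excerpt, so the sketch below of how such an option might be parsed is an assumption, including the choice of megabytes as the unit.

    #include <cstdlib>
    #include <cstring>
    #include <stdint.h>

    // Illustrative only: accept "singlepart_copy_limit=<megabytes>" and
    // store the value in bytes, as the comparison in s3fs_rename() expects.
    static int64_t parse_singlepart_copy_limit(const char* arg, int64_t current)
    {
      const char* key = "singlepart_copy_limit=";
      if(0 != strncmp(arg, key, strlen(key))){
        return current;   // not our option, keep the existing value
      }
      return static_cast<int64_t>(strtoll(arg + strlen(key), NULL, 10)) * 1024 * 1024;
    }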
S3FS_PRN_ERR("Could not change owner for maount point."); return -EIO; } if(0 != (result = check_parent_object_access(path, X_OK))){ @@ -1639,18 +1693,17 @@ static int s3fs_chown(const char* path, uid_t uid, gid_t gid) static int s3fs_chown_nocopy(const char* path, uid_t uid, gid_t gid) { - int result; - string strpath; - string newpath; - string nowcache; - headers_t meta; + int result; + string strpath; + string newpath; + string nowcache; struct stat stbuf; - int nDirType = DIRTYPE_UNKNOWN; + int nDirType = DIRTYPE_UNKNOWN; - FPRNN("[path=%s][uid=%u][gid=%u]", path, (unsigned int)uid, (unsigned int)gid); + S3FS_PRN_INFO1("[path=%s][uid=%u][gid=%u]", path, (unsigned int)uid, (unsigned int)gid); if(0 == strcmp(path, "/")){ - DPRNNN("Could not change owner for maount point."); + S3FS_PRN_ERR("Could not change owner for maount point."); return -EIO; } if(0 != (result = check_parent_object_access(path, X_OK))){ @@ -1662,11 +1715,11 @@ static int s3fs_chown_nocopy(const char* path, uid_t uid, gid_t gid) // Get attributes if(S_ISDIR(stbuf.st_mode)){ - result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType); + result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType); }else{ strpath = path; nowcache = strpath; - result = get_object_attribute(strpath.c_str(), NULL, &meta); + result = get_object_attribute(strpath.c_str(), NULL, NULL); } if(0 != result){ return result; @@ -1701,20 +1754,20 @@ static int s3fs_chown_nocopy(const char* path, uid_t uid, gid_t gid) }else{ // normal object or directory object of newer version - // Change owner - meta["x-amz-meta-uid"] = str(uid); - meta["x-amz-meta-gid"] = str(gid); - // open & load FdEntity* ent; if(NULL == (ent = get_local_fent(strpath.c_str(), true))){ - DPRN("could not open and read file(%s)", strpath.c_str()); + S3FS_PRN_ERR("could not open and read file(%s)", strpath.c_str()); return -EIO; } + // Change owner + ent->SetUId(uid); + ent->SetGId(gid); + // upload - if(0 != (result = ent->Flush(meta, true))){ - DPRN("could not upload file(%s): result=%d", strpath.c_str(), result); + if(0 != (result = ent->Flush(true))){ + S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result); FdManager::get()->Close(ent); return result; } @@ -1737,10 +1790,10 @@ static int s3fs_utimens(const char* path, const struct timespec ts[2]) struct stat stbuf; int nDirType = DIRTYPE_UNKNOWN; - FPRN("[path=%s][mtime=%jd]", path, (intmax_t)(ts[1].tv_sec)); + S3FS_PRN_INFO("[path=%s][mtime=%jd]", path, (intmax_t)(ts[1].tv_sec)); if(0 == strcmp(path, "/")){ - DPRNNN("Could not change mtime for maount point."); + S3FS_PRN_ERR("Could not change mtime for maount point."); return -EIO; } if(0 != (result = check_parent_object_access(path, X_OK))){ @@ -1797,18 +1850,17 @@ static int s3fs_utimens(const char* path, const struct timespec ts[2]) static int s3fs_utimens_nocopy(const char* path, const struct timespec ts[2]) { - int result; - string strpath; - string newpath; - string nowcache; - headers_t meta; + int result; + string strpath; + string newpath; + string nowcache; struct stat stbuf; - int nDirType = DIRTYPE_UNKNOWN; + int nDirType = DIRTYPE_UNKNOWN; - FPRNN("[path=%s][mtime=%s]", path, str(ts[1].tv_sec).c_str()); + S3FS_PRN_INFO1("[path=%s][mtime=%s]", path, str(ts[1].tv_sec).c_str()); if(0 == strcmp(path, "/")){ - DPRNNN("Could not change mtime for mount point."); + S3FS_PRN_ERR("Could not change mtime for mount point."); return -EIO; } if(0 != (result = check_parent_object_access(path, X_OK))){ @@ -1822,11 
+1874,11 @@ static int s3fs_utimens_nocopy(const char* path, const struct timespec ts[2]) // Get attributes if(S_ISDIR(stbuf.st_mode)){ - result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType); + result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType); }else{ strpath = path; nowcache = strpath; - result = get_object_attribute(strpath.c_str(), NULL, &meta); + result = get_object_attribute(strpath.c_str(), NULL, NULL); } if(0 != result){ return result; @@ -1852,26 +1904,23 @@ static int s3fs_utimens_nocopy(const char* path, const struct timespec ts[2]) }else{ // normal object or directory object of newer version - // Change date - meta["x-amz-meta-mtime"] = str(ts[1].tv_sec); - // open & load FdEntity* ent; if(NULL == (ent = get_local_fent(strpath.c_str(), true))){ - DPRN("could not open and read file(%s)", strpath.c_str()); + S3FS_PRN_ERR("could not open and read file(%s)", strpath.c_str()); return -EIO; } // set mtime if(0 != (result = ent->SetMtime(ts[1].tv_sec))){ - DPRN("could not set mtime to file(%s): result=%d", strpath.c_str(), result); + S3FS_PRN_ERR("could not set mtime to file(%s): result=%d", strpath.c_str(), result); FdManager::get()->Close(ent); return result; } // upload - if(0 != (result = ent->Flush(meta, true))){ - DPRN("could not upload file(%s): result=%d", strpath.c_str(), result); + if(0 != (result = ent->Flush(true))){ + S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result); FdManager::get()->Close(ent); return result; } @@ -1890,7 +1939,11 @@ static int s3fs_truncate(const char* path, off_t size) headers_t meta; FdEntity* ent = NULL; - FPRN("[path=%s][size=%jd]", path, (intmax_t)size); + S3FS_PRN_INFO("[path=%s][size=%jd]", path, (intmax_t)size); + + if(size < 0){ + size = 0; + } if(0 != (result = check_parent_object_access(path, X_OK))){ return result; @@ -1902,27 +1955,38 @@ static int s3fs_truncate(const char* path, off_t size) // Get file information if(0 == (result = get_object_attribute(path, NULL, &meta))){ // Exists -> Get file(with size) - if(NULL == (ent = FdManager::get()->Open(path, size, -1, false, true))){ - DPRN("could not open file(%s): errno=%d", path, errno); + if(NULL == (ent = FdManager::get()->Open(path, &meta, static_cast(size), -1, false, true))){ + S3FS_PRN_ERR("could not open file(%s): errno=%d", path, errno); return -EIO; } - if(0 != (result = ent->Load(0, size))){ - DPRN("could not download file(%s): result=%d", path, result); + if(0 != (result = ent->Load(0, static_cast(size)))){ + S3FS_PRN_ERR("could not download file(%s): result=%d", path, result); FdManager::get()->Close(ent); return result; } }else{ // Not found -> Make tmpfile(with size) - if(NULL == (ent = FdManager::get()->Open(path, size, -1, true, true))){ - DPRN("could not open file(%s): errno=%d", path, errno); + + struct fuse_context* pcxt; + if(NULL == (pcxt = fuse_get_context())){ + return -EIO; + } + meta["Content-Type"] = string("application/octet-stream"); // Static + meta["x-amz-meta-mode"] = str(S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO); + meta["x-amz-meta-mtime"] = str(time(NULL)); + meta["x-amz-meta-uid"] = str(pcxt->uid); + meta["x-amz-meta-gid"] = str(pcxt->gid); + + if(NULL == (ent = FdManager::get()->Open(path, &meta, static_cast(size), -1, true, true))){ + S3FS_PRN_ERR("could not open file(%s): errno=%d", path, errno); return -EIO; } } // upload - if(0 != (result = ent->Flush(meta, true))){ - DPRN("could not upload file(%s): result=%d", path, result); + if(0 != (result = ent->Flush(true))){ + 
S3FS_PRN_ERR("could not upload file(%s): result=%d", path, result); FdManager::get()->Close(ent); return result; } @@ -1937,10 +2001,10 @@ static int s3fs_truncate(const char* path, off_t size) static int s3fs_open(const char* path, struct fuse_file_info* fi) { int result; - headers_t meta; struct stat st; + bool needs_flush = false; - FPRN("[path=%s][flags=%d]", path, fi->flags); + S3FS_PRN_INFO("[path=%s][flags=%d]", path, fi->flags); // clear stat for reading fresh stat. // (if object stat is changed, we refresh it. then s3fs gets always @@ -1962,16 +2026,30 @@ static int s3fs_open(const char* path, struct fuse_file_info* fi) } if((unsigned int)fi->flags & O_TRUNC){ - st.st_size = 0; + if(0 != st.st_size){ + st.st_size = 0; + needs_flush = true; + } } if(!S_ISREG(st.st_mode) || S_ISLNK(st.st_mode)){ st.st_mtime = -1; } - FdEntity* ent; - if(NULL == (ent = FdManager::get()->Open(path, st.st_size, st.st_mtime, false, true))){ + FdEntity* ent; + headers_t meta; + get_object_attribute(path, NULL, &meta); + if(NULL == (ent = FdManager::get()->Open(path, &meta, static_cast(st.st_size), st.st_mtime, false, true))){ return -EIO; } + + if (needs_flush){ + if(0 != (result = ent->RowFlush(path, true))){ + S3FS_PRN_ERR("could not upload file(%s): result=%d", path, result); + FdManager::get()->Close(ent); + return result; + } + } + fi->fh = ent->GetFd(); S3FS_MALLOCTRIM(0); @@ -1982,27 +2060,27 @@ static int s3fs_read(const char* path, char* buf, size_t size, off_t offset, str { ssize_t res; - FPRNINFO("[path=%s][size=%zu][offset=%jd][fd=%llu]", path, size, (intmax_t)offset, (unsigned long long)(fi->fh)); + S3FS_PRN_DBG("[path=%s][size=%zu][offset=%jd][fd=%llu]", path, size, (intmax_t)offset, (unsigned long long)(fi->fh)); FdEntity* ent; if(NULL == (ent = FdManager::get()->ExistOpen(path, static_cast(fi->fh)))){ - DPRN("could not find opened fd(%s)", path); + S3FS_PRN_ERR("could not find opened fd(%s)", path); return -EIO; } if(ent->GetFd() != static_cast(fi->fh)){ - DPRNNN("Warning - different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fi->fh)); + S3FS_PRN_WARN("different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fi->fh)); } // check real file size - off_t realsize = 0; - if(!ent->GetSize(realsize) || 0 >= realsize){ - DPRNINFO("file size is 0, so break to read."); + size_t realsize = 0; + if(!ent->GetSize(realsize) || realsize <= 0){ + S3FS_PRN_ERR("file size is 0, so break to read."); FdManager::get()->Close(ent); return 0; } if(0 > (res = ent->Read(buf, offset, size, false))){ - DPRN("failed to read file(%s). result=%zd", path, res); + S3FS_PRN_WARN("failed to read file(%s). result=%zd", path, res); } FdManager::get()->Close(ent); @@ -2013,18 +2091,18 @@ static int s3fs_write(const char* path, const char* buf, size_t size, off_t offs { ssize_t res; - FPRNINFO("[path=%s][size=%zu][offset=%jd][fd=%llu]", path, size, (intmax_t)offset, (unsigned long long)(fi->fh)); + S3FS_PRN_DBG("[path=%s][size=%zu][offset=%jd][fd=%llu]", path, size, (intmax_t)offset, (unsigned long long)(fi->fh)); FdEntity* ent; if(NULL == (ent = FdManager::get()->ExistOpen(path, static_cast(fi->fh)))){ - DPRN("could not find opened fd(%s)", path); + S3FS_PRN_ERR("could not find opened fd(%s)", path); return -EIO; } if(ent->GetFd() != static_cast(fi->fh)){ - DPRNNN("Warning - different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fi->fh)); + S3FS_PRN_WARN("different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fi->fh)); } if(0 > (res = ent->Write(buf, offset, size))){ - DPRN("failed to write file(%s). 
result=%zd", path, res); + S3FS_PRN_WARN("failed to write file(%s). result=%zd", path, res); } FdManager::get()->Close(ent); @@ -2046,7 +2124,7 @@ static int s3fs_flush(const char* path, struct fuse_file_info* fi) { int result; - FPRN("[path=%s][fd=%llu]", path, (unsigned long long)(fi->fh)); + S3FS_PRN_INFO("[path=%s][fd=%llu]", path, (unsigned long long)(fi->fh)); int mask = (O_RDONLY != (fi->flags & O_ACCMODE) ? W_OK : R_OK); if(0 != (result = check_parent_object_access(path, X_OK))){ @@ -2063,20 +2141,8 @@ static int s3fs_flush(const char* path, struct fuse_file_info* fi) FdEntity* ent; if(NULL != (ent = FdManager::get()->ExistOpen(path, static_cast(fi->fh)))){ - headers_t meta; - if(0 != (result = get_object_attribute(path, NULL, &meta))){ - FdManager::get()->Close(ent); - return result; - } - - // If both mtime are not same, force to change mtime based on fd. - time_t ent_mtime; - if(ent->GetMtime(ent_mtime)){ - if(str(ent_mtime) != meta["x-amz-meta-mtime"]){ - meta["x-amz-meta-mtime"] = str(ent_mtime); - } - } - result = ent->Flush(meta, false); + ent->UpdateMtime(); + result = ent->Flush(false); FdManager::get()->Close(ent); } S3FS_MALLOCTRIM(0); @@ -2091,24 +2157,14 @@ static int s3fs_fsync(const char* path, int datasync, struct fuse_file_info* fi) { int result = 0; - FPRN("[path=%s][fd=%llu]", path, (unsigned long long)(fi->fh)); + S3FS_PRN_INFO("[path=%s][fd=%llu]", path, (unsigned long long)(fi->fh)); FdEntity* ent; if(NULL != (ent = FdManager::get()->ExistOpen(path, static_cast(fi->fh)))){ - headers_t meta; - if(0 != (result = get_object_attribute(path, NULL, &meta))){ - FdManager::get()->Close(ent); - return result; + if(0 == datasync){ + ent->UpdateMtime(); } - - // If datasync is not zero, only flush data without meta updating. - time_t ent_mtime; - if(ent->GetMtime(ent_mtime)){ - if(0 == datasync && str(ent_mtime) != meta["x-amz-meta-mtime"]){ - meta["x-amz-meta-mtime"] = str(ent_mtime); - } - } - result = ent->Flush(meta, false); + result = ent->Flush(false); FdManager::get()->Close(ent); } S3FS_MALLOCTRIM(0); @@ -2118,7 +2174,7 @@ static int s3fs_fsync(const char* path, int datasync, struct fuse_file_info* fi) static int s3fs_release(const char* path, struct fuse_file_info* fi) { - FPRN("[path=%s][fd=%llu]", path, (unsigned long long)(fi->fh)); + S3FS_PRN_INFO("[path=%s][fd=%llu]", path, (unsigned long long)(fi->fh)); // [NOTICE] // At first, we remove stats cache. @@ -2132,18 +2188,18 @@ static int s3fs_release(const char* path, struct fuse_file_info* fi) FdEntity* ent; if(NULL == (ent = FdManager::get()->GetFdEntity(path, static_cast(fi->fh)))){ - DPRN("could not find fd(file=%s)", path); + S3FS_PRN_ERR("could not find fd(file=%s)", path); return -EIO; } if(ent->GetFd() != static_cast(fi->fh)){ - DPRNNN("Warning - different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fi->fh)); + S3FS_PRN_WARN("different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fi->fh)); } FdManager::get()->Close(ent); // check - for debug - if(debug){ + if(IS_S3FS_LOG_DBG()){ if(NULL != (ent = FdManager::get()->GetFdEntity(path, static_cast(fi->fh)))){ - DPRNNN("Warning - file(%s),fd(%d) is still opened.", path, ent->GetFd()); + S3FS_PRN_WARN("file(%s),fd(%d) is still opened.", path, ent->GetFd()); } } S3FS_MALLOCTRIM(0); @@ -2156,7 +2212,7 @@ static int s3fs_opendir(const char* path, struct fuse_file_info* fi) int result; int mask = (O_RDONLY != (fi->flags & O_ACCMODE) ? 
@@ -2156,7 +2212,7 @@ static int s3fs_opendir(const char* path, struct fuse_file_info* fi)
   int result;
   int mask = (O_RDONLY != (fi->flags & O_ACCMODE) ? W_OK : R_OK) | X_OK;

-  FPRN("[path=%s][flags=%d]", path, fi->flags);
+  S3FS_PRN_INFO("[path=%s][flags=%d]", path, fi->flags);

   if(0 == (result = check_object_access(path, mask, NULL))){
     result = check_parent_object_access(path, mask);
@@ -2173,7 +2229,7 @@ static bool multi_head_callback(S3fsCurl* s3fscurl)
   }
   string saved_path = s3fscurl->GetSpacialSavedPath();
   if(!StatCache::getStatCacheData()->AddStat(saved_path, *(s3fscurl->GetResponseHeaders()))){
-    DPRN("failed adding stat cache [path=%s]", saved_path.c_str());
+    S3FS_PRN_ERR("failed adding stat cache [path=%s]", saved_path.c_str());
     return false;
   }
   return true;
@@ -2184,23 +2240,19 @@ static S3fsCurl* multi_head_retry_callback(S3fsCurl* s3fscurl)
   if(!s3fscurl){
     return NULL;
   }
-  int ssec_key_pos = s3fscurl->GetLastPreHeadSeecKeyPos();
-  int next_retry_count = s3fscurl->GetMultipartRetryCount() + 1;
+  int ssec_key_pos= s3fscurl->GetLastPreHeadSeecKeyPos();
+  int retry_count = s3fscurl->GetMultipartRetryCount();

-  if(s3fscurl->IsOverMultipartRetryCount()){
-    if(S3fsCurl::IsSseCustomMode()){
-      // If sse-c mode, start check not sse-c(ssec_key_pos = -1).
-      // do increment ssec_key_pos for checking all sse-c key.
-      next_retry_count = 0;
-      ssec_key_pos++;
-      if(S3fsCurl::GetSseKeyCount() <= ssec_key_pos){
-        DPRN("Over retry count(%d) limit(%s).", s3fscurl->GetMultipartRetryCount(), s3fscurl->GetSpacialSavedPath().c_str());
-        return NULL;
-      }
-    }else{
-      DPRN("Over retry count(%d) limit(%s).", s3fscurl->GetMultipartRetryCount(), s3fscurl->GetSpacialSavedPath().c_str());
+  // retry next sse key.
+  // if end of sse key, set retry master count is up.
+  ssec_key_pos = (ssec_key_pos < 0 ? 0 : ssec_key_pos + 1);
+  if(0 == S3fsCurl::GetSseKeyCount() || S3fsCurl::GetSseKeyCount() <= ssec_key_pos){
+    if(s3fscurl->IsOverMultipartRetryCount()){
+      S3FS_PRN_ERR("Over retry count(%d) limit(%s).", s3fscurl->GetMultipartRetryCount(), s3fscurl->GetSpacialSavedPath().c_str());
       return NULL;
     }
+    ssec_key_pos= -1;
+    retry_count++;
   }

   S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe());
@@ -2209,11 +2261,11 @@ static S3fsCurl* multi_head_retry_callback(S3fsCurl* s3fscurl)
   string saved_path = s3fscurl->GetSpacialSavedPath();

   if(!newcurl->PreHeadRequest(path, base_path, saved_path, ssec_key_pos)){
-    DPRN("Could not duplicate curl object(%s).", saved_path.c_str());
+    S3FS_PRN_ERR("Could not duplicate curl object(%s).", saved_path.c_str());
     delete newcurl;
     return NULL;
   }
-  newcurl->SetMultipartRetryCount(next_retry_count);
+  newcurl->SetMultipartRetryCount(retry_count);

   return newcurl;
 }
@@ -2225,7 +2277,7 @@ static int readdir_multi_head(const char* path, S3ObjList& head, void* buf, fuse_fill_dir_t filler)
   s3obj_list_t fillerlist;
   int result = 0;

-  FPRNN("[path=%s][list=%zu]", path, headlist.size());
+  S3FS_PRN_INFO1("[path=%s][list=%zu]", path, headlist.size());

   // Make base path list.
   head.GetNameList(headlist, true, false);  // get name with "/".
@@ -2235,7 +2287,7 @@ static int readdir_multi_head(const char* path, S3ObjList& head, void* buf, fuse_fill_dir_t filler)
   curlmulti.SetRetryCallback(multi_head_retry_callback);

   // Loop
-  while(0 < headlist.size()){
+  while(!headlist.empty()){
     s3obj_list_t::iterator iter;
     long cnt;
@@ -2255,17 +2307,17 @@ static int readdir_multi_head(const char* path, S3ObjList& head, void* buf, fuse_fill_dir_t filler)
       continue;
     }

-    // First check for directory, start checking "not sse-c".
-    // If checking failed, retry to check with "sse-c" by retry callback func when sse-c mode.
+    // First check for directory, start checking "not SSE-C".
+    // If checking failed, retry to check with "SSE-C" by retry callback func when SSE-C mode.
     S3fsCurl* s3fscurl = new S3fsCurl();
     if(!s3fscurl->PreHeadRequest(disppath, (*iter), disppath)){    // target path = cache key path.(ex "dir/")
-      DPRNNN("Could not make curl object for head request(%s).", disppath.c_str());
+      S3FS_PRN_WARN("Could not make curl object for head request(%s).", disppath.c_str());
       delete s3fscurl;
       continue;
     }

     if(!curlmulti.SetS3fsCurlObject(s3fscurl)){
-      DPRNNN("Could not make curl object into multi curl(%s).", disppath.c_str());
+      S3FS_PRN_WARN("Could not make curl object into multi curl(%s).", disppath.c_str());
       delete s3fscurl;
       continue;
     }
@@ -2274,20 +2326,28 @@ static int readdir_multi_head(const char* path, S3ObjList& head, void* buf, fuse_fill_dir_t filler)

     // Multi request
     if(0 != (result = curlmulti.Request())){
-      DPRN("error occuered in multi request(errno=%d).", result);
-      break;
+      // If the result is -EIO, some error occurred.
+      // This case includes that the object is encrypted(SSE) and s3fs does not have keys.
+      // So s3fs sets result to 0 in order to continue the process.
+      if(-EIO == result){
+        S3FS_PRN_WARN("error occurred in multi request(errno=%d), but continue...", result);
+        result = 0;
+      }else{
+        S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result);
+        break;
+      }
     }

     // populate fuse buffer
     // here is best posision, because a case is cache size < files in directory
     //
-    for(iter = fillerlist.begin(); fillerlist.end() != iter; iter++){
+    for(iter = fillerlist.begin(); fillerlist.end() != iter; ++iter){
       struct stat st;
       string bpath = mybasename((*iter));
       if(StatCache::getStatCacheData()->GetStat((*iter), &st)){
         filler(buf, bpath.c_str(), &st, 0);
       }else{
-        FPRNNN("Could not find %s file in stat cache.", (*iter).c_str());
+        S3FS_PRN_INFO2("Could not find %s file in stat cache.", (*iter).c_str());
         filler(buf, bpath.c_str(), 0, 0);
       }
     }
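The rewritten retry callback above walks through every registered SSE-C key before it starts counting real retries: ssec_key_pos advances through 0, 1, ..., GetSseKeyCount()-1, and only once the keys are exhausted does it fall back to a plain (non-SSE-C, position -1) request and increment the retry counter. The decision can be modeled as a small state step; the sketch below uses hypothetical types purely to make the rotation order visible.

    // Hypothetical model of one retry decision. Returns false when the
    // request should be given up.
    struct RetryState { int ssec_key_pos; int retry_count; };

    static bool step_retry(RetryState& st, int sse_key_count, int max_retry)
    {
      // try the next registered SSE-C key first
      st.ssec_key_pos = (st.ssec_key_pos < 0) ? 0 : st.ssec_key_pos + 1;
      if(0 == sse_key_count || sse_key_count <= st.ssec_key_pos){
        // all keys tried (or none registered): this becomes a normal retry
        if(st.retry_count >= max_retry){
          return false;               // over the retry limit, give up
        }
        st.ssec_key_pos = -1;         // -1 means "no SSE-C key"
        st.retry_count++;
      }
      return true;
    }

This ordering also explains the new -EIO tolerance in readdir_multi_head(): a HEAD that fails for every key no longer aborts the whole directory listing.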
@@ -2301,10 +2361,9 @@
 static int s3fs_readdir(const char* path, void* buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info* fi)
 {
   S3ObjList head;
-  s3obj_list_t headlist;
   int result;

-  FPRN("[path=%s]", path);
+  S3FS_PRN_INFO("[path=%s]", path);

   if(0 != (result = check_object_access(path, X_OK, NULL))){
     return result;
@@ -2312,7 +2371,7 @@ static int s3fs_readdir(const char* path, void* buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info* fi)

   // get a list of all the objects
   if((result = list_bucket(path, head, "/")) != 0){
-    DPRN("list_bucket returns error(%d).", result);
+    S3FS_PRN_ERR("list_bucket returns error(%d).", result);
     return result;
   }
@@ -2329,7 +2388,7 @@ static int s3fs_readdir(const char* path, void* buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info* fi)
     strpath += "/";
   }
   if(0 != (result = readdir_multi_head(strpath.c_str(), head, buf, filler))){
-    DPRN("readdir_multi_head returns error(%d).", result);
+    S3FS_PRN_ERR("readdir_multi_head returns error(%d).", result);
   }
   S3FS_MALLOCTRIM(0);
@@ -2349,7 +2408,7 @@ static int list_bucket(const char* path, S3ObjList& head, const char* delimiter, bool check_content_only)
   xmlDocPtr doc;
   BodyData* body;

-  FPRNN("[path=%s]", path);
+  S3FS_PRN_INFO1("[path=%s]", path);

   if(delimiter && 0 < strlen(delimiter)){
     query_delimiter += "delimiter=";
@@ -2382,18 +2441,18 @@ static int list_bucket(const char* path, S3ObjList& head, const char* delimiter, bool check_content_only)

     // request
     if(0 != (result = s3fscurl.ListBucketRequest(path, each_query.c_str()))){
-      DPRN("ListBucketRequest returns with error.");
+      S3FS_PRN_ERR("ListBucketRequest returns with error.");
       return result;
     }
     body = s3fscurl.GetBodyData();

     // xmlDocPtr
     if(NULL == (doc = xmlReadMemory(body->str(), static_cast<int>(body->size()), "", NULL, 0))){
-      DPRN("xmlReadMemory returns with error.");
+      S3FS_PRN_ERR("xmlReadMemory returns with error.");
       return -1;
     }
     if(0 != append_objects_from_xml(path, doc, head)){
-      DPRN("append_objects_from_xml returns with error.");
+      S3FS_PRN_ERR("append_objects_from_xml returns with error.");
       xmlFreeDoc(doc);
       return -1;
     }
@@ -2408,7 +2467,7 @@ static int list_bucket(const char* path, S3ObjList& head, const char* delimiter, bool check_content_only)
       //
       string lastname;
       if(!head.GetLastName(lastname)){
-        DPRN("Could not find next marker, thus break loop.");
+        S3FS_PRN_WARN("Could not find next marker, thus break loop.");
         truncated = false;
       }else{
         next_marker = s3_realpath.substr(1);
@@ -2441,11 +2500,11 @@ static int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx,
   xmlNodeSetPtr content_nodes;

   if(NULL == (contents_xp = xmlXPathEvalExpression((xmlChar*)ex_contents, ctx))){
-    DPRNNN("xmlXPathEvalExpression returns null.");
+    S3FS_PRN_ERR("xmlXPathEvalExpression returns null.");
     return -1;
   }
   if(xmlXPathNodeSetIsEmpty(contents_xp->nodesetval)){
-    DPRNNN("contents_xp->nodesetval is empty.");
+    S3FS_PRN_WARN("contents_xp->nodesetval is empty.");
     S3FS_XMLXPATHFREEOBJECT(contents_xp);
     return 0;
   }
@@ -2460,11 +2519,11 @@ static int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx,
     // object name
     xmlXPathObjectPtr key;
     if(NULL == (key = xmlXPathEvalExpression((xmlChar*)ex_key, ctx))){
-      DPRNNN("key is null. but continue.");
+      S3FS_PRN_WARN("key is null. but continue.");
       continue;
     }
     if(xmlXPathNodeSetIsEmpty(key->nodesetval)){
-      DPRNNN("node is empty. but continue.");
+      S3FS_PRN_WARN("node is empty. but continue.");
       xmlXPathFreeObject(key);
       continue;
     }
@@ -2472,7 +2531,7 @@ static int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx,

     char* name = get_object_name(doc, key_nodes->nodeTab[0]->xmlChildrenNode, path);
     if(!name){
-      DPRNNN("name is something wrong. but continue.");
+      S3FS_PRN_WARN("name is something wrong. but continue.");
     }else if((const char*)name != c_strErrorObjectName){
       is_dir = isCPrefix ? true : false;
@@ -2483,7 +2542,7 @@ static int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx,
       xmlXPathObjectPtr ETag;
       if(NULL != (ETag = xmlXPathEvalExpression((xmlChar*)ex_etag, ctx))){
         if(xmlXPathNodeSetIsEmpty(ETag->nodesetval)){
-          DPRNNN("ETag->nodesetval is empty.");
+          S3FS_PRN_INFO("ETag->nodesetval is empty.");
         }else{
           xmlNodeSetPtr etag_nodes = ETag->nodesetval;
           xmlChar* petag = xmlNodeListGetString(doc, etag_nodes->nodeTab[0]->xmlChildrenNode, 1);
@@ -2496,7 +2555,7 @@ static int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx,
         }
       }
       if(!head.insert(name, (0 < stretag.length() ? stretag.c_str() : NULL), is_dir)){
-        DPRN("insert_object returns with error.");
+        S3FS_PRN_ERR("insert_object returns with error.");
         xmlXPathFreeObject(key);
         xmlXPathFreeObject(contents_xp);
         free(name);
@@ -2505,7 +2564,7 @@ static int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx,
       }
       free(name);
     }else{
-      DPRNINFO("name is file or subdir in dir. but continue.");
+      S3FS_PRN_WARN("name is file or subdir in dir. but continue.");
     }
     xmlXPathFreeObject(key);
   }
@@ -2584,7 +2643,7 @@ static int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head)
   if(-1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_contents.c_str(), ex_key.c_str(), ex_etag.c_str(), 0, head) ||
      -1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_cprefix.c_str(), ex_prefix.c_str(), NULL, 1, head) )
   {
-    DPRN("append_objects_from_xml_ex returns with error.");
+    S3FS_PRN_ERR("append_objects_from_xml_ex returns with error.");
     S3FS_XMLXPATHFREECONTEXT(ctx);
     return -1;
   }
@@ -2615,7 +2674,7 @@ static xmlChar* get_base_exp(xmlDocPtr doc, const char* exp)
     return NULL;
   }
   if(xmlXPathNodeSetIsEmpty(marker_xp->nodesetval)){
-    DPRNNN("marker_xp->nodesetval is empty.");
+    S3FS_PRN_ERR("marker_xp->nodesetval is empty.");
     xmlXPathFreeObject(marker_xp);
     xmlXPathFreeContext(ctx);
     return NULL;
@@ -2662,7 +2721,7 @@ static char* get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path)
   // Get full path
   xmlChar* fullpath = xmlNodeListGetString(doc, node, 1);
   if(!fullpath){
-    DPRN("could not get object full path name..");
+    S3FS_PRN_ERR("could not get object full path name..");
     return NULL;
   }
   // basepath(path) is as same as fullpath.
@@ -2672,7 +2731,6 @@ static char* get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path)
   }

   // Make dir path and filename
-  string strfullpath= (char*)fullpath;
   string strdirpath = mydirname(string((char*)fullpath));
   string strmybpath = mybasename(string((char*)fullpath));
   const char* dirpath = strdirpath.c_str();
@@ -2727,7 +2785,7 @@ static int remote_mountpath_exists(const char* path)
 {
   struct stat stbuf;

-  FPRNN("[path=%s]", path);
+  S3FS_PRN_INFO1("[path=%s]", path);

   // getattr will prefix the path with the remote mountpoint
   if(0 != get_object_attribute("/", &stbuf, NULL)){
@@ -2755,14 +2813,14 @@ static bool parse_xattr_keyval(const std::string& xattrpair, string& key, PXATTRVAL& pval)
   size_t pos;
   string tmpval;
   if(string::npos == (pos = xattrpair.find_first_of(":"))){
-    DPRNNN("one of xattr pair(%s) is wrong format.", xattrpair.c_str());
+    S3FS_PRN_ERR("one of xattr pair(%s) is wrong format.", xattrpair.c_str());
     return false;
   }
   key    = xattrpair.substr(0, pos);
   tmpval = xattrpair.substr(pos + 1);

   if(!takeout_str_dquart(key) || !takeout_str_dquart(tmpval)){
-    DPRNNN("one of xattr pair(%s) is wrong format.", xattrpair.c_str());
+    S3FS_PRN_ERR("one of xattr pair(%s) is wrong format.", xattrpair.c_str());
     return false;
   }
@@ -2789,7 +2847,7 @@ static size_t parse_xattrs(const std::string& strxattrs, xattrs_t& xattrs)
     endpos   = jsonxattrs.find_last_of("}");
   }
   if(startpos == string::npos || endpos == string::npos || endpos <= startpos){
-    DPRNNN("xattr header(%s) is not json format.", jsonxattrs.c_str());
+    S3FS_PRN_WARN("xattr header(%s) is not json format.", jsonxattrs.c_str());
     return 0;
   }
   restxattrs = jsonxattrs.substr(startpos + 1, endpos - (startpos + 1));
@@ -2845,7 +2903,8 @@ static int set_xattrs_to_header(headers_t& meta, const char* name, const char* value, size_t size, int flags)
   string strxattrs;
   xattrs_t xattrs;

-  if(meta.end() == meta.find("x-amz-meta-xattr")){
+  headers_t::iterator iter;
+  if(meta.end() == (iter = meta.find("x-amz-meta-xattr"))){
     if(XATTR_REPLACE == (flags & XATTR_REPLACE)){
       // there is no xattr header but flags is replace, so failure.
       return -ENOATTR;
@@ -2855,16 +2914,17 @@ static int set_xattrs_to_header(headers_t& meta, const char* name, const char* value, size_t size, int flags)
       // found xattr header but flags is only creating, so failure.
       return -EEXIST;
     }
-    strxattrs = meta["x-amz-meta-xattr"];
+    strxattrs = iter->second;
   }

   // get map as xattrs_t
   parse_xattrs(strxattrs, xattrs);

   // add name(do not care overwrite and empty name/value)
-  if(xattrs.end() != xattrs.find(string(name))){
+  xattrs_t::iterator xiter;
+  if(xattrs.end() != (xiter = xattrs.find(string(name)))){
     // found same head. free value.
-    delete xattrs[string(name)];
+    delete xiter->second;
   }

   PXATTRVAL pval = new XATTRVAL;
@@ -2895,10 +2955,10 @@ static int s3fs_setxattr(const char* path, const char* name, const char* value, size_t size, int flags, uint32_t position)
 static int s3fs_setxattr(const char* path, const char* name, const char* value, size_t size, int flags)
 #endif
 {
-  FPRN("[path=%s][name=%s][value=%p][size=%zu][flags=%d]", path, name, value, size, flags);
+  S3FS_PRN_INFO("[path=%s][name=%s][value=%p][size=%zu][flags=%d]", path, name, value, size, flags);

   if((value && 0 == size) || (!value && 0 < size)){
-    DPRN("Wrong parameter: value(%p), size(%zu)", value, size);
+    S3FS_PRN_ERR("Wrong parameter: value(%p), size(%zu)", value, size);
     return 0;
   }
@@ -2918,7 +2978,7 @@ static int s3fs_setxattr(const char* path, const char* name, const char* value, size_t size, int flags)
   int nDirType = DIRTYPE_UNKNOWN;

   if(0 == strcmp(path, "/")){
-    DPRNNN("Could not change mode for mount point.");
+    S3FS_PRN_ERR("Could not change mode for mount point.");
     return -EIO;
   }
   if(0 != (result = check_parent_object_access(path, X_OK))){
@@ -2985,7 +3045,7 @@ static int s3fs_getxattr(const char* path, const char* name, char* value, size_t size, uint32_t position)
 static int s3fs_getxattr(const char* path, const char* name, char* value, size_t size)
 #endif
 {
-  FPRN("[path=%s][name=%s][value=%p][size=%zu]", path, name, value, size);
+  S3FS_PRN_INFO("[path=%s][name=%s][value=%p][size=%zu]", path, name, value, size);

   if(!path || !name){
     return -EIO;
@@ -3056,7 +3116,7 @@ static int s3fs_getxattr(const char* path, const char* name, char* value, size_t size)

 static int s3fs_listxattr(const char* path, char* list, size_t size)
 {
-  FPRN("[path=%s][list=%p][size=%zu]", path, list, size);
+  S3FS_PRN_INFO("[path=%s][list=%p][size=%zu]", path, list, size);

   if(!path){
     return -EIO;
@@ -3077,11 +3137,12 @@ static int s3fs_listxattr(const char* path, char* list, size_t size)
   }

   // get xattrs
-  if(meta.end() == meta.find("x-amz-meta-xattr")){
+  headers_t::iterator iter;
+  if(meta.end() == (iter = meta.find("x-amz-meta-xattr"))){
     // object does not have xattrs
     return 0;
   }
-  string strxattrs = meta["x-amz-meta-xattr"];
+  string strxattrs = iter->second;

   parse_xattrs(strxattrs, xattrs);
@@ -3123,7 +3184,7 @@ static int s3fs_listxattr(const char* path, char* list, size_t size)

 static int s3fs_removexattr(const char* path, const char* name)
 {
-  FPRN("[path=%s][name=%s]", path, name);
+  S3FS_PRN_INFO("[path=%s][name=%s]", path, name);

   if(!path || !name){
     return -EIO;
@@ -3139,7 +3200,7 @@ static int s3fs_removexattr(const char* path, const char* name)
   int nDirType = DIRTYPE_UNKNOWN;

   if(0 == strcmp(path, "/")){
-    DPRNNN("Could not change mode for mount point.");
+    S3FS_PRN_ERR("Could not change mode for mount point.");
     return -EIO;
   }
   if(0 != (result = check_parent_object_access(path, X_OK))){
@@ -3182,7 +3243,7 @@ static int s3fs_removexattr(const char* path, const char* name)
   if(xiter->second){
     delete xiter->second;
   }
-  xattrs.erase(strname);
+  xattrs.erase(xiter);

   // build new xattr
   if(!xattrs.empty()){
(%s)", VERSION, s3fs_crypt_lib_name()); + S3FS_PRN_CRIT("init v%s(commit:%s) with %s", VERSION, COMMIT_HASH_VAL, s3fs_crypt_lib_name()); // ssl init if(!s3fs_init_global_ssl()){ - fprintf(stderr, "%s: could not initialize for ssl libraries.\n", program_name.c_str()); + S3FS_PRN_CRIT("could not initialize for ssl libraries."); exit(EXIT_FAILURE); } // init curl if(!S3fsCurl::InitS3fsCurl("/etc/mime.types")){ - fprintf(stderr, "%s: Could not initiate curl library.\n", program_name.c_str()); - LOWSYSLOGPRINT(LOG_ERR, "Could not initiate curl library."); + S3FS_PRN_CRIT("Could not initiate curl library."); exit(EXIT_FAILURE); } @@ -3271,22 +3330,22 @@ static void* s3fs_init(struct fuse_conn_info* conn) #endif // cache if(is_remove_cache && !FdManager::DeleteCacheDirectory()){ - DPRNINFO("Could not inilialize cache directory."); + S3FS_PRN_DBG("Could not inilialize cache directory."); } return NULL; } static void s3fs_destroy(void*) { - DPRN("destroy"); + S3FS_PRN_INFO("destroy"); // Destroy curl if(!S3fsCurl::DestroyS3fsCurl()){ - DPRN("Could not release curl library."); + S3FS_PRN_WARN("Could not release curl library."); } // cache if(is_remove_cache && !FdManager::DeleteCacheDirectory()){ - DPRN("Could not remove cache directory."); + S3FS_PRN_WARN("Could not remove cache directory."); } // ssl s3fs_destroy_global_ssl(); @@ -3294,7 +3353,7 @@ static void s3fs_destroy(void*) static int s3fs_access(const char* path, int mask) { - FPRN("[path=%s][mask=%s%s%s%s]", path, + S3FS_PRN_INFO("[path=%s][mask=%s%s%s%s]", path, ((mask & R_OK) == R_OK) ? "R_OK " : "", ((mask & W_OK) == W_OK) ? "W_OK " : "", ((mask & X_OK) == X_OK) ? "X_OK " : "", @@ -3317,18 +3376,18 @@ static xmlChar* get_exp_value_xml(xmlDocPtr doc, xmlXPathContextPtr ctx, const c // search exp_key tag if(NULL == (exp = xmlXPathEvalExpression((xmlChar*)exp_key, ctx))){ - DPRNNN("Could not find key(%s).", exp_key); + S3FS_PRN_ERR("Could not find key(%s).", exp_key); return NULL; } if(xmlXPathNodeSetIsEmpty(exp->nodesetval)){ - DPRNNN("Key(%s) node is empty.", exp_key); + S3FS_PRN_ERR("Key(%s) node is empty.", exp_key); S3FS_XMLXPATHFREEOBJECT(exp); return NULL; } // get exp_key value & set in struct exp_nodes = exp->nodesetval; if(NULL == (exp_value = xmlNodeListGetString(doc, exp_nodes->nodeTab[0]->xmlChildrenNode, 1))){ - DPRNNN("Key(%s) value is empty.", exp_key); + S3FS_PRN_ERR("Key(%s) value is empty.", exp_key); S3FS_XMLXPATHFREEOBJECT(exp); return NULL; } @@ -3343,11 +3402,11 @@ static void print_uncomp_mp_list(uncomp_mp_list_t& list) printf("Lists the parts that have been uploaded for a specific multipart upload.\n"); printf("\n"); - if(0 < list.size()){ + if(!list.empty()){ printf("---------------------------------------------------------------\n"); int cnt = 0; - for(uncomp_mp_list_t::iterator iter = list.begin(); iter != list.end(); iter++, cnt++){ + for(uncomp_mp_list_t::iterator iter = list.begin(); iter != list.end(); ++iter, ++cnt){ printf(" Path : %s\n", (*iter).key.c_str()); printf(" UploadId : %s\n", (*iter).id.c_str()); printf(" Date : %s\n", (*iter).date.c_str()); @@ -3364,7 +3423,7 @@ static bool abort_uncomp_mp_list(uncomp_mp_list_t& list) { char buff[1024]; - if(0 >= list.size()){ + if(list.empty()){ return true; } memset(buff, 0, sizeof(buff)); @@ -3385,12 +3444,12 @@ static bool abort_uncomp_mp_list(uncomp_mp_list_t& list) // do removing their. 
S3fsCurl s3fscurl; bool result = true; - for(uncomp_mp_list_t::iterator iter = list.begin(); iter != list.end(); iter++){ + for(uncomp_mp_list_t::iterator iter = list.begin(); iter != list.end(); ++iter){ const char* tpath = (*iter).key.c_str(); string upload_id = (*iter).id; if(0 != s3fscurl.AbortMultipartUpload(tpath, upload_id)){ - fprintf(stderr, "Failed to remove %s multipart uploading object.\n", tpath); + S3FS_PRN_EXIT("Failed to remove %s multipart uploading object.", tpath); result = false; }else{ printf("Succeed to remove %s multipart uploading object.\n", tpath); @@ -3432,11 +3491,11 @@ static bool get_uncomp_mp_list(xmlDocPtr doc, uncomp_mp_list_t& list) // get "Upload" Tags xmlXPathObjectPtr upload_xp; if(NULL == (upload_xp = xmlXPathEvalExpression((xmlChar*)ex_upload.c_str(), ctx))){ - DPRNNN("xmlXPathEvalExpression returns null."); + S3FS_PRN_ERR("xmlXPathEvalExpression returns null."); return false; } if(xmlXPathNodeSetIsEmpty(upload_xp->nodesetval)){ - DPRNNN("upload_xp->nodesetval is empty."); + S3FS_PRN_INFO("upload_xp->nodesetval is empty."); S3FS_XMLXPATHFREEOBJECT(upload_xp); S3FS_XMLXPATHFREECONTEXT(ctx); return true; @@ -3495,14 +3554,13 @@ static int s3fs_utility_mode(void) // ssl init if(!s3fs_init_global_ssl()){ - fprintf(stderr, "%s: could not initialize for ssl libraries.\n", program_name.c_str()); + S3FS_PRN_EXIT("could not initialize for ssl libraries."); return EXIT_FAILURE; } // init curl if(!S3fsCurl::InitS3fsCurl("/etc/mime.types")){ - fprintf(stderr, "%s: Could not initiate curl library.\n", program_name.c_str()); - LOWSYSLOGPRINT(LOG_ERR, "Could not initiate curl library."); + S3FS_PRN_EXIT("Could not initiate curl library."); s3fs_destroy_global_ssl(); return EXIT_FAILURE; } @@ -3513,22 +3571,22 @@ string body; int result = EXIT_SUCCESS; if(0 != s3fscurl.MultipartListRequest(body)){ - fprintf(stderr, "%s: Could not get list multipart upload.\n", program_name.c_str()); + S3FS_PRN_EXIT("Could not get list multipart upload."); result = EXIT_FAILURE; }else{ // perse result(uncomplete multipart upload information) - FPRNINFO("response body = {\n%s\n}", body.c_str()); + S3FS_PRN_DBG("response body = {\n%s\n}", body.c_str()); xmlDocPtr doc; if(NULL == (doc = xmlReadMemory(body.c_str(), static_cast<int>(body.size()), "", NULL, 0))){ - DPRN("xmlReadMemory exited with error."); + S3FS_PRN_DBG("xmlReadMemory exited with error."); result = EXIT_FAILURE; }else{ // make working uploads list uncomp_mp_list_t list; if(!get_uncomp_mp_list(doc, list)){ - DPRN("get_uncomp_mp_list exited with error."); + S3FS_PRN_DBG("get_uncomp_mp_list exited with error."); result = EXIT_FAILURE; }else{ @@ -3536,7 +3594,7 @@ print_uncomp_mp_list(list); // remove if(!abort_uncomp_mp_list(list)){ - DPRN("an error occurred during removal process."); + S3FS_PRN_DBG("an error occurred during removal process."); result = EXIT_FAILURE; } } @@ -3546,7 +3604,7 @@ // Destroy curl if(!S3fsCurl::DestroyS3fsCurl()){ - DPRN("Could not release curl library."); + S3FS_PRN_WARN("Could not release curl library."); } // ssl @@ -3592,11 +3650,11 @@ static bool check_region_error(const char* pbody, string& expectregion) static int s3fs_check_service(void) { - FPRN("check services."); + S3FS_PRN_INFO("check services."); // At first time for access S3, we check IAM role if it sets.
if(!S3fsCurl::CheckIAMCredentialUpdate()){ - fprintf(stderr, "%s: Failed to check IAM role name(%s).\n", program_name.c_str(), S3fsCurl::GetIAMRole()); + S3FS_PRN_CRIT("Failed to check IAM role name(%s).", S3fsCurl::GetIAMRole()); return EXIT_FAILURE; } @@ -3613,8 +3671,7 @@ static int s3fs_check_service(void) string expectregion; if(check_region_error(body->str(), expectregion)){ // not specified endpoint, so try to connect to expected region. - LOWSYSLOGPRINT(LOG_ERR, "Could not connect wrong region %s, so retry to connect region %s.", endpoint.c_str(), expectregion.c_str()); - FPRN("Could not connect wrong region %s, so retry to connect region %s.", endpoint.c_str(), expectregion.c_str()); + S3FS_PRN_CRIT("Could not connect wrong region %s, so retry to connect region %s.", endpoint.c_str(), expectregion.c_str()); endpoint = expectregion; if(S3fsCurl::IsSignatureV4()){ if(host == "http://s3.amazonaws.com"){ @@ -3634,8 +3691,7 @@ static int s3fs_check_service(void) // try signature v2 if(0 > res && (responseCode == 400 || responseCode == 403) && S3fsCurl::IsSignatureV4()){ // switch sigv2 - LOWSYSLOGPRINT(LOG_ERR, "Could not connect, so retry to connect by signature version 2."); - FPRN("Could not connect, so retry to connect by signature version 2."); + S3FS_PRN_WARN("Could not connect, so retry to connect by signature version 2."); S3fsCurl::SetSignatureV4(false); // retry to check with sigv2 @@ -3647,25 +3703,25 @@ static int s3fs_check_service(void) // check errors(after retrying) if(0 > res && responseCode != 200 && responseCode != 301){ if(responseCode == 400){ - fprintf(stderr, "%s: Bad Request\n", program_name.c_str()); + S3FS_PRN_CRIT("Bad Request - result of checking service."); return EXIT_FAILURE; } if(responseCode == 403){ - fprintf(stderr, "%s: invalid credentials\n", program_name.c_str()); + S3FS_PRN_CRIT("invalid credentials - result of checking service."); return EXIT_FAILURE; } if(responseCode == 404){ - fprintf(stderr, "%s: bucket not found\n", program_name.c_str()); + S3FS_PRN_CRIT("bucket not found - result of checking service."); return EXIT_FAILURE; } // unable to connect if(responseCode == CURLE_OPERATION_TIMEDOUT){ - fprintf(stderr, "%s: unable to connect bucket and timeout\n", program_name.c_str()); + S3FS_PRN_CRIT("unable to connect bucket and timeout - result of checking service."); return EXIT_FAILURE; } // another error - fprintf(stderr, "%s: unable to connect\n", program_name.c_str()); + S3FS_PRN_CRIT("unable to connect - result of checking service."); return EXIT_FAILURE; } } @@ -3673,8 +3729,7 @@ static int s3fs_check_service(void) // make sure remote mountpath exists and is a directory if(mount_prefix.size() > 0){ if(remote_mountpath_exists(mount_prefix.c_str()) != 0){ - fprintf(stderr, "%s: remote mountpath %s not found.\n", - program_name.c_str(), mount_prefix.c_str()); + S3FS_PRN_CRIT("remote mountpath %s not found.", mount_prefix.c_str()); return EXIT_FAILURE; } } @@ -3717,15 +3772,13 @@ static int check_for_aws_format(void) first_pos = line.find_first_of(" \t"); if(first_pos != string::npos){ - printf ("%s: invalid line in passwd file, found whitespace character\n", - program_name.c_str()); + S3FS_PRN_EXIT("invalid line in passwd file, found whitespace character."); return -1; } first_pos = line.find_first_of("["); if(first_pos != string::npos && first_pos == 0){ - printf ("%s: invalid line in passwd file, found a bracket \"[\" character\n", - program_name.c_str()); + S3FS_PRN_EXIT("invalid line in passwd file, found a bracket \"[\" character."); 
return -1; } @@ -3749,7 +3802,7 @@ static int check_for_aws_format(void) if(got_access_key_id_line && got_secret_key_line){ if(!S3fsCurl::SetAccessKey(AccessKeyId.c_str(), SecretAccesskey.c_str())){ - fprintf(stderr, "%s: if one access key is specified, both keys need to be specified\n", program_name.c_str()); + S3FS_PRN_EXIT("if one access key is specified, both keys need to be specified."); return 0; } return 1; @@ -3775,8 +3828,7 @@ static int check_passwd_file_perms(void) // let's get the file info if(stat(passwd_file.c_str(), &info) != 0){ - fprintf (stderr, "%s: unexpected error from stat(%s, ) \n", - program_name.c_str(), passwd_file.c_str()); + S3FS_PRN_EXIT("unexpected error from stat(%s).", passwd_file.c_str()); return EXIT_FAILURE; } @@ -3784,8 +3836,7 @@ static int check_passwd_file_perms(void) if( (info.st_mode & S_IROTH) || (info.st_mode & S_IWOTH) || (info.st_mode & S_IXOTH)) { - fprintf (stderr, "%s: credentials file %s should not have others permissions\n", - program_name.c_str(), passwd_file.c_str()); + S3FS_PRN_EXIT("credentials file %s should not have others permissions.", passwd_file.c_str()); return EXIT_FAILURE; } @@ -3795,21 +3846,18 @@ static int check_passwd_file_perms(void) if( (info.st_mode & S_IRGRP) || (info.st_mode & S_IWGRP) || (info.st_mode & S_IXGRP)) { - fprintf (stderr, "%s: credentials file %s should not have group permissions\n", - program_name.c_str(), passwd_file.c_str()); + S3FS_PRN_EXIT("credentials file %s should not have group permissions.", passwd_file.c_str()); return EXIT_FAILURE; } }else{ // "/etc/passwd-s3fs" does not allow group write. if((info.st_mode & S_IWGRP)){ - fprintf (stderr, "%s: credentials file %s should not have group writable permissions\n", - program_name.c_str(), passwd_file.c_str()); + S3FS_PRN_EXIT("credentials file %s should not have group writable permissions.", passwd_file.c_str()); return EXIT_FAILURE; } } if((info.st_mode & S_IXUSR) || (info.st_mode & S_IXGRP)){ - fprintf (stderr, "%s: credentials file %s should not have executable permissions\n", - program_name.c_str(), passwd_file.c_str()); + S3FS_PRN_EXIT("credentials file %s should not have executable permissions.", passwd_file.c_str()); return EXIT_FAILURE; } return EXIT_SUCCESS; @@ -3872,22 +3920,19 @@ static int read_passwd_file(void) first_pos = line.find_first_of(" \t"); if(first_pos != string::npos){ - printf ("%s: invalid line in passwd file, found whitespace character\n", - program_name.c_str()); + S3FS_PRN_EXIT("invalid line in passwd file, found whitespace character."); return EXIT_FAILURE; } first_pos = line.find_first_of("["); if(first_pos != string::npos && first_pos == 0){ - printf ("%s: invalid line in passwd file, found a bracket \"[\" character\n", - program_name.c_str()); + S3FS_PRN_EXIT("invalid line in passwd file, found a bracket \"[\" character."); return EXIT_FAILURE; } first_pos = line.find_first_of(":"); if(first_pos == string::npos){ - printf ("%s: invalid line in passwd file, no \":\" separator found\n", - program_name.c_str()); + S3FS_PRN_EXIT("invalid line in passwd file, no \":\" separator found."); return EXIT_FAILURE; } last_pos = line.find_last_of(":"); @@ -3900,8 +3945,7 @@ static int read_passwd_file(void) }else{ // no bucket specified - original style - found default key if(default_found == 1){ - printf ("%s: more than one default key pair found in passwd file\n", - program_name.c_str()); + S3FS_PRN_EXIT("more than one default key pair found in passwd file."); return EXIT_FAILURE; } default_found = 1; @@ -3909,7 +3953,7 @@ static 
int read_passwd_file(void) field2 = line.substr(0,first_pos); field3 = line.substr(first_pos + 1, string::npos); if(!S3fsCurl::SetAccessKey(field2.c_str(), field3.c_str())){ - fprintf(stderr, "%s: if one access key is specified, both keys need to be specified\n", program_name.c_str()); + S3FS_PRN_EXIT("if one access key is specified, both keys need to be specified."); return EXIT_FAILURE; } } @@ -3919,7 +3963,7 @@ static int read_passwd_file(void) // will be used if(field1.size() != 0 && field1 == bucket){ if(!S3fsCurl::SetAccessKey(field2.c_str(), field3.c_str())){ - fprintf(stderr, "%s: if one access key is specified, both keys need to be specified\n", program_name.c_str()); + S3FS_PRN_EXIT("if one access key is specified, both keys need to be specified."); return EXIT_FAILURE; } break; @@ -3963,8 +4007,7 @@ static int get_access_keys(void) PF.close(); return read_passwd_file(); }else{ - fprintf(stderr, "%s: specified passwd_file is not readable\n", - program_name.c_str()); + S3FS_PRN_EXIT("specified passwd_file is not readable."); return EXIT_FAILURE; } } @@ -3975,13 +4018,11 @@ static int get_access_keys(void) if(AWSACCESSKEYID != NULL || AWSSECRETACCESSKEY != NULL){ if( (AWSACCESSKEYID == NULL && AWSSECRETACCESSKEY != NULL) || (AWSACCESSKEYID != NULL && AWSSECRETACCESSKEY == NULL) ){ - - fprintf(stderr, "%s: if environment variable AWSACCESSKEYID is set then AWSSECRETACCESSKEY must be set too\n", - program_name.c_str()); + S3FS_PRN_EXIT("if environment variable AWSACCESSKEYID is set then AWSSECRETACCESSKEY must be set too."); return EXIT_FAILURE; } if(!S3fsCurl::SetAccessKey(AWSACCESSKEYID, AWSSECRETACCESSKEY)){ - fprintf(stderr, "%s: if one access key is specified, both keys need to be specified\n", program_name.c_str()); + S3FS_PRN_EXIT("if one access key is specified, both keys need to be specified."); return EXIT_FAILURE; } return EXIT_SUCCESS; @@ -3998,8 +4039,7 @@ static int get_access_keys(void) PF.close(); return read_passwd_file(); }else{ - fprintf(stderr, "%s: AWS_CREDENTIAL_FILE: \"%s\" is not readable\n", - program_name.c_str(), passwd_file.c_str()); + S3FS_PRN_EXIT("AWS_CREDENTIAL_FILE: \"%s\" is not readable.", passwd_file.c_str()); return EXIT_FAILURE; } } @@ -4033,9 +4073,8 @@ static int get_access_keys(void) PF.close(); return read_passwd_file(); } - - fprintf(stderr, "%s: could not determine how to establish security credentials\n", - program_name.c_str()); + S3FS_PRN_EXIT("could not determine how to establish security credentials."); + return EXIT_FAILURE; } @@ -4048,7 +4087,7 @@ static int set_moutpoint_attribute(struct stat& mpst) mp_gid = getegid(); mp_mode = S_IFDIR | (allow_other ? (is_mp_umask ? 
(~mp_umask & (S_IRWXU | S_IRWXG | S_IRWXO)) : (S_IRWXU | S_IRWXG | S_IRWXO)) : S_IRWXU); - FPRNNN("PROC(uid=%u, gid=%u) - MountPoint(uid=%u, gid=%u, mode=%04o)", + S3FS_PRN_INFO2("PROC(uid=%u, gid=%u) - MountPoint(uid=%u, gid=%u, mode=%04o)", (unsigned int)mp_uid, (unsigned int)mp_gid, (unsigned int)(mpst.st_uid), (unsigned int)(mpst.st_gid), mpst.st_mode); // check owner @@ -4086,7 +4125,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar char* pmount_prefix = strtok(NULL, ":"); if(pmount_prefix){ if(0 == strlen(pmount_prefix) || '/' != pmount_prefix[0]){ - fprintf(stderr, "%s: path(%s) must be prefix \"/\".\n", program_name.c_str(), pmount_prefix); + S3FS_PRN_EXIT("path(%s) must be prefix \"/\".", pmount_prefix); return -1; } mount_prefix = pmount_prefix; @@ -4108,18 +4147,15 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar struct stat stbuf; if(stat(arg, &stbuf) == -1){ - fprintf(stderr, "%s: unable to access MOUNTPOINT %s: %s\n", - program_name.c_str(), mountpoint.c_str(), strerror(errno)); + S3FS_PRN_EXIT("unable to access MOUNTPOINT %s: %s", mountpoint.c_str(), strerror(errno)); return -1; } if(!(S_ISDIR(stbuf.st_mode))){ - fprintf(stderr, "%s: MOUNTPOINT: %s is not a directory\n", - program_name.c_str(), mountpoint.c_str()); + S3FS_PRN_EXIT("MOUNTPOINT: %s is not a directory.", mountpoint.c_str()); return -1; } if(!set_moutpoint_attribute(stbuf)){ - fprintf(stderr, "%s: MOUNTPOINT: %s permission denied.\n", - program_name.c_str(), mountpoint.c_str()); + S3FS_PRN_EXIT("MOUNTPOINT: %s permission denied.", mountpoint.c_str()); return -1; } @@ -4127,16 +4163,13 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar struct dirent *ent; DIR *dp = opendir(mountpoint.c_str()); if(dp == NULL){ - fprintf(stderr, "%s: failed to open MOUNTPOINT: %s: %s\n", - program_name.c_str(), mountpoint.c_str(), strerror(errno)); + S3FS_PRN_EXIT("failed to open MOUNTPOINT: %s: %s", mountpoint.c_str(), strerror(errno)); return -1; } while((ent = readdir(dp)) != NULL){ if(strcmp(ent->d_name, ".") != 0 && strcmp(ent->d_name, "..") != 0){ closedir(dp); - fprintf(stderr, "%s: MOUNTPOINT directory %s is not empty.\n" - "%s: if you are sure this is safe, can use the 'nonempty' mount option.\n", - program_name.c_str(), mountpoint.c_str(), program_name.c_str()); + S3FS_PRN_EXIT("MOUNTPOINT directory %s is not empty. if you are sure this is safe, can use the 'nonempty' mount option.", mountpoint.c_str()); return -1; } } @@ -4147,11 +4180,9 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar // Unknow option if(0 == utility_mode){ - fprintf(stderr, "%s: specified unknown third optioni(%s).\n", program_name.c_str(), arg); + S3FS_PRN_EXIT("specified unknown third optioni(%s).", arg); }else{ - fprintf(stderr, "%s: specified unknown second optioni(%s).\n" - "%s: you don't need to specify second option(mountpoint) for utility mode(-u).\n", - program_name.c_str(), arg, program_name.c_str()); + S3FS_PRN_EXIT("specified unknown second optioni(%s). 
you don't need to specify second option(mountpoint) for utility mode(-u).", arg); } return -1; @@ -4159,7 +4190,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar if(0 == STR2NCMP(arg, "uid=")){ s3fs_uid = get_uid(strchr(arg, '=') + sizeof(char)); if(0 != geteuid() && 0 == s3fs_uid){ - fprintf(stderr, "%s: root user can only specify uid=0.\n", program_name.c_str()); + S3FS_PRN_EXIT("root user can only specify uid=0."); return -1; } is_s3fs_uid = true; @@ -4168,7 +4199,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar if(0 == STR2NCMP(arg, "gid=")){ s3fs_gid = get_gid(strchr(arg, '=') + sizeof(char)); if(0 != getegid() && 0 == s3fs_gid){ - fprintf(stderr, "%s: root user can only specify gid=0.\n", program_name.c_str()); + S3FS_PRN_EXIT("root user can only specify gid=0."); return -1; } is_s3fs_gid = true; @@ -4220,6 +4251,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar nomultipart = true; return 0; } + // old format for storage_class if(0 == strcmp(arg, "use_rrs") || 0 == STR2NCMP(arg, "use_rrs=")){ off_t rrs = 1; // for an old format. @@ -4227,66 +4259,141 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar rrs = s3fs_strtoofft(strchr(arg, '=') + sizeof(char)); } if(0 == rrs){ - S3fsCurl::SetUseRrs(false); + S3fsCurl::SetStorageClass(STANDARD); }else if(1 == rrs){ - if(S3fsCurl::GetUseSse()){ - fprintf(stderr, "%s: use_rrs option could not be specified with use_sse.\n", program_name.c_str()); - return -1; - } - S3fsCurl::SetUseRrs(true); + S3fsCurl::SetStorageClass(REDUCED_REDUNDANCY); }else{ - fprintf(stderr, "%s: poorly formed argument to option: use_rrs\n", program_name.c_str()); + S3FS_PRN_EXIT("poorly formed argument to option: use_rrs"); return -1; } return 0; } - if(0 == strcmp(arg, "use_sse") || 0 == STR2NCMP(arg, "use_sse=")){ - if(0 == STR2NCMP(arg, "use_sse=")){ - if(S3fsCurl::GetUseRrs()){ - fprintf(stderr, "%s: use_sse option could not be specified with use_rrs.\n", program_name.c_str()); - return -1; - } - const char* ssecfile = &arg[strlen("use_sse=")]; - if(0 == strcmp(ssecfile, "1")){ - if(S3fsCurl::IsSseCustomMode()){ - fprintf (stderr, "%s: already set SSE-C key by environment, and confrict use_sse option.\n", program_name.c_str()); - return -1; - } - S3fsCurl::SetUseSse(true); - }else{ - // testing sse-c, try to load AES256 keys - struct stat st; - if(0 != stat(ssecfile, &st)){ - fprintf (stderr, "%s: could not open use_sse keys file(%s)\n", program_name.c_str(), ssecfile); - return -1; - } - if(st.st_mode & (S_IXUSR | S_IRWXG | S_IRWXO)){ - fprintf (stderr, "%s: use_sse keys file %s should be 0600 permissions\n", program_name.c_str(), ssecfile); - return -1; - } - if(!S3fsCurl::SetSseKeys(ssecfile)){ - fprintf (stderr, "%s: failed to load use_sse keys file %s\n", program_name.c_str(), ssecfile); - return -1; - } - } + if(0 == STR2NCMP(arg, "storage_class=")){ + const char *storage_class = strchr(arg, '=') + sizeof(char); + if(0 == strcmp(storage_class, "standard")){ + S3fsCurl::SetStorageClass(STANDARD); + }else if(0 == strcmp(storage_class, "standard_ia")){ + S3fsCurl::SetStorageClass(STANDARD_IA); + }else if(0 == strcmp(storage_class, "reduced_redundancy")){ + S3fsCurl::SetStorageClass(REDUCED_REDUNDANCY); }else{ - if(S3fsCurl::GetUseRrs()){ - fprintf(stderr, "%s: use_sse option could not be specified with use_rrs.\n", program_name.c_str()); - return -1; - } - if(S3fsCurl::IsSseCustomMode()){ - fprintf (stderr, "%s: already set 
SSE-C key by environment, and confrict use_sse option.\n", program_name.c_str()); - return -1; - } - S3fsCurl::SetUseSse(true); + S3FS_PRN_EXIT("unknown value for storage_class: %s", storage_class); + return -1; } return 0; } + // + // [NOTE] + // use_sse Set Server Side Encrypting type to SSE-S3 + // use_sse=1 + // use_sse=file Set Server Side Encrypting type to Custom key(SSE-C) and load custom keys + // use_sse=custom(c):file + // use_sse=custom(c) Set Server Side Encrypting type to Custom key(SSE-C) + // use_sse=kmsid(k):kms-key-id Set Server Side Encrypting type to AWS Key Management key id(SSE-KMS) and load KMS id + // use_sse=kmsid(k) Set Server Side Encrypting type to AWS Key Management key id(SSE-KMS) + // + // load_sse_c=file Load Server Side Encrypting custom keys + // + // AWSSSECKEYS Loading Environment for Server Side Encrypting custom keys + // AWSSSEKMSID Loading Environment for Server Side Encrypting Key id + //
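+ // [EXAMPLE] Editorial sketch, not part of the original patch; the bucket,
+ // mountpoint, key file path and kms id below are illustrative placeholders:
+ //   s3fs mybucket /mnt/s3 -o use_sse                           -> SSE-S3
+ //   s3fs mybucket /mnt/s3 -o use_sse=custom:/path/sse-c.keys   -> SSE-C, keys from file
+ //   s3fs mybucket /mnt/s3 -o use_sse=kmsid:my-kms-key-id       -> SSE-KMS, explicit key id
+ //   s3fs mybucket /mnt/s3 -o load_sse_c=/path/sse-c.keys       -> decrypt-only SSE-C keys
+ //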
+ if(0 == STR2NCMP(arg, "use_sse")){ + if(0 == strcmp(arg, "use_sse") || 0 == strcmp(arg, "use_sse=1")){ // use_sse=1 is the old style parameter + // sse type is SSE_S3 + if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseS3Type()){ + S3FS_PRN_EXIT("another SSE type is already set, so this conflicts with the use_sse option or environment."); + return -1; + } + S3fsCurl::SetSseType(SSE_S3); + + }else if(0 == strcmp(arg, "use_sse=kmsid") || 0 == strcmp(arg, "use_sse=k")){ + // sse type is SSE_KMS without kmsid (expecting the id to be loaded from the environment) + if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseKmsType()){ + S3FS_PRN_EXIT("another SSE type is already set, so this conflicts with the use_sse option or environment."); + return -1; + } + if(!S3fsCurl::IsSetSseKmsId()){ + S3FS_PRN_EXIT("use_sse=kmsid, but no kms id is loaded from the environment."); + return -1; + } + S3fsCurl::SetSseType(SSE_KMS); + + }else if(0 == STR2NCMP(arg, "use_sse=kmsid:") || 0 == STR2NCMP(arg, "use_sse=k:")){ + // sse type is SSE_KMS with kmsid + if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseKmsType()){ + S3FS_PRN_EXIT("another SSE type is already set, so this conflicts with the use_sse option or environment."); + return -1; + } + const char* kmsid; + if(0 == STR2NCMP(arg, "use_sse=kmsid:")){ + kmsid = &arg[strlen("use_sse=kmsid:")]; + }else{ + kmsid = &arg[strlen("use_sse=k:")]; + } + if(!S3fsCurl::SetSseKmsid(kmsid)){ + S3FS_PRN_EXIT("failed to load use_sse kms id."); + return -1; + } + S3fsCurl::SetSseType(SSE_KMS); + + }else if(0 == strcmp(arg, "use_sse=custom") || 0 == strcmp(arg, "use_sse=c")){ + // sse type is SSE_C without custom keys (expecting the keys to be loaded from the environment or the load_sse_c option) + if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseCType()){ + S3FS_PRN_EXIT("another SSE type is already set, so this conflicts with the use_sse option or environment."); + return -1; + } + // [NOTE] + // do not check whether the custom keys exist here. + // + S3fsCurl::SetSseType(SSE_C); + + }else if(0 == STR2NCMP(arg, "use_sse=custom:") || 0 == STR2NCMP(arg, "use_sse=c:")){ + // sse type is SSE_C with custom keys + if(!S3fsCurl::IsSseDisable() && !S3fsCurl::IsSseCType()){ + S3FS_PRN_EXIT("another SSE type is already set, so this conflicts with the use_sse option or environment."); + return -1; + } + const char* ssecfile; + if(0 == STR2NCMP(arg, "use_sse=custom:")){ + ssecfile = &arg[strlen("use_sse=custom:")]; + }else{ + ssecfile = &arg[strlen("use_sse=c:")]; + } + if(!S3fsCurl::SetSseCKeys(ssecfile)){ + S3FS_PRN_EXIT("failed to load use_sse custom key file(%s).", ssecfile); + return -1; + } + S3fsCurl::SetSseType(SSE_C); + + }else if(0 == STR2NCMP(arg, "use_sse=")){ // this is the old style (the parameter is the custom key file path) + // SSE_C with custom keys. + const char* ssecfile = &arg[strlen("use_sse=")]; + if(!S3fsCurl::SetSseCKeys(ssecfile)){ + S3FS_PRN_EXIT("failed to load use_sse custom key file(%s).", ssecfile); + return -1; + } + S3fsCurl::SetSseType(SSE_C); + + }else{ + // never come here. + S3FS_PRN_EXIT("something is wrong with the use_sse option."); + return -1; + } + return 0; + } + // [NOTE] + // Only load the SSE custom keys here; this may be set without setting the SSE type. + if(0 == STR2NCMP(arg, "load_sse_c=")){ + const char* ssecfile = &arg[strlen("load_sse_c=")]; + if(!S3fsCurl::SetSseCKeys(ssecfile)){ + S3FS_PRN_EXIT("failed to load use_sse custom key file(%s).", ssecfile); + return -1; + } + return 0; + } if(0 == STR2NCMP(arg, "ssl_verify_hostname=")){ long sslvh = static_cast<long>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char))); if(-1 == S3fsCurl::SetSslVerifyHostname(sslvh)){ - fprintf(stderr, "%s: poorly formed argument to option: ssl_verify_hostname\n", - program_name.c_str()); + S3FS_PRN_EXIT("poorly formed argument to option: ssl_verify_hostname."); return -1; } return 0; @@ -4307,8 +4414,7 @@ }else if(0 == pubbucket){ S3fsCurl::SetPublicBucket(false); }else{ - fprintf(stderr, "%s: poorly formed argument to option: public_bucket\n", - program_name.c_str()); + S3FS_PRN_EXIT("poorly formed argument to option: public_bucket."); return -1; } return 0; @@ -4360,43 +4466,43 @@ if(0 == STR2NCMP(arg, "parallel_count=") || 0 == STR2NCMP(arg, "parallel_upload=")){ int maxpara = static_cast<int>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char))); if(0 >= maxpara){ - fprintf(stderr, "%s: argument should be over 1: parallel_count\n", - program_name.c_str()); + S3FS_PRN_EXIT("argument should be over 1: parallel_count"); return -1; } S3fsCurl::SetMaxParallelCount(maxpara); - - if(FdManager::GetPageSize() < static_cast<size_t>(S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount())){ - FdManager::SetPageSize(static_cast<size_t>(S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount())); - } return 0; } if(0 == STR2NCMP(arg, "fd_page_size=")){ - size_t pagesize = static_cast<size_t>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char))); - if(pagesize < static_cast<size_t>(S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount())){ - fprintf(stderr, "%s: argument should be over 1MB: fd_page_size\n", - program_name.c_str()); - return -1; - } - FdManager::SetPageSize(pagesize); + S3FS_PRN_ERR("option fd_page_size is no longer supported, so skip this option."); return 0; } if(0 == STR2NCMP(arg, "multipart_size=")){ off_t size = static_cast<off_t>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char))); if(!S3fsCurl::SetMultipartSize(size)){ - fprintf(stderr, "%s:
multipart_size option must be at least 10 MB\n", program_name.c_str()); + S3FS_PRN_EXIT("multipart_size option must be at least 5 MB."); return -1; } - if(FdManager::GetPageSize() < static_cast<size_t>(S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount())){ - FdManager::SetPageSize(static_cast<size_t>(S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount())); + // update ensure free disk space if it is not set. + FdManager::InitEnsureFreeDiskSpace(); + return 0; + } + if(0 == STR2NCMP(arg, "ensure_diskfree=")){ + size_t dfsize = static_cast<size_t>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char))) * 1024 * 1024; + if(dfsize < static_cast<size_t>(S3fsCurl::GetMultipartSize())){ + S3FS_PRN_WARN("the specified ensure_diskfree size is smaller than the multipart size, so the multipart size is used instead."); + dfsize = static_cast<size_t>(S3fsCurl::GetMultipartSize()); + } + FdManager::SetEnsureFreeDiskSpace(dfsize); + return 0; + } + if(0 == STR2NCMP(arg, "singlepart_copy_limit=")){ + singlepart_copy_limit = static_cast<int64_t>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char))) * 1024; + return 0; + } if(0 == STR2NCMP(arg, "ahbe_conf=")){ string ahbe_conf = strchr(arg, '=') + sizeof(char); if(!AdditionalHeader::get()->Load(ahbe_conf.c_str())){ - fprintf(stderr, "%s: failed to load ahbe_conf file(%s).\n", - program_name.c_str(), ahbe_conf.c_str()); + S3FS_PRN_EXIT("failed to load ahbe_conf file(%s).", ahbe_conf.c_str()); return -1; } AdditionalHeader::get()->Dump(); @@ -4449,30 +4555,47 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar pathrequeststyle = true; return 0; } - + // + // debug option for s3fs + // + if(0 == STR2NCMP(arg, "dbglevel=")){ + const char* strlevel = strchr(arg, '=') + sizeof(char); + if(0 == strcasecmp(strlevel, "silent") || 0 == strcasecmp(strlevel, "critical") || 0 == strcasecmp(strlevel, "crit")){ + set_s3fs_log_level(S3FS_LOG_CRIT); + }else if(0 == strcasecmp(strlevel, "error") || 0 == strcasecmp(strlevel, "err")){ + set_s3fs_log_level(S3FS_LOG_ERR); + }else if(0 == strcasecmp(strlevel, "wan") || 0 == strcasecmp(strlevel, "warn") || 0 == strcasecmp(strlevel, "warning")){ + set_s3fs_log_level(S3FS_LOG_WARN); + }else if(0 == strcasecmp(strlevel, "inf") || 0 == strcasecmp(strlevel, "info") || 0 == strcasecmp(strlevel, "information")){ + set_s3fs_log_level(S3FS_LOG_INFO); + }else if(0 == strcasecmp(strlevel, "dbg") || 0 == strcasecmp(strlevel, "debug")){ + set_s3fs_log_level(S3FS_LOG_DBG); + }else{ + S3FS_PRN_EXIT("option dbglevel has unknown parameter(%s).", strlevel); + return -1; + } + return 0; + }
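+ // [EXAMPLE] Editorial sketch, not part of the original patch; bucket and
+ // mountpoint are illustrative placeholders:
+ //   s3fs mybucket /mnt/s3 -o dbglevel=info    -> start with information level logging
+ //   kill -USR2 <s3fs pid>                     -> bump the level at runtime (see set_s3fs_usr2_handler)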
+ // + // debug option + // + // the first -d (or --debug) sets the debug level to S3FS_LOG_INFO; a second -d is passed to fuse. + // if(0 == strcmp(arg, "-d") || 0 == strcmp(arg, "--debug")){ - if(!debug){ - debug = true; + if(!IS_S3FS_LOG_INFO() && !IS_S3FS_LOG_DBG()){ + set_s3fs_log_level(S3FS_LOG_INFO); + return 0; + } + if(0 == strcmp(arg, "--debug")){ + // fuse doesn't understand "--debug", but it understands -d; + // we can't pass -d back to fuse, so just consume the flag. return 0; - }else{ - // fuse doesn't understand "--debug", but it - // understands -d, but we can't pass -d back - // to fuse, in this case just ignore the - // second --debug if is was provided. If we - // do not ignore this, fuse emits an error - if(strcmp(arg, "--debug") == 0){ - return 0; - } } } - // for deep debugging message + // "f2" is no longer used. + // (it now just sets S3FS_LOG_DBG) if(0 == strcmp(arg, "f2")){ - foreground2 = true; + set_s3fs_log_level(S3FS_LOG_DBG); return 0; } if(0 == strcmp(arg, "curldbg")){ @@ -4481,13 +4604,11 @@ } if(0 == STR2NCMP(arg, "accessKeyId=")){ - fprintf(stderr, "%s: option accessKeyId is no longer supported\n", - program_name.c_str()); + S3FS_PRN_EXIT("option accessKeyId is no longer supported."); return -1; } if(0 == STR2NCMP(arg, "secretAccessKey=")){ - fprintf(stderr, "%s: option secretAccessKey is no longer supported\n", - program_name.c_str()); + S3FS_PRN_EXIT("option secretAccessKey is no longer supported."); return -1; } } @@ -4508,6 +4629,10 @@ int main(int argc, char* argv[]) {0, 0, 0, 0} }; + // init syslog(default CRIT) + openlog("s3fs", LOG_PID | LOG_ODELAY | LOG_NOWAIT, LOG_USER); + set_s3fs_log_level(debug_level); + // init xml2 xmlInitParser(); LIBXML_TEST_VERSION @@ -4548,8 +4673,11 @@ } } - // Load SSE-C Key from env - S3fsCurl::LoadEnvSseKeys(); + // Load SSE environment + if(!S3fsCurl::LoadEnvSse()){ + S3FS_PRN_EXIT("something is wrong with the SSE environment."); + exit(EXIT_FAILURE); + } // clear this structure memset(&s3fs_oper, 0, sizeof(s3fs_oper)); @@ -4562,25 +4690,35 @@ exit(EXIT_FAILURE); } + // [NOTE] + // exclusive option check here. + // + if(REDUCED_REDUNDANCY == S3fsCurl::GetStorageClass() && !S3fsCurl::IsSseDisable()){ + S3FS_PRN_EXIT("use_sse option could not be specified with storage class reduced_redundancy."); + exit(EXIT_FAILURE); + } + if(!S3fsCurl::FinalCheckSse()){ + S3FS_PRN_EXIT("something is wrong with the SSE options."); + exit(EXIT_FAILURE); + } + // The first plain argument is the bucket if(bucket.size() == 0){ - fprintf(stderr, "%s: missing BUCKET argument\n", program_name.c_str()); + S3FS_PRN_EXIT("missing BUCKET argument."); show_usage(); exit(EXIT_FAILURE); } // bucket names cannot contain upper case characters in virtual-hosted style if((!pathrequeststyle) && (lower(bucket) != bucket)){ - fprintf(stderr, "%s: BUCKET %s, name not compatible with virtual-hosted style\n", - program_name.c_str(), bucket.c_str()); + S3FS_PRN_EXIT("BUCKET %s, name not compatible with virtual-hosted style.", bucket.c_str()); exit(EXIT_FAILURE); } // check bucket name for illegal characters found = bucket.find_first_of("/:\\;!@#$%^&*?|+="); if(found != string::npos){ - fprintf(stderr, "%s: BUCKET %s -- bucket name contains an illegal character\n", - program_name.c_str(), bucket.c_str()); + S3FS_PRN_EXIT("BUCKET %s -- bucket name contains an illegal character.", bucket.c_str()); exit(EXIT_FAILURE); } @@ -4590,7 +4728,7 @@ // if the mountpoint option was ever supplied if(utility_mode == 0){ if(mountpoint.size() == 0){ - fprintf(stderr, "%s: missing MOUNTPOINT argument\n", program_name.c_str()); + S3FS_PRN_EXIT("missing MOUNTPOINT argument."); show_usage(); exit(EXIT_FAILURE); } @@ -4598,13 +4736,11 @@ // error checking of command line arguments for compatibility if(S3fsCurl::IsPublicBucket() && S3fsCurl::IsSetAccessKeyId()){ - fprintf(stderr, "%s: specifying both public_bucket and the access keys options is invalid\n", - program_name.c_str()); + S3FS_PRN_EXIT("specifying both public_bucket and the access keys options is invalid."); exit(EXIT_FAILURE); } if(passwd_file.size() > 0 && S3fsCurl::IsSetAccessKeyId()){ - fprintf(stderr, "%s: specifying both passwd_file and the access keys options
is invalid\n", - program_name.c_str()); + S3FS_PRN_EXIT("specifying both passwd_file and the access keys options is invalid."); exit(EXIT_FAILURE); } if(!S3fsCurl::IsPublicBucket()){ @@ -4612,14 +4748,19 @@ int main(int argc, char* argv[]) exit(EXIT_FAILURE); } if(!S3fsCurl::IsSetAccessKeyId()){ - fprintf(stderr, "%s: could not establish security credentials, check documentation\n", - program_name.c_str()); + S3FS_PRN_EXIT("could not establish security credentials, check documentation."); exit(EXIT_FAILURE); } // More error checking on the access key pair can be done // like checking for appropriate lengths and characters } + // check cache dir permission + if(!FdManager::CheckCacheTopDir() || !CacheFileStat::CheckCacheFileStatTopDir()){ + S3FS_PRN_EXIT("could not allow cache directory permission, check permission of cache directories."); + exit(EXIT_FAILURE); + } + // There's room for more command line error checking // Check to see if the bucket name contains periods and https (SSL) is @@ -4639,8 +4780,7 @@ int main(int argc, char* argv[]) if(found != string::npos){ found = host.find("https:"); if(found != string::npos){ - fprintf(stderr, "%s: Using https and a bucket name with periods is unsupported.\n", - program_name.c_str()); + S3FS_PRN_EXIT("Using https and a bucket name with periods is unsupported."); exit(1); } } @@ -4651,6 +4791,13 @@ int main(int argc, char* argv[]) exit(s3fs_utility_mode()); } + // check free disk space + FdManager::InitEnsureFreeDiskSpace(); + if(!FdManager::IsSafeDiskSpace(NULL, S3fsCurl::GetMultipartSize())){ + S3FS_PRN_EXIT("There is no enough disk space for used as cache(or temporary) directory by s3fs."); + exit(EXIT_FAILURE); + } + s3fs_oper.getattr = s3fs_getattr; s3fs_oper.readlink = s3fs_readlink; s3fs_oper.mknod = s3fs_mknod; @@ -4690,7 +4837,13 @@ int main(int argc, char* argv[]) s3fs_oper.removexattr = s3fs_removexattr; if(!s3fs_init_global_ssl()){ - fprintf(stderr, "%s: could not initialize for ssl libraries.\n", program_name.c_str()); + S3FS_PRN_EXIT("could not initialize for ssl libraries."); + exit(EXIT_FAILURE); + } + + // set signal handler for debugging + if(!set_s3fs_usr2_handler()){ + S3FS_PRN_EXIT("could not set signal handler for SIGUSR2."); exit(EXIT_FAILURE); } diff --git a/src/s3fs.h b/src/s3fs.h index 9ff40e5..688f2ab 100644 --- a/src/s3fs.h +++ b/src/s3fs.h @@ -84,8 +84,6 @@ #endif // HAVE_MALLOC_TRIM -char* get_object_sseckey_md5(const char* path); - #endif // S3FS_S3_H_ /* diff --git a/src/s3fs_auth.h b/src/s3fs_auth.h index 99b8118..98fc763 100644 --- a/src/s3fs_auth.h +++ b/src/s3fs_auth.h @@ -20,14 +20,15 @@ #ifndef S3FS_AUTH_H_ #define S3FS_AUTH_H_ +#include +#include + //------------------------------------------------------------------- // Utility functions for Authentication //------------------------------------------------------------------- // // in common_auth.cpp // -char* s3fs_base64(const unsigned char* input, size_t length); -unsigned char* s3fs_decode64(const char* input, size_t* plength); std::string s3fs_get_content_md5(int fd); std::string s3fs_md5sum(int fd, off_t start, ssize_t size); std::string s3fs_sha256sum(int fd, off_t start, ssize_t size); diff --git a/src/s3fs_util.cpp b/src/s3fs_util.cpp index b81256a..a95f312 100644 --- a/src/s3fs_util.cpp +++ b/src/s3fs_util.cpp @@ -233,7 +233,7 @@ bool S3ObjList::GetLastName(std::string& lastname) const { bool result = false; lastname = ""; - for(s3obj_t::const_iterator iter = objects.begin(); iter != objects.end(); iter++){ + for(s3obj_t::const_iterator iter = 
objects.begin(); iter != objects.end(); ++iter){ if((*iter).second.orgname.length()){ if(0 > strcmp(lastname.c_str(), (*iter).second.orgname.c_str())){ lastname = (*iter).second.orgname; @@ -253,7 +253,7 @@ bool S3ObjList::GetNameList(s3obj_list_t& list, bool OnlyNormalized, bool CutSla { s3obj_t::const_iterator iter; - for(iter = objects.begin(); objects.end() != iter; iter++){ + for(iter = objects.begin(); objects.end() != iter; ++iter){ if(OnlyNormalized && 0 != (*iter).second.normalname.length()){ continue; } @@ -275,7 +275,7 @@ bool S3ObjList::MakeHierarchizedList(s3obj_list_t& list, bool haveSlash) s3obj_h_t::iterator hiter; s3obj_list_t::const_iterator liter; - for(liter = list.begin(); list.end() != liter; liter++){ + for(liter = list.begin(); list.end() != liter; ++liter){ string strtmp = (*liter); if(1 < strtmp.length() && '/' == strtmp[strtmp.length() - 1]){ strtmp = strtmp.substr(0, strtmp.length() - 1); @@ -425,51 +425,14 @@ void free_mvnodes(MVNODE *head) //------------------------------------------------------------------- // Class AutoLock //------------------------------------------------------------------- -AutoLock::AutoLock(pthread_mutex_t* pmutex) : auto_mutex(pmutex), is_locked(false) +AutoLock::AutoLock(pthread_mutex_t* pmutex) : auto_mutex(pmutex) { - Lock(); + pthread_mutex_lock(auto_mutex); } AutoLock::~AutoLock() { - Unlock(); -} - -bool AutoLock::Lock(void) -{ - if(!auto_mutex){ - return false; - } - if(is_locked){ - // already locked - return true; - } - try{ - pthread_mutex_lock(auto_mutex); - is_locked = true; - }catch(exception& e){ - is_locked = false; - return false; - } - return true; -} - -bool AutoLock::Unlock(void) -{ - if(!auto_mutex){ - return false; - } - if(!is_locked){ - // already unlocked - return true; - } - try{ - pthread_mutex_unlock(auto_mutex); - is_locked = false; - }catch(exception& e){ - return false; - } - return true; + pthread_mutex_unlock(auto_mutex); } //------------------------------------------------------------------- @@ -479,7 +442,6 @@ bool AutoLock::Unlock(void) string get_username(uid_t uid) { static size_t maxlen = 0; // set onece - int result; char* pbuf; struct passwd pwinfo; struct passwd* ppwinfo = NULL; @@ -488,19 +450,19 @@ string get_username(uid_t uid) if(0 == maxlen){ long res = sysconf(_SC_GETPW_R_SIZE_MAX); if(0 > res){ - DPRNNN("could not get max pw length."); + S3FS_PRN_WARN("could not get max pw length."); maxlen = 0; return string(""); } maxlen = res; } if(NULL == (pbuf = (char*)malloc(sizeof(char) * maxlen))){ - DPRNCRIT("failed to allocate memory."); + S3FS_PRN_CRIT("failed to allocate memory."); return string(""); } // get group information - if(0 != (result = getpwuid_r(uid, &pwinfo, pbuf, maxlen, &ppwinfo))){ - DPRNNN("could not get pw information."); + if(0 != getpwuid_r(uid, &pwinfo, pbuf, maxlen, &ppwinfo)){ + S3FS_PRN_WARN("could not get pw information."); free(pbuf); return string(""); } @@ -526,19 +488,19 @@ int is_uid_inculde_group(uid_t uid, gid_t gid) if(0 == maxlen){ long res = sysconf(_SC_GETGR_R_SIZE_MAX); if(0 > res){ - DPRNNN("could not get max name length."); + S3FS_PRN_ERR("could not get max name length."); maxlen = 0; return -ERANGE; } maxlen = res; } if(NULL == (pbuf = (char*)malloc(sizeof(char) * maxlen))){ - DPRNCRIT("failed to allocate memory."); + S3FS_PRN_CRIT("failed to allocate memory."); return -ENOMEM; } // get group information if(0 != (result = getgrgid_r(gid, &ginfo, pbuf, maxlen, &pginfo))){ - DPRNNN("could not get group information."); + S3FS_PRN_ERR("could not get group 
information."); free(pbuf); return -result; } @@ -584,23 +546,80 @@ string mybasename(string path) // mkdir --parents int mkdirp(const string& path, mode_t mode) { - string base; - string component; + string base; + string component; stringstream ss(path); while (getline(ss, component, '/')) { base += "/" + component; - mkdir(base.c_str(), mode); + + struct stat st; + if(0 == stat(base.c_str(), &st)){ + if(!S_ISDIR(st.st_mode)){ + return EPERM; + } + }else{ + if(0 != mkdir(base.c_str(), mode)){ + return errno; + } + } } return 0; } +bool check_exist_dir_permission(const char* dirpath) +{ + if(!dirpath || '\0' == dirpath[0]){ + return false; + } + + // exists + struct stat st; + if(0 != stat(dirpath, &st)){ + if(ENOENT == errno){ + // dir does not exitst + return true; + } + if(EACCES == errno){ + // could not access directory + return false; + } + // somthing error occured + return false; + } + + // check type + if(!S_ISDIR(st.st_mode)){ + // path is not directory + return false; + } + + // check permission + uid_t myuid = geteuid(); + if(myuid == st.st_uid){ + if(S_IRWXU != (st.st_mode & S_IRWXU)){ + return false; + } + }else{ + if(1 == is_uid_inculde_group(myuid, st.st_gid)){ + if(S_IRWXG != (st.st_mode & S_IRWXG)){ + return false; + } + }else{ + if(S_IRWXO != (st.st_mode & S_IRWXO)){ + return false; + } + } + } + return true; +} + bool delete_files_in_dir(const char* dir, bool is_remove_own) { DIR* dp; struct dirent* dent; if(NULL == (dp = opendir(dir))){ - DPRNINFO("could not open dir(%s) - errno(%d)", dir, errno); + S3FS_PRN_ERR("could not open dir(%s) - errno(%d)", dir, errno); return false; } @@ -613,20 +632,20 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own) fullpath += dent->d_name; struct stat st; if(0 != lstat(fullpath.c_str(), &st)){ - DPRN("could not get stats of file(%s) - errno(%d)", fullpath.c_str(), errno); + S3FS_PRN_ERR("could not get stats of file(%s) - errno(%d)", fullpath.c_str(), errno); closedir(dp); return false; } if(S_ISDIR(st.st_mode)){ // dir -> Reentrant if(!delete_files_in_dir(fullpath.c_str(), true)){ - DPRNINFO("could not remove sub dir(%s) - errno(%d)", fullpath.c_str(), errno); + S3FS_PRN_ERR("could not remove sub dir(%s) - errno(%d)", fullpath.c_str(), errno); closedir(dp); return false; } }else{ if(0 != unlink(fullpath.c_str())){ - DPRN("could not remove file(%s) - errno(%d)", fullpath.c_str(), errno); + S3FS_PRN_ERR("could not remove file(%s) - errno(%d)", fullpath.c_str(), errno); closedir(dp); return false; } @@ -635,7 +654,7 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own) closedir(dp); if(is_remove_own && 0 != rmdir(dir)){ - DPRN("could not remove dir(%s) - errno(%d)", dir, errno); + S3FS_PRN_ERR("could not remove dir(%s) - errno(%d)", dir, errno); return false; } return true; @@ -875,28 +894,54 @@ void show_help (void) " del_cache (delete local file cache)\n" " - delete local file cache when s3fs starts and exits.\n" "\n" - " use_rrs (default is disable)\n" - " - this option makes Amazon's Reduced Redundancy Storage enable.\n" + " storage_class (default=\"standard\")\n" + " - store object with specified storage class. Possible values:\n" + " standard, standard_ia, and reduced_redundancy.\n" "\n" " use_sse (default is disable)\n" - " - use Amazon's Server-Site Encryption or Server-Side Encryption\n" - " with Customer-Provided Encryption Keys.\n" - " this option can not be specified with use_rrs. 
+bool check_exist_dir_permission(const char* dirpath) +{ + if(!dirpath || '\0' == dirpath[0]){ + return false; + } + + // exists + struct stat st; + if(0 != stat(dirpath, &st)){ + if(ENOENT == errno){ + // dir does not exist + return true; + } + if(EACCES == errno){ + // could not access directory + return false; + } + // some other error occurred + return false; + } + + // check type + if(!S_ISDIR(st.st_mode)){ + // path is not directory + return false; + } + + // check permission + uid_t myuid = geteuid(); + if(myuid == st.st_uid){ + if(S_IRWXU != (st.st_mode & S_IRWXU)){ + return false; + } + }else{ + if(1 == is_uid_inculde_group(myuid, st.st_gid)){ + if(S_IRWXG != (st.st_mode & S_IRWXG)){ + return false; + } + }else{ + if(S_IRWXO != (st.st_mode & S_IRWXO)){ + return false; + } + } + } + return true; +} + bool delete_files_in_dir(const char* dir, bool is_remove_own) { DIR* dp; struct dirent* dent; if(NULL == (dp = opendir(dir))){ - DPRNINFO("could not open dir(%s) - errno(%d)", dir, errno); + S3FS_PRN_ERR("could not open dir(%s) - errno(%d)", dir, errno); return false; } @@ -613,20 +632,20 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own) fullpath += dent->d_name; struct stat st; if(0 != lstat(fullpath.c_str(), &st)){ - DPRN("could not get stats of file(%s) - errno(%d)", fullpath.c_str(), errno); + S3FS_PRN_ERR("could not get stats of file(%s) - errno(%d)", fullpath.c_str(), errno); closedir(dp); return false; } if(S_ISDIR(st.st_mode)){ // dir -> Reentrant if(!delete_files_in_dir(fullpath.c_str(), true)){ - DPRNINFO("could not remove sub dir(%s) - errno(%d)", fullpath.c_str(), errno); + S3FS_PRN_ERR("could not remove sub dir(%s) - errno(%d)", fullpath.c_str(), errno); closedir(dp); return false; } }else{ if(0 != unlink(fullpath.c_str())){ - DPRN("could not remove file(%s) - errno(%d)", fullpath.c_str(), errno); + S3FS_PRN_ERR("could not remove file(%s) - errno(%d)", fullpath.c_str(), errno); closedir(dp); return false; } @@ -635,7 +654,7 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own) closedir(dp); if(is_remove_own && 0 != rmdir(dir)){ - DPRN("could not remove dir(%s) - errno(%d)", dir, errno); + S3FS_PRN_ERR("could not remove dir(%s) - errno(%d)", dir, errno); return false; } return true; @@ -875,28 +894,54 @@ void show_help (void) " del_cache (delete local file cache)\n" " - delete local file cache when s3fs starts and exits.\n" "\n" - " use_rrs (default is disable)\n" - " - this option makes Amazon's Reduced Redundancy Storage enable.\n" + " storage_class (default=\"standard\")\n" + " - store object with specified storage class. Possible values:\n" + " standard, standard_ia, and reduced_redundancy.\n" "\n" " use_sse (default is disable)\n" - " - use Amazon's Server-Site Encryption or Server-Side Encryption\n" - " with Customer-Provided Encryption Keys.\n" - " this option can not be specified with use_rrs. specifying only \n" - " \"use_sse\" or \"use_sse=1\" enables Server-Side Encryption.\n" - " (use_sse=1 for old version)\n" - " specifying this option with file path which has some SSE-C\n" - " secret key enables Server-Side Encryption with Customer-Provided\n" - " Encryption Keys.(use_sse=file)\n" - " the file must be 600 permission. the file can have some lines,\n" - " each line is one SSE-C key. the first line in file is used as\n" - " Customer-Provided Encryption Keys for uploading and changing\n" - " headers etc.\n" - " if there are some keys after first line, those are used\n" - " downloading object which are encrypted by not first key.\n" - " so that, you can keep all SSE-C keys in file, that is SSE-C\n" - " key history.\n" - " if AWSSSECKEYS environment is set, you can set SSE-C key instead\n" + " - Specify one of three types of Amazon's Server-Side Encryption:\n" + " SSE-S3, SSE-C or SSE-KMS. SSE-S3 uses Amazon S3-managed\n" + " encryption keys, SSE-C uses customer-provided encryption keys,\n" + " and SSE-KMS uses the master key which you manage in AWS KMS.\n" + " Specifying \"use_sse\" or \"use_sse=1\" enables the SSE-S3\n" + " type (use_sse=1 is the old style parameter).\n" + " In the case of SSE-C, you can specify \"use_sse=custom\",\n" + " \"use_sse=custom:<custom key file path>\" or\n" + " \"use_sse=<custom key file path>\" (only <custom key file path>\n" + " specified is the old style parameter). You can use \"c\" as\n" + " shorthand for \"custom\".\n" + " The custom key file must have 600 permissions. The file can\n" + " have multiple lines; each line is one SSE-C key. The first\n" + " line in the file is used as the Customer-Provided Encryption\n" + " Key for uploading and changing headers etc. If there are keys\n" + " after the first line, those are used for downloading objects\n" + " that were encrypted with a key other than the first one. This\n" + " way you can keep all SSE-C keys in the file as an SSE-C key\n" + " history.\n" + " If you specify \"custom\" (\"c\") without a file path, you\n" + " need to set the custom key by the load_sse_c option or the\n" + " AWSSSECKEYS environment variable. (AWSSSECKEYS holds SSE-C\n" + " keys separated by \":\".) This option is used to decide the\n" + " SSE type, so if you do not want to encrypt objects at upload\n" + " but need to decrypt encrypted objects at download, you can\n" + " use the load_sse_c option instead\n" " of this option.\n" + " For SSE-KMS, specify \"use_sse=kmsid\" or\n" + " \"use_sse=kmsid:<kms id>\". You can use \"k\" as shorthand\n" + " for \"kmsid\". If you want to specify the SSE-KMS type with\n" + " your <kms id> in AWS KMS, set it after \"kmsid:\" (or \"k:\").\n" + " If you specify only \"kmsid\" (\"k\"), you need to set the\n" + " AWSSSEKMSID environment variable whose value is the <kms id>.\n" + " Be careful: you cannot use a KMS id that is not in the same\n" + " region as your EC2 instance.\n" + "\n" + " load_sse_c - specify SSE-C keys\n" + " Specify the customer-provided encryption keys file path for\n" + " decrypting at download.\n" + " If you use the customer-provided encryption key at upload,\n" + " specify it with \"use_sse=custom\". The file can have multiple\n" + " lines; each line is one custom key. This way you can keep all\n" + " SSE-C keys in the file as an SSE-C key history. The AWSSSECKEYS\n"
+ " environment variable has the same format as this file's\n" + " contents.\n" "\n" " public_bucket (default=\"\" which means disabled)\n" " - anonymously mount a public bucket when set to 1\n" @@ -925,10 +970,10 @@ " If you specify this option for set \"Content-Encoding\" HTTP \n" " header, please take care for RFC 2616.\n" "\n" - " connect_timeout (default=\"10\" seconds)\n" + " connect_timeout (default=\"300\" seconds)\n" " - time to wait for connection before giving up\n" "\n" - " readwrite_timeout (default=\"30\" seconds)\n" + " readwrite_timeout (default=\"60\" seconds)\n" " - time to wait between read/write activity before giving up\n" "\n" " max_stat_cache_size (default=\"1000\" entries (about 4MB))\n" @@ -948,7 +993,8 @@ " in stat cache that the object(file or directory) does not exist.\n" "\n" " no_check_certificate\n" - " - server certificate won't be checked against the available certificate authorities.\n" + " - server certificate won't be checked against the available \n" + " certificate authorities.\n" "\n" " nodnscache (disable dns cache)\n" " - s3fs is always using dns cache, this option make dns cache disable.\n" @@ -971,13 +1017,15 @@ " multipart_size (default=\"10\")\n" " - part size, in MB, for each multipart request.\n" "\n" - " fd_page_size (default=\"52428800\"(50MB))\n" - " - number of internal management page size for each file descriptor.\n" - " For delayed reading and writing by s3fs, s3fs manages pages which \n" - " is separated from object. Each pages has a status that data is \n" - " already loaded(or not loaded yet).\n" - " This option should not be changed when you don't have a trouble \n" - " with performance.\n" + " ensure_diskfree (default same multipart_size value)\n" + " - sets the size, in MB, of free disk space to ensure. s3fs\n" + " creates files for downloading, uploading and caching. If the\n" + " free disk space is smaller than this value, s3fs uses as\n" + " little disk space as possible, in exchange for performance.\n" + "\n" + " singlepart_copy_limit (default=\"5120\")\n" + " - maximum size, in MB, of a single-part copy before trying \n" + " multipart copy.\n" "\n" " url (default=\"http://s3.amazonaws.com\")\n" " - sets the url to use to access amazon s3\n" @@ -1006,7 +1054,7 @@ " nomultipart (disable multipart uploads)\n" "\n" " enable_content_md5 (default is disable)\n" - " - verifying uploaded object without multipart by content-md5 header.\n" + " - ensure data integrity during writes with MD5 hash.\n" "\n" " iam_role (default is no role)\n" " - set the IAM Role that will supply the credentials from the \n" @@ -1040,6 +1088,16 @@ " the virtual-host request style, by using the older path request\n" " style.\n" "\n" + " dbglevel (default=\"crit\")\n" + " Set the debug message level: crit (critical), err (error),\n" + " warn (warning) or info (information). The default debug level\n" + " is critical. If s3fs runs with the \"-d\" option, the debug\n" + " level is set to information.
     "FUSE/mount Options:\n"
     "\n"
     "   Most of the generic mount options described in 'man mount' are\n"
@@ -1062,8 +1120,7 @@ void show_help (void)
     "      disable multi-threaded operation\n"
     "\n"
     "\n"
-    "Report bugs to <s3fs-devel@googlegroups.com>\n"
-    "s3fs home page: <http://code.google.com/p/s3fs/>\n"
+    "s3fs home page: <https://github.com/s3fs-fuse/s3fs-fuse>\n"
   );
   return;
 }
@@ -1071,12 +1128,12 @@ void show_help (void)
 void show_version(void)
 {
   printf(
-  "Amazon Simple Storage Service File System V%s with %s\n"
+  "Amazon Simple Storage Service File System V%s(commit:%s) with %s\n"
   "Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>\n"
   "License GPL2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>\n"
   "This is free software: you are free to change and redistribute it.\n"
   "There is NO WARRANTY, to the extent permitted by law.\n",
-  VERSION, s3fs_crypt_lib_name());
+  VERSION, COMMIT_HASH_VAL, s3fs_crypt_lib_name());
   return;
 }
diff --git a/src/s3fs_util.h b/src/s3fs_util.h
index bb1de05..d4d8ac5 100644
--- a/src/s3fs_util.h
+++ b/src/s3fs_util.h
@@ -88,14 +88,10 @@ class AutoLock
 {
   private:
     pthread_mutex_t* auto_mutex;
-    bool is_locked;
 
   public:
-    AutoLock(pthread_mutex_t* pmutex = NULL);
+    explicit AutoLock(pthread_mutex_t* pmutex);
     ~AutoLock();
-
-    bool Lock(void);
-    bool Unlock(void);
 };
 
 //-------------------------------------------------------------------
@@ -113,6 +109,7 @@
 int is_uid_inculde_group(uid_t uid, gid_t gid);
 std::string mydirname(std::string path);
 std::string mybasename(std::string path);
 int mkdirp(const std::string& path, mode_t mode);
+bool check_exist_dir_permission(const char* dirpath);
 bool delete_files_in_dir(const char* dir, bool is_remove_own);
 time_t get_mtime(const char *s);
diff --git a/src/string_util.cpp b/src/string_util.cpp
index 6ce5238..4a6d71c 100644
--- a/src/string_util.cpp
+++ b/src/string_util.cpp
@@ -17,6 +17,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  */
+#include <climits>
 #include <cstdio>
 #include <cstdlib>
 #include <cstring>
@@ -85,13 +86,6 @@ string lower(string s)
   return s;
 }
 
-string IntToStr(int n)
-{
-  stringstream result;
-  result << n;
-  return result.str();
-}
-
 string trim_left(const string &s, const string &t /* = SPACES */)
 {
   string d(s);
@@ -275,6 +269,104 @@ string get_date_iso8601(time_t tm)
   return buf;
 }
 
+std::string s3fs_hex(const unsigned char* input, size_t length)
+{
+  std::string hex;
+  for(size_t pos = 0; pos < length; ++pos){
+    char hexbuf[3];
+    snprintf(hexbuf, 3, "%02x", input[pos]);
+    hex += hexbuf;
+  }
+  return hex;
+}
+
+char* s3fs_base64(const unsigned char* input, size_t length)
+{
+  static const char* base = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
+  char* result;
+
+  if(!input || 0 >= length){
+    return NULL;
+  }
+  if(NULL == (result = (char*)malloc((((length / 3) + 1) * 4 + 1) * sizeof(char)))){
+    return NULL; // ENOMEM
+  }
+
+  unsigned char parts[4];
+  size_t rpos;
+  size_t wpos;
+  for(rpos = 0, wpos = 0; rpos < length; rpos += 3){
+    parts[0] = (input[rpos] & 0xfc) >> 2;
+    parts[1] = ((input[rpos] & 0x03) << 4) | ((((rpos + 1) < length ? input[rpos + 1] : 0x00) & 0xf0) >> 4);
+    parts[2] = (rpos + 1) < length ? (((input[rpos + 1] & 0x0f) << 2) | ((((rpos + 2) < length ? input[rpos + 2] : 0x00) & 0xc0) >> 6)) : 0x40;
+    parts[3] = (rpos + 2) < length ? (input[rpos + 2] & 0x3f) : 0x40;
+
+    result[wpos++] = base[parts[0]];
+    result[wpos++] = base[parts[1]];
+    result[wpos++] = base[parts[2]];
+    result[wpos++] = base[parts[3]];
+  }
+  result[wpos] = '\0';
+
+  return result;
+}
+
+inline unsigned char char_decode64(const char ch)
+{
+  unsigned char by;
+  if('A' <= ch && ch <= 'Z'){                    // A - Z
+    by = static_cast<unsigned char>(ch - 'A');
+  }else if('a' <= ch && ch <= 'z'){              // a - z
+    by = static_cast<unsigned char>(ch - 'a' + 26);
+  }else if('0' <= ch && ch <= '9'){              // 0 - 9
+    by = static_cast<unsigned char>(ch - '0' + 52);
+  }else if('+' == ch){                           // +
+    by = 62;
+  }else if('/' == ch){                           // /
+    by = 63;
+  }else if('=' == ch){                           // =
+    by = 64;
+  }else{                                         // something wrong
+    by = UCHAR_MAX;
+  }
+  return by;
+}
+
+unsigned char* s3fs_decode64(const char* input, size_t* plength)
+{
+  unsigned char* result;
+  if(!input || 0 == strlen(input) || !plength){
+    return NULL;
+  }
+  if(NULL == (result = (unsigned char*)malloc((strlen(input) + 1)))){
+    return NULL; // ENOMEM
+  }
+
+  unsigned char parts[4];
+  size_t input_len = strlen(input);
+  size_t rpos;
+  size_t wpos;
+  for(rpos = 0, wpos = 0; rpos < input_len; rpos += 4){
+    parts[0] = char_decode64(input[rpos]);
+    parts[1] = (rpos + 1) < input_len ? char_decode64(input[rpos + 1]) : 64;
+    parts[2] = (rpos + 2) < input_len ? char_decode64(input[rpos + 2]) : 64;
+    parts[3] = (rpos + 3) < input_len ? char_decode64(input[rpos + 3]) : 64;
+
+    result[wpos++] = ((parts[0] << 2) & 0xfc) | ((parts[1] >> 4) & 0x03);
+    if(64 == parts[2]){
+      break;
+    }
+    result[wpos++] = ((parts[1] << 4) & 0xf0) | ((parts[2] >> 2) & 0x0f);
+    if(64 == parts[3]){
+      break;
+    }
+    result[wpos++] = ((parts[2] << 6) & 0xc0) | (parts[3] & 0x3f);
+  }
+  result[wpos] = '\0';
+  *plength = wpos;
+  return result;
+}
+
 /*
 * Local variables:
 * tab-width: 4
diff --git a/src/string_util.h b/src/string_util.h
index 003b48f..29ff04f 100644
--- a/src/string_util.h
+++ b/src/string_util.h
@@ -45,7 +45,6 @@
 std::string trim_left(const std::string &s, const std::string &t = SPACES);
 std::string trim_right(const std::string &s, const std::string &t = SPACES);
 std::string trim(const std::string &s, const std::string &t = SPACES);
 std::string lower(std::string s);
-std::string IntToStr(int);
 std::string get_date_rfc850(void);
 void get_date_sigv3(std::string& date, std::string& date8601);
 std::string get_date_string(time_t tm);
@@ -56,6 +55,10 @@
 std::string urlDecode(const std::string& s);
 bool takeout_str_dquart(std::string& str);
 bool get_keyword_value(std::string& target, const char* keyword, std::string& value);
+std::string s3fs_hex(const unsigned char* input, size_t length);
+char* s3fs_base64(const unsigned char* input, size_t length);
+unsigned char* s3fs_decode64(const char* input, size_t* plength);
+
 #endif // S3FS_STRING_UTIL_H_
 
 /*
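As a quick cross-check of the encoder against an independent implementation,
the vectors exercised by test_string_util.cpp below can be reproduced with
coreutils base64 (assumed installed; not part of this change):

    printf '1'   | base64    # MQ==
    printf '12'  | base64    # MTI=
    printf '123' | base64    # MTIz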
diff --git a/src/test_string_util.cpp b/src/test_string_util.cpp
index 9aab04e..2cc372e 100644
--- a/src/test_string_util.cpp
+++ b/src/test_string_util.cpp
@@ -18,12 +18,14 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  */
 
+#include <cstring>
+#include <limits>
 #include <string>
 
 #include "string_util.h"
 #include "test_util.h"
 
-int main(int argc, char *argv[])
+void test_trim()
 {
   ASSERT_EQUALS(std::string("1234"), trim(" 1234 "));
   ASSERT_EQUALS(std::string("1234"), trim("1234 "));
@@ -40,5 +42,42 @@
   ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234"));
   ASSERT_EQUALS(std::string("1234"), trim_right("1234"));
 
+  ASSERT_EQUALS(std::string("0"), str(0));
+  ASSERT_EQUALS(std::string("1"), str(1));
+  ASSERT_EQUALS(std::string("-1"), str(-1));
+  ASSERT_EQUALS(std::string("9223372036854775807"), str(std::numeric_limits<int64_t>::max()));
+  ASSERT_EQUALS(std::string("-9223372036854775808"), str(std::numeric_limits<int64_t>::min()));
+  ASSERT_EQUALS(std::string("0"), str(std::numeric_limits<uint64_t>::min()));
+  ASSERT_EQUALS(std::string("18446744073709551615"), str(std::numeric_limits<uint64_t>::max()));
+}
+
+void test_base64()
+{
+  size_t len;
+  ASSERT_STREQUALS(s3fs_base64(NULL, 0), NULL);
+  ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64(NULL, &len)), NULL);
+  ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>(""), 0), NULL);
+  ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("", &len)), NULL);
+
+  ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1"), 1), "MQ==");
+  ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MQ==", &len)), "1");
+  ASSERT_EQUALS(len, static_cast<size_t>(1));
+  ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("12"), 2), "MTI=");
+  ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTI=", &len)), "12");
+  ASSERT_EQUALS(len, static_cast<size_t>(2));
+  ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("123"), 3), "MTIz");
+  ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTIz", &len)), "123");
+  ASSERT_EQUALS(len, static_cast<size_t>(3));
+  ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1234"), 4), "MTIzNA==");
+  ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTIzNA==", &len)), "1234");
+  ASSERT_EQUALS(len, static_cast<size_t>(4));
+
+  // TODO: invalid input
+}
+
+int main(int argc, char *argv[])
+{
+  test_trim();
+  test_base64();
   return 0;
 }
diff --git a/src/test_util.h b/src/test_util.h
index 2422bd0..7180375 100644
--- a/src/test_util.h
+++ b/src/test_util.h
@@ -29,6 +29,18 @@ template <typename T> void assert_equals(const T &x, const T &y, const char *fil
   }
 }
 
+void assert_strequals(const char *x, const char *y, const char *file, int line)
+{
+  if(x == NULL && y == NULL){
+    return;
+  } else if((x == NULL || y == NULL) || strcmp(x, y) != 0){
+    std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
+    std::exit(1);
+  }
+}
+
 #define ASSERT_EQUALS(x, y) \
   assert_equals((x), (y), __FILE__, __LINE__)
 
+#define ASSERT_STREQUALS(x, y) \
+  assert_strequals((x), (y), __FILE__, __LINE__)
diff --git a/test/Makefile.am b/test/Makefile.am
index 61e7460..84b13fa 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -28,8 +28,3 @@ EXTRA_DIST = \
 	sample_ahbe.conf
 
 testdir = test
-
-test_PROGRAMS=rename_before_close
-
-rename_before_close_SOURCES = rename_before_close.c
-
diff --git a/test/integration-test-common.sh b/test/integration-test-common.sh
index 510208d..e805957 100644
--- a/test/integration-test-common.sh
+++ b/test/integration-test-common.sh
@@ -1,10 +1,10 @@
 #!/bin/bash -e
-
+set -x
 S3FS=../src/s3fs
 
-S3FS_CREDENTIALS_FILE="passwd-s3fs"
+: ${S3FS_CREDENTIALS_FILE:="passwd-s3fs"}
 
-TEST_BUCKET_1="s3fs-integration-test"
+: ${TEST_BUCKET_1:="s3fs-integration-test"}
 TEST_BUCKET_MOUNT_POINT_1=${TEST_BUCKET_1}
 
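The ": ${VAR:=default}" substitutions above keep any value exported by the
caller and only fall back to the quoted default, which is what lets the same
scripts target either the local s3proxy or real S3. A minimal sketch of the
behavior (bucket name is a placeholder):

    # uses the fallback bucket name baked into the script
    ./small-integration-test.sh

    # overrides the bucket for one run without editing the script
    TEST_BUCKET_1=my-scratch-bucket ./small-integration-test.sh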
-f "$S3FS_CREDENTIALS_FILE" ] @@ -15,9 +15,9 @@ fi chmod 600 "$S3FS_CREDENTIALS_FILE" S3PROXY_VERSION="1.4.0" -S3PROXY_BINARY="s3proxy-${S3PROXY_VERSION}" -if [ ! -e "${S3PROXY_BINARY}" ]; then +S3PROXY_BINARY=${S3PROXY_BINARY-"s3proxy-${S3PROXY_VERSION}"} +if [ -n "${S3PROXY_BINARY}" ] && [ ! -e "${S3PROXY_BINARY}" ]; then wget "https://github.com/andrewgaul/s3proxy/releases/download/s3proxy-${S3PROXY_VERSION}/s3proxy" \ - -O "${S3PROXY_BINARY}" + --quiet -O "${S3PROXY_BINARY}" chmod +x "${S3PROXY_BINARY}" fi diff --git a/test/integration-test-main.sh b/test/integration-test-main.sh index d63c846..f03e22c 100755 --- a/test/integration-test-main.sh +++ b/test/integration-test-main.sh @@ -61,6 +61,345 @@ function rm_test_dir { fi } +function test_append_file { + echo "Testing append to file ..." + # Write a small test file + for x in `seq 1 $TEST_TEXT_FILE_LENGTH` + do + echo "echo ${TEST_TEXT} to ${TEST_TEXT_FILE}" + done > ${TEST_TEXT_FILE} + + # Verify contents of file + echo "Verifying length of test file" + FILE_LENGTH=`wc -l $TEST_TEXT_FILE | awk '{print $1}'` + if [ "$FILE_LENGTH" -ne "$TEST_TEXT_FILE_LENGTH" ] + then + echo "error: expected $TEST_TEXT_FILE_LENGTH , got $FILE_LENGTH" + exit 1 + fi + + rm_test_file +} + +function test_truncate_file { + echo "Testing truncate file ..." + # Write a small test file + echo "${TEST_TEXT}" > ${TEST_TEXT_FILE} + + # Truncate file to 0 length. This should trigger open(path, O_RDWR | O_TRUNC...) + : > ${TEST_TEXT_FILE} + + # Verify file is zero length + if [ -s ${TEST_TEXT_FILE} ] + then + echo "error: expected ${TEST_TEXT_FILE} to be zero length" + exit 1 + fi + rm_test_file +} + + +function test_mv_file { + echo "Testing mv file function ..." + + # if the rename file exists, delete it + if [ -e $ALT_TEST_TEXT_FILE ] + then + rm $ALT_TEST_TEXT_FILE + fi + + if [ -e $ALT_TEST_TEXT_FILE ] + then + echo "Could not delete file ${ALT_TEST_TEXT_FILE}, it still exists" + exit 1 + fi + + # create the test file again + mk_test_file + + #rename the test file + mv $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE + if [ ! -e $ALT_TEST_TEXT_FILE ] + then + echo "Could not move file" + exit 1 + fi + + # Check the contents of the alt file + ALT_TEXT_LENGTH=`echo $TEST_TEXT | wc -c | awk '{print $1}'` + ALT_FILE_LENGTH=`wc -c $ALT_TEST_TEXT_FILE | awk '{print $1}'` + if [ "$ALT_FILE_LENGTH" -ne "$ALT_TEXT_LENGTH" ] + then + echo "moved file length is not as expected expected: $ALT_TEXT_LENGTH got: $ALT_FILE_LENGTH" + exit 1 + fi + + # clean up + rm_test_file $ALT_TEST_TEXT_FILE +} + +function test_mv_directory { + echo "Testing mv directory function ..." + if [ -e $TEST_DIR ]; then + echo "Unexpected, this file/directory exists: ${TEST_DIR}" + exit 1 + fi + + mk_test_dir + + mv ${TEST_DIR} ${TEST_DIR}_rename + + if [ ! -d "${TEST_DIR}_rename" ]; then + echo "Directory ${TEST_DIR} was not renamed" + exit 1 + fi + + rmdir ${TEST_DIR}_rename + if [ -e "${TEST_DIR}_rename" ]; then + echo "Could not remove the test directory, it still exists: ${TEST_DIR}_rename" + exit 1 + fi +} + +function test_redirects { + echo "Testing redirects ..." 
+function test_redirects {
+    echo "Testing redirects ..."
+
+    mk_test_file ABCDEF
+
+    CONTENT=`cat $TEST_TEXT_FILE`
+
+    if [ ${CONTENT} != "ABCDEF" ]; then
+        echo "CONTENT read is unexpected, got ${CONTENT}, expected ABCDEF"
+        exit 1
+    fi
+
+    echo XYZ > $TEST_TEXT_FILE
+
+    CONTENT=`cat $TEST_TEXT_FILE`
+
+    if [ ${CONTENT} != "XYZ" ]; then
+        echo "CONTENT read is unexpected, got ${CONTENT}, expected XYZ"
+        exit 1
+    fi
+
+    echo 123456 >> $TEST_TEXT_FILE
+
+    LINE1=`sed -n '1,1p' $TEST_TEXT_FILE`
+    LINE2=`sed -n '2,2p' $TEST_TEXT_FILE`
+
+    if [ ${LINE1} != "XYZ" ]; then
+        echo "LINE1 was not as expected, got ${LINE1}, expected XYZ"
+        exit 1
+    fi
+
+    if [ ${LINE2} != "123456" ]; then
+        echo "LINE2 was not as expected, got ${LINE2}, expected 123456"
+        exit 1
+    fi
+
+    # clean up
+    rm_test_file
+}
+
+function test_mkdir_rmdir {
+    echo "Testing creation/removal of a directory"
+
+    if [ -e $TEST_DIR ]; then
+        echo "Unexpected, this file/directory exists: ${TEST_DIR}"
+        exit 1
+    fi
+
+    mk_test_dir
+    rm_test_dir
+}
+
+function test_chmod {
+    echo "Testing chmod file function ..."
+
+    # create the test file again
+    mk_test_file
+
+    ORIGINAL_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
+
+    chmod 777 $TEST_TEXT_FILE;
+
+    # if they're the same, we have a problem.
+    if [ $(stat --format=%a $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
+    then
+        echo "Could not modify $TEST_TEXT_FILE permissions"
+        exit 1
+    fi
+
+    # clean up
+    rm_test_file
+}
+
+function test_chown {
+    echo "Testing chown file function ..."
+
+    # create the test file again
+    mk_test_file
+
+    ORIGINAL_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
+
+    chown 1000:1000 $TEST_TEXT_FILE;
+
+    # if they're the same, we have a problem.
+    # (compare owner:group, matching how ORIGINAL_PERMISSIONS was captured)
+    if [ $(stat --format=%u:%g $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
+    then
+        echo "Could not modify $TEST_TEXT_FILE ownership"
+        exit 1
+    fi
+
+    # clean up
+    rm_test_file
+}
+
+function test_list {
+    echo "Testing list"
+    mk_test_file
+    mk_test_dir
+
+    file_cnt=$(ls -1 | wc -l)
+    if [ $file_cnt != 2 ]; then
+        echo "Expected 2 files but got $file_cnt"
+        exit 1
+    fi
+
+    rm_test_file
+    rm_test_dir
+}
+
+function test_remove_nonempty_directory {
+    echo "Testing removing a non-empty directory"
+    mk_test_dir
+    touch "${TEST_DIR}/file"
+    rmdir "${TEST_DIR}" 2>&1 | grep -q "Directory not empty"
+    rm "${TEST_DIR}/file"
+    rm_test_dir
+}
+
+function test_rename_before_close {
+    echo "Testing rename before close ..."
+    (
+        echo foo
+        mv $TEST_TEXT_FILE ${TEST_TEXT_FILE}.new
+    ) > $TEST_TEXT_FILE
+
+    if ! cmp <(echo foo) ${TEST_TEXT_FILE}.new; then
+        echo "rename before close failed"
+        exit 1
+    fi
+
+    rm_test_file ${TEST_TEXT_FILE}.new
+    rm -f ${TEST_TEXT_FILE}
+}
+
+function test_multipart_upload {
+    echo "Testing multi-part upload ..."
+    dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
+    dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
+
+    # Verify contents of file
+    echo "Comparing test file"
+    if ! cmp "/tmp/${BIG_FILE}" "${BIG_FILE}"
+    then
+        exit 1
+    fi
+
+    rm -f "/tmp/${BIG_FILE}"
+    rm_test_file "${BIG_FILE}"
+}
+
+function test_multipart_copy {
+    echo "Testing multi-part copy ..."
+    dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
+    dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
+    mv "${BIG_FILE}" "${BIG_FILE}-copy"
+
+    # Verify contents of file
+    echo "Comparing test file"
+    if ! cmp "/tmp/${BIG_FILE}" "${BIG_FILE}-copy"
+    then
+        exit 1
+    fi
+
+    rm -f "/tmp/${BIG_FILE}"
+    rm_test_file "${BIG_FILE}-copy"
+}
+
+function test_special_characters {
+    echo "Testing special characters ..."
+
+    ls 'special' 2>&1 | grep -q 'No such file or directory'
+    ls 'special?' 2>&1 | grep -q 'No such file or directory'
+    ls 'special*' 2>&1 | grep -q 'No such file or directory'
+    ls 'special~' 2>&1 | grep -q 'No such file or directory'
+    ls 'specialµ' 2>&1 | grep -q 'No such file or directory'
+}
+
+function test_symlink {
+    echo "Testing symlinks ..."
+
+    rm -f $TEST_TEXT_FILE
+    rm -f $ALT_TEST_TEXT_FILE
+    echo foo > $TEST_TEXT_FILE
+
+    ln -s $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
+    cmp $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
+
+    rm -f $TEST_TEXT_FILE
+
+    [ -L $ALT_TEST_TEXT_FILE ]
+    [ ! -f $ALT_TEST_TEXT_FILE ]
+}
+
+function test_extended_attributes {
+    command -v setfattr >/dev/null 2>&1 || \
+        { echo "Skipping extended attribute tests" ; return; }
+
+    echo "Testing extended attributes ..."
+
+    rm -f $TEST_TEXT_FILE
+    touch $TEST_TEXT_FILE
+
+    # set value
+    setfattr -n key1 -v value1 $TEST_TEXT_FILE
+    getfattr -n key1 --only-values $TEST_TEXT_FILE | grep -q '^value1$'
+
+    # append value
+    setfattr -n key2 -v value2 $TEST_TEXT_FILE
+    getfattr -n key1 --only-values $TEST_TEXT_FILE | grep -q '^value1$'
+    getfattr -n key2 --only-values $TEST_TEXT_FILE | grep -q '^value2$'
+
+    # remove value
+    setfattr -x key1 $TEST_TEXT_FILE
+    ! getfattr -n key1 --only-values $TEST_TEXT_FILE
+    getfattr -n key2 --only-values $TEST_TEXT_FILE | grep -q '^value2$'
+}
+
+function run_all_tests {
+    test_append_file
+    test_truncate_file
+    test_mv_file
+    test_mv_directory
+    test_redirects
+    test_mkdir_rmdir
+    test_chmod
+    test_chown
+    test_list
+    test_remove_nonempty_directory
+    # TODO: broken: https://github.com/s3fs-fuse/s3fs-fuse/issues/145
+    #test_rename_before_close
+    test_multipart_upload
+    # TODO: test disabled until S3Proxy 1.5.0 is released
+    #test_multipart_copy
+    test_special_characters
+    test_symlink
+    test_extended_attributes
+}
+
+# Mount the bucket
 CUR_DIR=`pwd`
 TEST_BUCKET_MOUNT_POINT_1=$1
 if [ "$TEST_BUCKET_MOUNT_POINT_1" == "" ]; then
@@ -74,270 +413,7 @@ then
     rm -f $TEST_TEXT_FILE
 fi
 
-# Write a small test file
-for x in `seq 1 $TEST_TEXT_FILE_LENGTH`
-do
-    echo "echo ${TEST_TEXT} to ${TEST_TEXT_FILE}"
-    echo $TEST_TEXT >> $TEST_TEXT_FILE
-done
-
-# Verify contents of file
-echo "Verifying length of test file"
-FILE_LENGTH=`wc -l $TEST_TEXT_FILE | awk '{print $1}'`
-if [ "$FILE_LENGTH" -ne "$TEST_TEXT_FILE_LENGTH" ]
-then
-    echo "error: expected $TEST_TEXT_FILE_LENGTH , got $FILE_LENGTH"
-    exit 1
-fi
-
-rm_test_file
-
-##########################################################
-# Rename test (individual file)
-##########################################################
-echo "Testing mv file function ..."
-
-# if the rename file exists, delete it
-if [ -e $ALT_TEST_TEXT_FILE ]
-then
-    rm $ALT_TEST_TEXT_FILE
-fi
-
-if [ -e $ALT_TEST_TEXT_FILE ]
-then
-    echo "Could not delete file ${ALT_TEST_TEXT_FILE}, it still exists"
-    exit 1
-fi
-
-# create the test file again
-mk_test_file
-
-#rename the test file
-mv $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
-if [ !
-e $ALT_TEST_TEXT_FILE ] -then - echo "Could not move file" - exit 1 -fi - -# Check the contents of the alt file -ALT_TEXT_LENGTH=`echo $TEST_TEXT | wc -c | awk '{print $1}'` -ALT_FILE_LENGTH=`wc -c $ALT_TEST_TEXT_FILE | awk '{print $1}'` -if [ "$ALT_FILE_LENGTH" -ne "$ALT_TEXT_LENGTH" ] -then - echo "moved file length is not as expected expected: $ALT_TEXT_LENGTH got: $ALT_FILE_LENGTH" - exit 1 -fi - -# clean up -rm_test_file $ALT_TEST_TEXT_FILE - -########################################################## -# Rename test (individual directory) -########################################################## -echo "Testing mv directory function ..." -if [ -e $TEST_DIR ]; then - echo "Unexpected, this file/directory exists: ${TEST_DIR}" - exit 1 -fi - -mk_test_dir - -mv ${TEST_DIR} ${TEST_DIR}_rename - -if [ ! -d "${TEST_DIR}_rename" ]; then - echo "Directory ${TEST_DIR} was not renamed" - exit 1 -fi - -rmdir ${TEST_DIR}_rename -if [ -e "${TEST_DIR}_rename" ]; then - echo "Could not remove the test directory, it still exists: ${TEST_DIR}_rename" - exit 1 -fi - -################################################################### -# test redirects > and >> -################################################################### -echo "Testing redirects ..." - -mk_test_file ABCDEF - -CONTENT=`cat $TEST_TEXT_FILE` - -if [ ${CONTENT} != "ABCDEF" ]; then - echo "CONTENT read is unexpected, got ${CONTENT}, expected ABCDEF" - exit 1 -fi - -echo XYZ > $TEST_TEXT_FILE - -CONTENT=`cat $TEST_TEXT_FILE` - -if [ ${CONTENT} != "XYZ" ]; then - echo "CONTENT read is unexpected, got ${CONTENT}, expected XYZ" - exit 1 -fi - -echo 123456 >> $TEST_TEXT_FILE - -LINE1=`sed -n '1,1p' $TEST_TEXT_FILE` -LINE2=`sed -n '2,2p' $TEST_TEXT_FILE` - -if [ ${LINE1} != "XYZ" ]; then - echo "LINE1 was not as expected, got ${LINE1}, expected XYZ" - exit 1 -fi - -if [ ${LINE2} != "123456" ]; then - echo "LINE2 was not as expected, got ${LINE2}, expected 123456" - exit 1 -fi - - -# clean up -rm_test_file - -##################################################################### -# Simple directory test mkdir/rmdir -##################################################################### -echo "Testing creation/removal of a directory" - -if [ -e $TEST_DIR ]; then - echo "Unexpected, this file/directory exists: ${TEST_DIR}" - exit 1 -fi - -mk_test_dir -rm_test_dir - -########################################################## -# File permissions test (individual file) -########################################################## -echo "Testing chmod file function ..." - -# create the test file again -mk_test_file - -ORIGINAL_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE) - -chmod 777 $TEST_TEXT_FILE; - -# if they're the same, we have a problem. -if [ $(stat --format=%a $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ] -then - echo "Could not modify $TEST_TEXT_FILE permissions" - exit 1 -fi - -# clean up -rm_test_file - -########################################################## -# File permissions test (individual file) -########################################################## -echo "Testing chown file function ..." - -# create the test file again -mk_test_file - -ORIGINAL_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE) - -chown 1000:1000 $TEST_TEXT_FILE; - -# if they're the same, we have a problem. 
-if [ $(stat --format=%a $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ] -then - echo "Could not modify $TEST_TEXT_FILE ownership" - exit 1 -fi - -# clean up -rm_test_file - -########################################################## -# Testing list -########################################################## -echo "Testing list" -mk_test_file -mk_test_dir - -file_cnt=$(ls -1 | wc -l) -if [ $file_cnt != 2 ]; then - echo "Expected 2 file but got $file_cnt" - exit 1 -fi - -rm_test_file -rm_test_dir - -########################################################## -# Testing rename before close -########################################################## -if false; then -echo "Testing rename before close ..." -$CUR_DIR/rename_before_close $TEST_TEXT_FILE -if [ $? != 0 ]; then - echo "rename before close failed" - exit 1 -fi - -# clean up -rm_test_file -fi - -########################################################## -# Testing multi-part upload -########################################################## -echo "Testing multi-part upload ..." -dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1 -dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1 - -# Verify contents of file -echo "Comparing test file" -if ! cmp "/tmp/${BIG_FILE}" "${BIG_FILE}" -then - exit 1 -fi - -rm -f "/tmp/${BIG_FILE}" -rm -f "${BIG_FILE}" - -########################################################## -# Testing special characters -########################################################## -echo "Testing special characters ..." - -ls 'special' 2>&1 | grep -q 'No such file or directory' -ls 'special?' 2>&1 | grep -q 'No such file or directory' -ls 'special*' 2>&1 | grep -q 'No such file or directory' -ls 'special~' 2>&1 | grep -q 'No such file or directory' -ls 'specialµ' 2>&1 | grep -q 'No such file or directory' - -########################################################## -# Testing extended attributes -########################################################## - -rm -f $TEST_TEXT_FILE -touch $TEST_TEXT_FILE - -# set value -setfattr -n key1 -v value1 $TEST_TEXT_FILE -getfattr -n key1 --only-values $TEST_TEXT_FILE | grep -q '^value1$' - -# append value -setfattr -n key2 -v value2 $TEST_TEXT_FILE -getfattr -n key1 --only-values $TEST_TEXT_FILE | grep -q '^value1$' -getfattr -n key2 --only-values $TEST_TEXT_FILE | grep -q '^value2$' - -# remove value -setfattr -x key1 $TEST_TEXT_FILE -! 
getfattr -n key1 --only-values $TEST_TEXT_FILE
-getfattr -n key2 --only-values $TEST_TEXT_FILE | grep -q '^value2$'
-
-#####################################################################
-# Tests are finished
-#####################################################################
+run_all_tests
 
 # Unmount the bucket
 cd $CUR_DIR
diff --git a/test/rename_before_close.c b/test/rename_before_close.c
deleted file mode 100644
index 7efceee..0000000
--- a/test/rename_before_close.c
+++ /dev/null
@@ -1,88 +0,0 @@
-#include <assert.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-
-static const char FILE_CONTENT[] = "XXXX";
-#define PROG "rename_before_close"
-
-static char *
-filename_to_mkstemp_template(const char *file)
-{
-	size_t len = strlen(file);
-	static const char suffix[] = ".XXXXXX";
-	size_t new_len = len + sizeof(suffix);
-	char *ret_str = calloc(1, new_len);
-	int ret = snprintf(ret_str, new_len, "%s%s", file, suffix);
-	assert(ret == new_len - 1);
-	assert(ret_str[new_len] == '\0');
-	return ret_str;
-}
-
-static off_t
-get_file_size(const char *file)
-{
-	struct stat ss;
-	printf(PROG ": stat(%s)\n", file);
-	int ret = lstat(file, &ss);
-	assert(ret == 0);
-	return ss.st_size;
-}
-
-static void
-test_rename_before_close(const char *file)
-{
-	char *template = filename_to_mkstemp_template(file);
-	printf(PROG ": mkstemp(%s)\n", template);
-	int fd = mkstemp(template);
-	assert(fd >= 0);
-
-	sleep(1);
-
-	printf(PROG ": write(%s)\n", template);
-	int ret = write(fd, FILE_CONTENT, sizeof(FILE_CONTENT));
-	assert(ret == sizeof(FILE_CONTENT));
-
-	sleep(1);
-
-	printf(PROG ": fsync(%s)\n", template);
-	ret = fsync(fd);
-	assert(ret == 0);
-
-	sleep(1);
-
-	assert(get_file_size(template) == sizeof(FILE_CONTENT));
-
-	sleep(1);
-
-	printf(PROG ": rename(%s, %s)\n", template, file);
-	ret = rename(template, file);
-	assert(ret == 0);
-
-	sleep(1);
-
-	printf(PROG ": close(%s)\n", file);
-	ret = close(fd);
-	assert(ret == 0);
-
-	sleep(1);
-
-	assert(get_file_size(file) == sizeof(FILE_CONTENT));
-}
-
-int
-main(int argc, char *argv[])
-{
-	setvbuf(stdout, NULL, _IONBF, 0);
-
-	if (argc < 2) {
-		printf("Usage: %s <file>\n", argv[0]);
-		return 1;
-	}
-
-	test_rename_before_close(argv[1]);
-	return 0;
-}
diff --git a/test/small-integration-test.sh b/test/small-integration-test.sh
index 188c566..c6116e3 100755
--- a/test/small-integration-test.sh
+++ b/test/small-integration-test.sh
@@ -1,8 +1,24 @@
 #!/bin/bash
+#
+# By default tests run against a local s3proxy instance.  To run against
+# Amazon S3, specify the following variables:
+#
+# S3FS_CREDENTIALS_FILE=keyfile      s3fs format key file
+# TEST_BUCKET_1=bucket               Name of bucket to use
+# S3PROXY_BINARY=""                  Leave empty
+# S3_URL="http://s3.amazonaws.com"   Specify Amazon server
+#
+# Example:
+#
+# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="http://s3.amazonaws.com" ./small-integration-test.sh
+#
 
 set -o xtrace
 set -o errexit
 
+: ${S3_URL:="http://127.0.0.1:8080"}
+
 # Require root
 REQUIRE_ROOT=require-root.sh
 #source $REQUIRE_ROOT
@@ -29,26 +45,32 @@ function retry {
 }
 
 function exit_handler {
-    kill $S3PROXY_PID
+    if [ -n "${S3PROXY_PID}" ]
+    then
+        kill $S3PROXY_PID
+    fi
     retry 30 fusermount -u $TEST_BUCKET_MOUNT_POINT_1
 }
 trap exit_handler EXIT
 
-stdbuf -oL -eL java -jar "$S3PROXY_BINARY" --properties s3proxy.conf | stdbuf -oL -eL sed -u "s/^/s3proxy: /" &
+if [ -n "${S3PROXY_BINARY}" ]
+then
+    stdbuf -oL -eL java -jar "$S3PROXY_BINARY" --properties s3proxy.conf | stdbuf -oL -eL sed -u "s/^/s3proxy: /" &
 
-# wait for S3Proxy to start
-for i in $(seq 30);
-do
-    if exec 3<>"/dev/tcp/localhost/8080";
-    then
-        exec 3<&-  # Close for read
-        exec 3>&-  # Close for write
-        break
-    fi
-    sleep 1
-done
+    # wait for S3Proxy to start
+    for i in $(seq 30);
+    do
+        if exec 3<>"/dev/tcp/127.0.0.1/8080";
+        then
+            exec 3<&-  # Close for read
+            exec 3>&-  # Close for write
+            break
+        fi
+        sleep 1
+    done
 
-S3PROXY_PID=$(netstat -lpnt | grep :8080 | awk '{ print $7 }' | sed -u 's|/java||')
+    S3PROXY_PID=$(netstat -lpnt | grep :8080 | awk '{ print $7 }' | sed -u 's|/java||')
+fi
 
 # Mount the bucket
 if [ ! -d $TEST_BUCKET_MOUNT_POINT_1 ]
@@ -57,10 +79,13 @@ then
 fi
 stdbuf -oL -eL $S3FS $TEST_BUCKET_1 $TEST_BUCKET_MOUNT_POINT_1 \
     -o createbucket \
+    -o enable_content_md5 \
    -o passwd_file=$S3FS_CREDENTIALS_FILE \
    -o sigv2 \
-    -o url=http://127.0.0.1:8080 \
-    -o use_path_request_style -f -o f2 -d -d |& stdbuf -oL -eL sed -u "s/^/s3fs: /" &
+    -o singlepart_copy_limit=$((10 * 1024)) \
+    -o url=${S3_URL} \
+    -o use_path_request_style \
+    -o dbglevel=info -f |& stdbuf -oL -eL sed -u "s/^/s3fs: /" &
 
 retry 30 grep $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts || exit 1
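Putting the pieces together, a typical local run could look like this (a
sketch; it assumes FUSE is usable on the host and that the s3proxy jar can be
fetched on first use, as integration-test-common.sh does):

    cd test
    # s3proxy is downloaded and started automatically, the bucket is created
    # via -o createbucket, and run_all_tests executes inside the mount
    ./small-integration-test.sh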