/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright (C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <cstdio>
#include <cstdlib>
#include <unistd.h>
#include <dirent.h>
#include <pwd.h>
#include <sys/types.h>
#include <getopt.h>
#include <fstream>

#include "common.h"
#include "s3fs.h"
#include "metaheader.h"
#include "fdcache.h"
#include "fdcache_auto.h"
#include "curl.h"
#include "curl_multi.h"
#include "s3objlist.h"
#include "cache.h"
#include "mvnode.h"
#include "addhead.h"
#include "sighandlers.h"
#include "s3fs_xml.h"
#include "s3fs_util.h"
#include "string_util.h"
#include "s3fs_auth.h"
#include "s3fs_help.h"
#include "mpu_util.h"
//-------------------------------------------------------------------
// Symbols
//-------------------------------------------------------------------
#if !defined(ENOATTR)
#define ENOATTR    ENODATA
#endif

enum dirtype {
    DIRTYPE_UNKNOWN = -1,
    DIRTYPE_NEW     = 0,    // "dir/" object
    DIRTYPE_OLD     = 1,    // legacy "dir" object(made by old s3fs versions)
    DIRTYPE_FOLDER  = 2,    // "dir_$folder$" compatibility object
    DIRTYPE_NOOBJ   = 3,    // no directory object, only child objects exist
};
//-------------------------------------------------------------------
// Static variables
//-------------------------------------------------------------------
static uid_t mp_uid                 = 0;        // owner of mount point(only when uid option is not specified)
static gid_t mp_gid                 = 0;        // group of mount point(only when gid option is not specified)
static mode_t mp_mode               = 0;        // mode of mount point
static mode_t mp_umask              = 0;        // umask for mount point
static bool is_mp_umask             = false;    // default is not set
static std::string mountpoint;
static std::string passwd_file;
static std::string mimetype_file;
static bool nocopyapi               = false;
static bool norenameapi             = false;
static bool nonempty                = false;
static bool allow_other             = false;
static bool load_iamrole            = false;
static uid_t s3fs_uid               = 0;
static gid_t s3fs_gid               = 0;
static mode_t s3fs_umask            = 0;
static bool is_s3fs_uid             = false;    // default is not set
static bool is_s3fs_gid             = false;    // default is not set
static bool is_s3fs_umask           = false;    // default is not set
static bool is_remove_cache         = false;
static bool is_ecs                  = false;
static bool is_ibm_iam_auth         = false;
static bool is_use_xattr            = false;
static bool is_use_session_token    = false;
static bool create_bucket           = false;
static int64_t singlepart_copy_limit = 512 * 1024 * 1024;
static bool is_specified_endpoint   = false;
static int s3fs_init_deferred_exit_status = 0;
static bool support_compat_dir      = true;     // default supports compatibility directory type
static int max_keys_list_object     = 1000;     // default is 1000
static bool use_wtf8                = false;

static const std::string allbucket_fields_type;         // special key for mapping(this name is absolutely not used as a bucket name)
static const std::string keyval_fields_type = "\t";     // special key for mapping(this name is absolutely not used as a bucket name)
static const std::string aws_accesskeyid    = "AWSAccessKeyId";
static const std::string aws_secretkey      = "AWSSecretKey";
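
// [NOTE]
// aws_accesskeyid and aws_secretkey are the key names that check_for_aws_format()
// looks for when a credential file is written in the AWS-style key=value form.
// A minimal sketch of such a file(illustrative values only):
//
//   AWSAccessKeyId=AKIAIOSFODNN7EXAMPLE
//   AWSSecretKey=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY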

//-------------------------------------------------------------------
// Global functions : prototype
//-------------------------------------------------------------------
// [NOTE] this is a global function because it is called from the FdEntity class
int put_headers(const char* path, headers_t& meta, bool is_copy, bool update_mtime = true);

//-------------------------------------------------------------------
// Static functions : prototype
//-------------------------------------------------------------------
static bool is_special_name_folder_object(const char* path);
static int chk_dir_object_type(const char* path, std::string& newpath, std::string& nowpath, std::string& nowcache, headers_t* pmeta = NULL, dirtype* pDirType = NULL);
static int remove_old_type_dir(const std::string& path, dirtype type);
static int get_object_attribute(const char* path, struct stat* pstbuf, headers_t* pmeta = NULL, bool overcheck = true, bool* pisforce = NULL, bool add_no_truncate_cache = false);
static int check_object_access(const char* path, int mask, struct stat* pstbuf);
static int check_object_owner(const char* path, struct stat* pstbuf);
static int check_parent_object_access(const char* path, int mask);
static FdEntity* get_local_fent(AutoFdEntity& autoent, const char* path, bool is_load = false);
static bool multi_head_callback(S3fsCurl* s3fscurl);
static S3fsCurl* multi_head_retry_callback(S3fsCurl* s3fscurl);
static int readdir_multi_head(const char* path, const S3ObjList& head, void* buf, fuse_fill_dir_t filler);
static int list_bucket(const char* path, S3ObjList& head, const char* delimiter, bool check_content_only = false);
static int directory_empty(const char* path);
static int rename_large_object(const char* from, const char* to);
static int create_file_object(const char* path, mode_t mode, uid_t uid, gid_t gid);
static int create_directory_object(const char* path, mode_t mode, time_t time, uid_t uid, gid_t gid);
static int rename_object(const char* from, const char* to);
static int rename_object_nocopy(const char* from, const char* to);
static int clone_directory_object(const char* from, const char* to);
static int rename_directory(const char* from, const char* to);
static int remote_mountpath_exists(const char* path);
static void free_xattrs(xattrs_t& xattrs);
static bool parse_xattr_keyval(const std::string& xattrpair, std::string& key, PXATTRVAL& pval);
static size_t parse_xattrs(const std::string& strxattrs, xattrs_t& xattrs);
static std::string build_xattrs(const xattrs_t& xattrs);
static int s3fs_check_service();
static int parse_passwd_file(bucketkvmap_t& resmap);
static int check_for_aws_format(const kvmap_t& kvmap);
static int check_passwd_file_perms();
static int read_aws_credentials_file(const std::string& filename);
static int read_passwd_file();
static int get_access_keys();
static bool set_mountpoint_attribute(struct stat& mpst);
static int set_bucket(const char* arg);
static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_args* outargs);

//-------------------------------------------------------------------
// fuse interface functions
//-------------------------------------------------------------------
static int s3fs_getattr(const char* path, struct stat* stbuf);
static int s3fs_readlink(const char* path, char* buf, size_t size);
static int s3fs_mknod(const char* path, mode_t mode, dev_t rdev);
static int s3fs_mkdir(const char* path, mode_t mode);
static int s3fs_unlink(const char* path);
static int s3fs_rmdir(const char* path);
static int s3fs_symlink(const char* from, const char* to);
static int s3fs_rename(const char* from, const char* to);
static int s3fs_link(const char* from, const char* to);
static int s3fs_chmod(const char* path, mode_t mode);
static int s3fs_chmod_nocopy(const char* path, mode_t mode);
static int s3fs_chown(const char* path, uid_t uid, gid_t gid);
static int s3fs_chown_nocopy(const char* path, uid_t uid, gid_t gid);
static int s3fs_utimens(const char* path, const struct timespec ts[2]);
static int s3fs_utimens_nocopy(const char* path, const struct timespec ts[2]);
static int s3fs_truncate(const char* path, off_t size);
static int s3fs_create(const char* path, mode_t mode, struct fuse_file_info* fi);
static int s3fs_open(const char* path, struct fuse_file_info* fi);
static int s3fs_read(const char* path, char* buf, size_t size, off_t offset, struct fuse_file_info* fi);
static int s3fs_write(const char* path, const char* buf, size_t size, off_t offset, struct fuse_file_info* fi);
static int s3fs_statfs(const char* path, struct statvfs* stbuf);
static int s3fs_flush(const char* path, struct fuse_file_info* fi);
static int s3fs_fsync(const char* path, int datasync, struct fuse_file_info* fi);
static int s3fs_release(const char* path, struct fuse_file_info* fi);
static int s3fs_opendir(const char* path, struct fuse_file_info* fi);
static int s3fs_readdir(const char* path, void* buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info* fi);
static int s3fs_access(const char* path, int mask);
static void* s3fs_init(struct fuse_conn_info* conn);
static void s3fs_destroy(void*);
#if defined(__APPLE__)
static int s3fs_setxattr(const char* path, const char* name, const char* value, size_t size, int flags, uint32_t position);
static int s3fs_getxattr(const char* path, const char* name, char* value, size_t size, uint32_t position);
#else
static int s3fs_setxattr(const char* path, const char* name, const char* value, size_t size, int flags);
static int s3fs_getxattr(const char* path, const char* name, char* value, size_t size);
#endif
static int s3fs_listxattr(const char* path, char* list, size_t size);
static int s3fs_removexattr(const char* path, const char* name);

//-------------------------------------------------------------------
// Functions
//-------------------------------------------------------------------
static bool IS_REPLACEDIR(dirtype type)
{
    return DIRTYPE_OLD == type || DIRTYPE_FOLDER == type || DIRTYPE_NOOBJ == type;
}

static bool IS_RMTYPEDIR(dirtype type)
{
    return DIRTYPE_OLD == type || DIRTYPE_FOLDER == type;
}
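
// [NOTE]
// An illustrative reading of these predicates: legacy "dir"(DIRTYPE_OLD) and
// "dir_$folder$"(DIRTYPE_FOLDER) objects must be deleted when the directory is
// rewritten as a "dir/" object, while DIRTYPE_NOOBJ has no object to delete.
// Thus IS_REPLACEDIR(DIRTYPE_NOOBJ) is true but IS_RMTYPEDIR(DIRTYPE_NOOBJ) is false.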

static bool is_special_name_folder_object(const char* path)
{
    if(!support_compat_dir){
        // s3fs does not support compatibility directory type("_$folder$" etc) now,
        // thus this always returns false.
        return false;
    }
    if(!path || '\0' == path[0]){
        return false;
    }

    std::string strpath = path;
    headers_t   header;

    if(std::string::npos == strpath.find("_$folder$", 0)){
        if('/' == strpath[strpath.length() - 1]){
            strpath = strpath.substr(0, strpath.length() - 1);
        }
        strpath += "_$folder$";
    }

    S3fsCurl s3fscurl;
    if(0 != s3fscurl.HeadRequest(strpath.c_str(), header)){
        return false;
    }
    header.clear();
    S3FS_MALLOCTRIM(0);
    return true;
}
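
// [NOTE]
// For example(illustrative only): for the path "/mydir" or "/mydir/", the function
// above sends a HEAD request for "/mydir_$folder$" and returns true only when that
// compatibility object exists.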

// [Detail]
// This function checks the directory object type.
// The arguments are used for deleting the cache/path and remaking the directory object.
// Please see the code which calls this function.
//
// path:     target path
// newpath:  object path for making/putting/getting after checking
// nowpath:  current object name for deleting after checking
// nowcache: current cache path for deleting after checking
// pmeta:    headers map
// pDirType: directory object type
//
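// For example(illustrative only): for a directory stored as a legacy "dir" object,
// this function returns 0 with newpath="dir/", nowpath="dir", nowcache="dir" and
// *pDirType=DIRTYPE_OLD, so that the caller can make the new "dir/" object and
// then delete the old one with remove_old_type_dir().
//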
static int chk_dir_object_type(const char* path, std::string& newpath, std::string& nowpath, std::string& nowcache, headers_t* pmeta, dirtype* pDirType)
{
    dirtype  TypeTmp;
    int      result  = -1;
    bool     isforce = false;
    dirtype* pType   = pDirType ? pDirType : &TypeTmp;

    // Normalize new path.
    newpath = path;
    if('/' != newpath[newpath.length() - 1]){
        std::string::size_type Pos;
        if(std::string::npos != (Pos = newpath.find("_$folder$", 0))){
            newpath = newpath.substr(0, Pos);
        }
        newpath += "/";
    }

    // Always check "dir/" at first.
    if(0 == (result = get_object_attribute(newpath.c_str(), NULL, pmeta, false, &isforce))){
        // Found "dir/" cache --> Check for "_$folder$", "no dir object"
        nowcache = newpath;
        if(is_special_name_folder_object(newpath.c_str())){     // checks support_compat_dir in this function
            // "_$folder$" type.
            (*pType) = DIRTYPE_FOLDER;
            nowpath  = newpath.substr(0, newpath.length() - 1) + "_$folder$"; // cut and add
        }else if(isforce){
            // "no dir object" type.
            (*pType) = DIRTYPE_NOOBJ;
            nowpath  = "";
        }else{
            nowpath = newpath;
            if(0 < nowpath.length() && '/' == nowpath[nowpath.length() - 1]){
                // "dir/" type
                (*pType) = DIRTYPE_NEW;
            }else{
                // "dir" type
                (*pType) = DIRTYPE_OLD;
            }
        }
    }else if(support_compat_dir){
        // Check "dir" when support_compat_dir is enabled
        nowpath = newpath.substr(0, newpath.length() - 1);
        if(0 == (result = get_object_attribute(nowpath.c_str(), NULL, pmeta, false, &isforce))){
            // Found "dir" cache --> this case is only "dir" type.
            // Because if the object were "_$folder$" or "no dir object", the cache would be "dir/" type.
            // (But "no dir object" is checked here.)
            nowcache = nowpath;
            if(isforce){
                (*pType) = DIRTYPE_NOOBJ;
                nowpath  = "";
            }else{
                (*pType) = DIRTYPE_OLD;
            }
        }else{
            // Not found in cache --> check for "_$folder$" and "no dir object".
            // (coming here means that support_compat_dir is enabled)
            nowcache = "";  // This case has no cache.
            nowpath += "_$folder$";
            if(is_special_name_folder_object(nowpath.c_str())){
                // "_$folder$" type.
                (*pType) = DIRTYPE_FOLDER;
                result   = 0;             // result is OK.
            }else if(-ENOTEMPTY == directory_empty(newpath.c_str())){
                // "no dir object" type.
                (*pType) = DIRTYPE_NOOBJ;
                nowpath  = "";            // now path.
                result   = 0;             // result is OK.
            }else{
                // Error: Unknown type.
                (*pType) = DIRTYPE_UNKNOWN;
                newpath  = "";
                nowpath  = "";
            }
        }
    }
    return result;
}

static int remove_old_type_dir(const std::string& path, dirtype type)
{
    if(IS_RMTYPEDIR(type)){
        S3fsCurl s3fscurl;
        int      result = s3fscurl.DeleteRequest(path.c_str());
        if(0 != result && -ENOENT != result){
            return result;
        }
        // succeeded in removing the directory, or it was not found
    }else{
        // nothing to do
    }
    return 0;
}

//
// Get object attributes with stat cache.
// This function is the basis for s3fs_getattr().
//
// [NOTICE]
// The checking order follows the list below to reduce the number of requests.
// 1) "dir"
// 2) "dir/"
// 3) "dir_$folder$"
//
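// For example(illustrative only), an overcheck lookup of "dir" proceeds as:
// HEAD "dir" -> (on failure) HEAD "dir/" -> (on failure, with support_compat_dir)
// HEAD "dir_$folder$" -> (on failure) check directory_empty("dir") to detect a
// "no dir object" that exists only through its child objects.
//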
static int get_object_attribute(const char* path, struct stat* pstbuf, headers_t* pmeta, bool overcheck, bool* pisforce, bool add_no_truncate_cache)
{
    int          result = -1;
    struct stat  tmpstbuf;
    struct stat* pstat = pstbuf ? pstbuf : &tmpstbuf;
    headers_t    tmpHead;
    headers_t*   pheader = pmeta ? pmeta : &tmpHead;
    std::string  strpath;
    S3fsCurl     s3fscurl;
    bool         forcedir = false;
    std::string::size_type Pos;

    S3FS_PRN_DBG("[path=%s]", path);

    if(!path || '\0' == path[0]){
        return -ENOENT;
    }

    memset(pstat, 0, sizeof(struct stat));
    if(0 == strcmp(path, "/") || 0 == strcmp(path, ".")){
        pstat->st_nlink = 1; // see fuse faq
        pstat->st_mode  = mp_mode;
        pstat->st_uid   = is_s3fs_uid ? s3fs_uid : mp_uid;
        pstat->st_gid   = is_s3fs_gid ? s3fs_gid : mp_gid;
        return 0;
    }

    // Check cache.
    pisforce    = (NULL != pisforce ? pisforce : &forcedir);
    (*pisforce) = false;
    strpath     = path;
    if(support_compat_dir && overcheck && std::string::npos != (Pos = strpath.find("_$folder$", 0))){
        strpath  = strpath.substr(0, Pos);
        strpath += "/";
    }
    if(StatCache::getStatCacheData()->GetStat(strpath, pstat, pheader, overcheck, pisforce)){
        StatCache::getStatCacheData()->ChangeNoTruncateFlag(strpath, add_no_truncate_cache);
        return 0;
    }
    if(StatCache::getStatCacheData()->IsNoObjectCache(strpath)){
        // the path is registered in the no-object cache, so the object does not exist.
        return -ENOENT;
    }

    // At first, check path
    strpath = path;
    result  = s3fscurl.HeadRequest(strpath.c_str(), (*pheader));
    s3fscurl.DestroyCurlHandle();

    // if the target path object is not found, do over-checking
    if(0 != result){
        if(overcheck){
            // when support_compat_dir is disabled, strpath may have "_$folder$".
            if('/' != strpath[strpath.length() - 1] && std::string::npos == strpath.find("_$folder$", 0)){
                // now path is "object", do check "object/" for over checking
                strpath += "/";
                result   = s3fscurl.HeadRequest(strpath.c_str(), (*pheader));
                s3fscurl.DestroyCurlHandle();
            }
            if(support_compat_dir && 0 != result){
                // now path is "object/", do check "object_$folder$" for over checking
                strpath  = strpath.substr(0, strpath.length() - 1);
                strpath += "_$folder$";
                result   = s3fscurl.HeadRequest(strpath.c_str(), (*pheader));
                s3fscurl.DestroyCurlHandle();
                if(0 != result){
                    // cut "_$folder$" for over checking "no dir object" after here
                    if(std::string::npos != (Pos = strpath.find("_$folder$", 0))){
                        strpath = strpath.substr(0, Pos);
                    }
                }
            }
        }
        if(support_compat_dir && 0 != result && std::string::npos == strpath.find("_$folder$", 0)){
            // now path is "object" or "object/", do check "no dir object" which is not an object but has only children.
            if('/' == strpath[strpath.length() - 1]){
                strpath = strpath.substr(0, strpath.length() - 1);
            }
            if(-ENOTEMPTY == directory_empty(strpath.c_str())){
                // found "no dir object".
                strpath  += "/";
                *pisforce = true;
                result    = 0;
            }
        }
    }else{
        if(support_compat_dir && '/' != strpath[strpath.length() - 1] && std::string::npos == strpath.find("_$folder$", 0) && is_need_check_obj_detail(*pheader)){
            // check the case where "object" does not have attributes and could be a directory.
            if(-ENOTEMPTY == directory_empty(strpath.c_str())){
                // found "no dir object".
                strpath  += "/";
                *pisforce = true;
                result    = 0;
            }
        }
    }
    if(0 != result){
        // finally, the "path" object was not found. Add it to the no-object cache.
        strpath = path;  // reset original
        StatCache::getStatCacheData()->AddNoObjectCache(strpath);
        return result;
    }

    // if path has "_$folder$", need to cut it.
    if(std::string::npos != (Pos = strpath.find("_$folder$", 0))){
        strpath  = strpath.substr(0, Pos);
        strpath += "/";
    }

    // Set into cache
    //
    // [NOTE]
    // When add_no_truncate_cache is true, the stats are always cached.
    // Such a cached stat entry is only removed by DelStat().
    // This is necessary for the case of accessing the attributes of an opened file.
    // (ex. getxattr() is called while writing to the opened file.)
    //
    if(add_no_truncate_cache || 0 != StatCache::getStatCacheData()->GetCacheSize()){
        // add into stat cache
        if(!StatCache::getStatCacheData()->AddStat(strpath, (*pheader), forcedir, add_no_truncate_cache)){
            S3FS_PRN_ERR("failed adding stat cache [path=%s]", strpath.c_str());
            return -ENOENT;
        }
        if(!StatCache::getStatCacheData()->GetStat(strpath, pstat, pheader, overcheck, pisforce)){
            // It is not in the cache.(why?) -> retry converting.
            if(!convert_header_to_stat(strpath.c_str(), (*pheader), pstat, forcedir)){
                S3FS_PRN_ERR("failed convert headers to stat[path=%s]", strpath.c_str());
                return -ENOENT;
            }
        }
    }else{
        // cache size is zero -> only convert.
        if(!convert_header_to_stat(strpath.c_str(), (*pheader), pstat, forcedir)){
            S3FS_PRN_ERR("failed convert headers to stat[path=%s]", strpath.c_str());
            return -ENOENT;
        }
    }
    return 0;
}

//
// Check the object's uid and gid for write/read/execute permission.
// The "mask" parameter is the same as for the access() function.
// If the target file does not exist, this function returns -ENOENT.
// If the target file can be accessed, the result is always 0.
//
// path: the target object path
// mask: bit field(F_OK, R_OK, W_OK, X_OK) like access().
// stat: NULL or a pointer to struct stat.
//
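// For example(illustrative only), a caller verifying write access could do:
//
//   int result;
//   if(0 != (result = check_object_access(path, W_OK, NULL))){
//       return result;   // -ENOENT, -EACCES or -EPERM
//   }
//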
static int check_object_access(const char* path, int mask, struct stat* pstbuf)
{
    int result;
    struct stat st;
    struct stat* pst = (pstbuf ? pstbuf : &st);
    struct fuse_context* pcxt;

    S3FS_PRN_DBG("[path=%s]", path);

    if(NULL == (pcxt = fuse_get_context())){
        return -EIO;
    }
    if(0 != (result = get_object_attribute(path, pst))){
        // If the target file(object) does not exist, result is -ENOENT.
        return result;
    }
    if(0 == pcxt->uid){
        // root is allowed all access.
        return 0;
    }
    if(is_s3fs_uid && s3fs_uid == pcxt->uid){
        // the "uid" option user is allowed all access.
        return 0;
    }
    if(F_OK == mask){
        // if the file exists, always return allowed.
        return 0;
    }

    // for "uid", "gid" option
    uid_t obj_uid = (is_s3fs_uid ? s3fs_uid : pst->st_uid);
    gid_t obj_gid = (is_s3fs_gid ? s3fs_gid : pst->st_gid);

    // compare file mode and uid/gid + mask.
    mode_t mode;
    mode_t base_mask = S_IRWXO;
    if(is_s3fs_umask){
        // If umask is set, all object attributes are set to ~umask.
        mode = ((S_IRWXU | S_IRWXG | S_IRWXO) & ~s3fs_umask);
    }else{
        mode = pst->st_mode;
    }
    if(pcxt->uid == obj_uid){
        base_mask |= S_IRWXU;
    }
    if(pcxt->gid == obj_gid){
        base_mask |= S_IRWXG;
    }
    if(1 == is_uid_include_group(pcxt->uid, obj_gid)){
        base_mask |= S_IRWXG;
    }
    mode &= base_mask;

    if(X_OK == (mask & X_OK)){
        if(0 == (mode & (S_IXUSR | S_IXGRP | S_IXOTH))){
            return -EPERM;
        }
    }
    if(W_OK == (mask & W_OK)){
        if(0 == (mode & (S_IWUSR | S_IWGRP | S_IWOTH))){
            return -EACCES;
        }
    }
    if(R_OK == (mask & R_OK)){
        if(0 == (mode & (S_IRUSR | S_IRGRP | S_IROTH))){
            return -EACCES;
        }
    }
    if(0 == mode){
        return -EACCES;
    }
    return 0;
}
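//
// [Illustrative sketch, not part of the original source]
// A minimal example of how a caller might use check_object_access(). The
// function name s3fs_access_example is hypothetical; the real FUSE access
// handler appears elsewhere in this file.
//
static int s3fs_access_example(const char* path, int mask)
{
    // mask is any combination of F_OK, R_OK, W_OK and X_OK; the object mode,
    // its uid/gid, and the umask/uid/gid mount options are all evaluated
    // inside check_object_access().
    return check_object_access(path, mask, NULL);
}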
2013-07-05 02:28:31 +00:00
static int check_object_owner(const char* path, struct stat* pstbuf)
2013-04-06 17:39:22 +00:00
{
2020-08-22 12:40:53 +00:00
    int result;
    struct stat st;
    struct stat* pst = (pstbuf ? pstbuf : &st);
    struct fuse_context* pcxt;

    S3FS_PRN_DBG("[path=%s]", path);

    if(NULL == (pcxt = fuse_get_context())){
        return -EIO;
    }
    if(0 != (result = get_object_attribute(path, pst))){
        // If there is no target file(object), the result is -ENOENT.
        return result;
    }
    // check owner
    if(0 == pcxt->uid){
        // root is allowed all access.
        return 0;
    }
    if(is_s3fs_uid && s3fs_uid == pcxt->uid){
        // the "uid" option user is allowed all access.
        return 0;
    }
    if(pcxt->uid == pst->st_uid){
        return 0;
    }
    return -EPERM;
2013-04-06 17:39:22 +00:00
}
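//
// [Illustrative sketch, not part of the original source]
// check_object_owner() is meant to gate owner-only operations (chmod/chown
// style handlers). The helper name example_owner_gate is hypothetical.
//
static int example_owner_gate(const char* path)
{
    struct stat stbuf;
    int result;
    if(0 != (result = check_object_owner(path, &stbuf))){
        // -EPERM unless the caller is root, the uid-option user, or the owner
        return result;
    }
    // ... proceed with the owner-only operation using stbuf ...
    return 0;
}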
//
// Check access to the parent directories of the object by uid and gid.
//
2013-07-05 02:28:31 +00:00
static int check_parent_object_access(const char* path, int mask)
{
2020-09-11 09:37:24 +00:00
    std::string parent;
2020-08-22 12:40:53 +00:00
    int result;
2020-08-22 12:40:53 +00:00
    S3FS_PRN_DBG("[path=%s]", path);
2020-08-22 12:40:53 +00:00
    if(0 == strcmp(path, "/") || 0 == strcmp(path, ".")){
        // path is the mount point.
        return 0;
2013-04-06 17:39:22 +00:00
}
2020-08-22 12:40:53 +00:00
    if(X_OK == (mask & X_OK)){
        for(parent = mydirname(path); !parent.empty(); parent = mydirname(parent)){
            if(parent == "."){
                parent = "/";
            }
            if(0 != (result = check_object_access(parent.c_str(), X_OK, NULL))){
                return result;
            }
            if(parent == "/" || parent == "."){
                break;
            }
        }
}
2020-08-22 12:40:53 +00:00
    mask = (mask & ~X_OK);
    if(0 != mask){
        parent = mydirname(path);
        if(parent == "."){
            parent = "/";
        }
        if(0 != (result = check_object_access(parent.c_str(), mask, NULL))){
            return result;
        }
}
2020-08-22 12:40:53 +00:00
    return 0;
}
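//
// [Illustrative sketch, not part of the original source]
// The X_OK loop above depends on mydirname() shortening the path by one
// component per call. This hypothetical helper logs the parents that would
// be checked; for "/a/b/c" it should log "/a/b", "/a" and "/".
//
static void example_log_parents(const char* path)
{
    for(std::string parent = mydirname(path); !parent.empty(); parent = mydirname(parent)){
        if(parent == "."){
            parent = "/";
        }
        S3FS_PRN_DBG("would check X_OK on parent: %s", parent.c_str());
        if(parent == "/" || parent == "."){
            break;
        }
    }
}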
2014-07-19 19:02:55 +00:00
//
2015-10-06 14:46:14 +00:00
// ssevalue is MD5 for SSE-C type, or KMS id for SSE-KMS
2014-07-19 19:02:55 +00:00
//
2020-09-11 09:37:24 +00:00
bool get_object_sse_type(const char* path, sse_type_t& ssetype, std::string& ssevalue)
2014-07-19 19:02:55 +00:00
{
2020-08-22 12:40:53 +00:00
    if(!path){
        return false;
    }

    headers_t meta;
    if(0 != get_object_attribute(path, NULL, &meta)){
        S3FS_PRN_ERR("Failed to get object(%s) headers", path);
        return false;
    }

    ssetype = sse_type_t::SSE_DISABLE;
    ssevalue.erase();
    for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
2020-09-11 09:37:24 +00:00
        std::string key = (*iter).first;
2020-08-22 12:40:53 +00:00
        if(0 == strcasecmp(key.c_str(), "x-amz-server-side-encryption") && 0 == strcasecmp((*iter).second.c_str(), "AES256")){
            ssetype = sse_type_t::SSE_S3;
        }else if(0 == strcasecmp(key.c_str(), "x-amz-server-side-encryption-aws-kms-key-id")){
            ssetype = sse_type_t::SSE_KMS;
            ssevalue = (*iter).second;
        }else if(0 == strcasecmp(key.c_str(), "x-amz-server-side-encryption-customer-key-md5")){
            ssetype = sse_type_t::SSE_C;
            ssevalue = (*iter).second;
        }
    }
    return true;
2014-07-19 19:02:55 +00:00
}
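//
// [Illustrative sketch, not part of the original source]
// Typical use of get_object_sse_type(). The object path "/encrypted.dat" is
// only an example.
//
static void example_show_sse(void)
{
    sse_type_t  ssetype = sse_type_t::SSE_DISABLE;
    std::string ssevalue;
    if(get_object_sse_type("/encrypted.dat", ssetype, ssevalue)){
        // ssevalue holds the key MD5 for SSE-C or the key id for SSE-KMS,
        // and stays empty for SSE-S3 and for unencrypted objects.
        S3FS_PRN_DBG("sse value: %s", ssevalue.c_str());
    }
}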
2020-09-13 07:49:25 +00:00
static FdEntity* get_local_fent(AutoFdEntity& autoent, const char* path, bool is_load = false)
2013-07-05 02:28:31 +00:00
{
2020-08-22 12:40:53 +00:00
    struct stat stobj;
    FdEntity*   ent;
    headers_t   meta;
2010-11-13 23:59:23 +00:00
2020-08-22 12:40:53 +00:00
    S3FS_PRN_INFO2("[path=%s]", path);
2011-02-16 16:52:45 +00:00
2020-08-22 12:40:53 +00:00
    if(0 != get_object_attribute(path, &stobj, &meta)){
        return NULL;
    }
2011-02-10 01:07:46 +00:00
2020-08-22 12:40:53 +00:00
    // open
    time_t mtime         = (!S_ISREG(stobj.st_mode) || S_ISLNK(stobj.st_mode)) ? -1 : stobj.st_mtime;
    bool   force_tmpfile = S_ISREG(stobj.st_mode) ? false : true;
2010-11-13 23:59:23 +00:00
2020-09-13 07:49:25 +00:00
    if(NULL == (ent = autoent.Open(path, &meta, stobj.st_size, mtime, force_tmpfile, true))){
2020-08-22 12:40:53 +00:00
        S3FS_PRN_ERR("Could not open file. errno(%d)", errno);
        return NULL;
    }
    // load
    if(is_load && !ent->OpenAndLoadAll(&meta)){
        S3FS_PRN_ERR("Could not load file. errno(%d)", errno);
2020-09-13 07:49:25 +00:00
        autoent.Close();
2020-08-22 12:40:53 +00:00
        return NULL;
    }
    return ent;
2010-11-13 23:59:23 +00:00
}
2020-08-22 12:40:53 +00:00
//
// create or update s3 meta
// is_copy: the metadata is copied from the existing object
// update_mtime: also set the new mtime on any opened/cached local file entity
// @return fuse return code
//
2020-09-16 14:45:28 +00:00
int put_headers(const char* path, headers_t& meta, bool is_copy, bool update_mtime)
2013-06-04 06:04:04 +00:00
{
2020-08-22 12:40:53 +00:00
    int         result;
    S3fsCurl    s3fscurl(true);
    struct stat buf;
2010-12-28 04:15:23 +00:00
2020-08-22 12:40:53 +00:00
    S3FS_PRN_INFO2("[path=%s]", path);
2010-12-28 04:15:23 +00:00
2020-08-22 12:40:53 +00:00
    // files larger than 5GB must be modified via the multipart interface
    // *** If the target object does not exist (as in a move), get_object_attribute()
    //     returns an error after initializing buf.
    (void)get_object_attribute(path, &buf);
2020-08-22 12:40:53 +00:00
    if(buf.st_size >= FIVE_GB){
        // multipart
        if(nocopyapi || nomultipart){
            return -EFBIG;    // File too large
        }
        if(0 != (result = s3fscurl.MultipartHeadRequest(path, buf.st_size, meta, is_copy))){
            return result;
        }
    }else{
        if(0 != (result = s3fscurl.PutHeadRequest(path, meta, is_copy))){
            return result;
        }
2011-08-29 22:01:32 +00:00
}
2013-04-17 04:50:13 +00:00
2020-08-22 12:40:53 +00:00
    // [NOTE]
    // if path ends with '/', it is a directory and has no cache file
    // (a file cannot be opened for a directory stat)
    //
2020-09-16 14:45:28 +00:00
    if(update_mtime && '/' != path[strlen(path) - 1]){
2020-09-13 07:49:25 +00:00
        AutoFdEntity autoent;
        FdEntity*    ent;
        if(NULL == (ent = autoent.ExistOpen(path, -1, !FdManager::IsCacheDir()))){
2020-08-22 12:40:53 +00:00
            // no opened fd
            if(FdManager::IsCacheDir()){
                // create a cache file if needed
2020-09-13 07:49:25 +00:00
                ent = autoent.Open(path, &meta, buf.st_size, -1, false, true);
2020-08-22 12:40:53 +00:00
            }
        }
        if(ent){
            time_t mtime = get_mtime(meta);
            ent->SetMtime(mtime);
        }
}
2020-08-22 12:40:53 +00:00
    return 0;
2011-02-15 23:32:27 +00:00
}
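//
// [Illustrative sketch, not part of the original source]
// How a metadata-only update might call put_headers(): fetch the current
// headers, replace a meta entry, and request a server-side copy onto the
// same object. The copy-source/directive headers, the service_path and
// bucket globals, and the mode value 0640 are assumptions modeled on the
// patterns used elsewhere in s3fs, not a verbatim handler.
//
static int example_replace_mode_meta(const char* path)
{
    headers_t meta;
    if(0 != get_object_attribute(path, NULL, &meta)){
        return -EIO;
    }
    mode_t mode = 0640;    // hypothetical new mode
    meta["x-amz-meta-mode"]          = str(mode);
    meta["x-amz-copy-source"]        = urlEncode(service_path + bucket + get_realpath(path));
    meta["x-amz-metadata-directive"] = "REPLACE";

    // is_copy=true copies the object data server side; update_mtime=false
    // leaves any opened local file entity untouched.
    return put_headers(path, meta, true, false);
}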
2019-02-18 13:36:48 +00:00
static int s3fs_getattr(const char* _path, struct stat* stbuf)
{
2020-08-22 12:40:53 +00:00
    WTF8_ENCODE(path)
    int result;
2010-12-17 04:40:15 +00:00
2020-08-22 12:40:53 +00:00
    S3FS_PRN_INFO("[path=%s]", path);
2015-10-18 17:03:41 +00:00
2020-08-22 12:40:53 +00:00
    // check parent directory attribute.
    if(0 != (result = check_parent_object_access(path, X_OK))){
        return result;
    }
    if(0 != (result = check_object_access(path, F_OK, stbuf))){
        return result;
}
2020-08-22 12:40:53 +00:00
    // If the file is already opened, use the st_size of the opened fd instead.
    // (See: Issue 241)
    if(stbuf){
2020-09-13 07:49:25 +00:00
        AutoFdEntity autoent;
        FdEntity*    ent;
        if(NULL != (ent = autoent.ExistOpen(path))){
2020-08-22 12:40:53 +00:00
            struct stat tmpstbuf;
            if(ent->GetStats(tmpstbuf)){
                stbuf->st_size = tmpstbuf.st_size;
            }
        }
        stbuf->st_blksize = 4096;
        stbuf->st_blocks  = get_blocks(stbuf->st_size);

        S3FS_PRN_DBG("[path=%s] uid=%u, gid=%u, mode=%04o", path, (unsigned int)(stbuf->st_uid), (unsigned int)(stbuf->st_gid), stbuf->st_mode);
}
    S3FS_MALLOCTRIM(0);
2013-08-23 16:28:50 +00:00
2020-08-22 12:40:53 +00:00
    return result;
2010-11-13 23:59:23 +00:00
}
2019-02-18 13:36:48 +00:00
static int s3fs_readlink(const char* _path, char* buf, size_t size)
2013-07-05 02:28:31 +00:00
{
2020-08-22 12:40:53 +00:00
    if(!_path || !buf || 0 == size){
        return 0;
    }
    WTF8_ENCODE(path)
2020-09-11 09:37:24 +00:00
    std::string strValue;
2019-11-26 13:42:44 +00:00
2020-08-22 12:40:53 +00:00
    // check symbolic link cache
2020-09-11 09:37:24 +00:00
    if(!StatCache::getStatCacheData()->GetSymlink(std::string(path), strValue)){
2020-08-22 12:40:53 +00:00
        // not found in cache, then open the path
2020-09-13 07:49:25 +00:00
        {   // scope for AutoFdEntity
            AutoFdEntity autoent;
            FdEntity*    ent;
            if(NULL == (ent = get_local_fent(autoent, path))){
                S3FS_PRN_ERR("could not get fent(file=%s)", path);
                return -EIO;
            }
            // Get size
            off_t readsize;
            if(!ent->GetSize(readsize)){
                S3FS_PRN_ERR("could not get file size(file=%s)", path);
                return -EIO;
            }
            if(static_cast<off_t>(size) <= readsize){
                readsize = size - 1;
            }
            // Read
            ssize_t ressize;
            if(0 > (ressize = ent->Read(buf, 0, readsize))){
                S3FS_PRN_ERR("could not read file(file=%s, ressize=%zd)", path, ressize);
                return static_cast<int>(ressize);
            }
            buf[ressize] = '\0';
2020-08-22 12:40:53 +00:00
        }
2019-11-26 13:42:44 +00:00
2020-08-22 12:40:53 +00:00
        // strip surrounding whitespace from the result.
2020-09-11 09:37:24 +00:00
        strValue = trim(std::string(buf));
2020-08-22 12:40:53 +00:00
        // decode wtf8. This will always be shorter.
        if(use_wtf8){
            strValue = s3fs_wtf8_decode(strValue);
        }
2019-11-26 13:42:44 +00:00
2020-08-22 12:40:53 +00:00
        // add symbolic link cache
2020-09-11 09:37:24 +00:00
        if(!StatCache::getStatCacheData()->AddSymlink(std::string(path), strValue)){
2020-08-22 12:40:53 +00:00
            S3FS_PRN_ERR("failed to add symbolic link cache for %s", path);
        }
2019-11-26 13:42:44 +00:00
}
2020-08-22 12:40:53 +00:00
    // copy result
    strncpy(buf, strValue.c_str(), size);
    buf[size - 1] = '\0';    // strncpy does not guarantee termination
2016-10-11 10:06:21 +00:00
2020-08-22 12:40:53 +00:00
    S3FS_MALLOCTRIM(0);
2020-08-22 12:40:53 +00:00
    return 0;
2010-11-13 23:59:23 +00:00
}
2019-01-23 23:44:50 +00:00
static int do_create_bucket()
2015-02-24 13:17:59 +00:00
{
2020-08-22 12:40:53 +00:00
    S3FS_PRN_INFO2("/");
2015-02-24 13:17:59 +00:00
2020-08-22 12:40:53 +00:00
    FILE* ptmpfp;
    int   tmpfd;
    if(endpoint == "us-east-1"){
        ptmpfp = NULL;
        tmpfd  = -1;
    }else{
        if(NULL == (ptmpfp = tmpfile()) ||
           -1 == (tmpfd = fileno(ptmpfp)) ||
           0 >= fprintf(ptmpfp, "<CreateBucketConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\n"
                                "<LocationConstraint>%s</LocationConstraint>\n"
                                "</CreateBucketConfiguration>", endpoint.c_str()) ||
           0 != fflush(ptmpfp) ||
           -1 == fseek(ptmpfp, 0L, SEEK_SET))
        {
            S3FS_PRN_ERR("failed to create temporary file. err(%d)", errno);
            if(ptmpfp){
                fclose(ptmpfp);
            }
            return (0 == errno ? -EIO : -errno);
        }
    }

    headers_t meta;
    S3fsCurl  s3fscurl(true);
    int       res = s3fscurl.PutRequest("/", meta, tmpfd);
    if(res < 0){
        long responseCode = s3fscurl.GetLastResponseCode();
        if((responseCode == 400 || responseCode == 403) && S3fsCurl::IsSignatureV4()){
            S3FS_PRN_ERR("Could not connect, so retry to connect by signature version 2.");
            S3fsCurl::SetSignatureV4(false);

            // retry to check
            s3fscurl.DestroyCurlHandle();
            res = s3fscurl.PutRequest("/", meta, tmpfd);
        }else if(responseCode == 409){
            // bucket already exists
            res = 0;
        }
    }
    if(ptmpfp != NULL){
        fclose(ptmpfp);
    }
    return res;
}
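//
// [Note, illustrative]
// For any endpoint other than "us-east-1", do_create_bucket() sends a request
// body like the following (shown here for the example endpoint "eu-west-1"):
//
//   <CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
//     <LocationConstraint>eu-west-1</LocationConstraint>
//   </CreateBucketConfiguration>
//
// us-east-1 is the default region, so the PUT is sent with an empty body there.
//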
// common function for creation of a plain object
static int create_file_object(const char* path, mode_t mode, uid_t uid, gid_t gid)
2013-07-05 02:28:31 +00:00
{
2020-08-22 12:40:53 +00:00
    S3FS_PRN_INFO2("[path=%s][mode=%04o]", path, mode);

    time_t    now = time(NULL);
    headers_t meta;
2020-09-11 09:37:24 +00:00
    meta["Content-Type"]     = S3fsCurl::LookupMimeType(std::string(path));
2020-08-22 12:40:53 +00:00
    meta["x-amz-meta-uid"]   = str(uid);
    meta["x-amz-meta-gid"]   = str(gid);
    meta["x-amz-meta-mode"]  = str(mode);
    meta["x-amz-meta-ctime"] = str(now);
    meta["x-amz-meta-mtime"] = str(now);

    S3fsCurl s3fscurl(true);
    return s3fscurl.PutRequest(path, meta, -1);    // fd=-1 means creating a zero byte object.
2010-11-13 23:59:23 +00:00
}
2019-02-18 13:36:48 +00:00
static int s3fs_mknod(const char* _path, mode_t mode, dev_t rdev)
2013-01-19 16:05:07 +00:00
{
2020-08-22 12:40:53 +00:00
    WTF8_ENCODE(path)
    int result;
    struct fuse_context* pcxt;

    S3FS_PRN_INFO("[path=%s][mode=%04o][dev=%llu]", path, mode, (unsigned long long)rdev);
2010-12-21 15:24:46 +00:00
2020-08-22 12:40:53 +00:00
    if(NULL == (pcxt = fuse_get_context())){
        return -EIO;
    }
2013-07-30 07:27:22 +00:00
2020-08-22 12:40:53 +00:00
    if(0 != (result = create_file_object(path, mode, pcxt->uid, pcxt->gid))){
        S3FS_PRN_ERR("could not create object for special file(result=%d)", result);
        return result;
    }
    StatCache::getStatCacheData()->DelStat(path);
    S3FS_MALLOCTRIM(0);
2013-07-30 07:27:22 +00:00
    return result;
2010-12-22 17:19:52 +00:00
}
2010-12-21 15:24:46 +00:00
2019-02-18 13:36:48 +00:00
static int s3fs_create(const char* _path, mode_t mode, struct fuse_file_info* fi)
2013-07-05 02:28:31 +00:00
{
2020-08-22 12:40:53 +00:00
    WTF8_ENCODE(path)
    int result;
    struct fuse_context* pcxt;
2010-12-21 15:24:46 +00:00
2020-08-22 12:40:53 +00:00
    S3FS_PRN_INFO("[path=%s][mode=%04o][flags=0x%x]", path, mode, fi->flags);
2020-08-22 12:40:53 +00:00
    if(NULL == (pcxt = fuse_get_context())){
        return -EIO;
2013-04-06 17:39:22 +00:00
}
2015-10-18 17:03:41 +00:00
2020-08-22 12:40:53 +00:00
    // check parent directory attribute.
    if(0 != (result = check_parent_object_access(path, X_OK))){
        return result;
    }
    result = check_object_access(path, W_OK, NULL);
    if(-ENOENT == result){
        if(0 != (result = check_parent_object_access(path, W_OK))){
            return result;
        }
    }else if(0 != result){
        return result;
    }

    result = create_file_object(path, mode, pcxt->uid, pcxt->gid);
2016-03-13 05:43:28 +00:00
    StatCache::getStatCacheData()->DelStat(path);
2020-08-22 12:40:53 +00:00
    if(result != 0){
        return result;
    }
2020-09-13 07:49:25 +00:00
    AutoFdEntity autoent;
    FdEntity*    ent;
    headers_t    meta;
2020-08-22 12:40:53 +00:00
    get_object_attribute(path, NULL, &meta, true, NULL, true);    // no truncate cache
2020-09-13 07:49:25 +00:00
    if(NULL == (ent = autoent.Open(path, &meta, 0, -1, false, true))){
2020-08-22 12:40:53 +00:00
        StatCache::getStatCacheData()->DelStat(path);
        return -EIO;
    }
2020-09-13 07:49:25 +00:00
    autoent.Detach();    // KEEP fdentity open
2020-08-22 12:40:53 +00:00
    fi->fh = ent->GetFd();
2020-09-13 07:49:25 +00:00
2020-08-22 12:40:53 +00:00
    S3FS_MALLOCTRIM(0);
2010-12-21 15:24:46 +00:00
2020-08-22 12:40:53 +00:00
    return 0;
2010-12-21 15:24:46 +00:00
}
2013-07-05 02:28:31 +00:00
static int create_directory_object(const char* path, mode_t mode, time_t time, uid_t uid, gid_t gid)
2013-01-19 16:05:07 +00:00
{
2020-08-22 12:40:53 +00:00
    S3FS_PRN_INFO1("[path=%s][mode=%04o][time=%lld][uid=%u][gid=%u]", path, mode, static_cast<long long>(time), (unsigned int)uid, (unsigned int)gid);

    if(!path || '\0' == path[0]){
        return -1;
    }
2020-09-11 09:37:24 +00:00
    std::string tpath = path;
2020-08-22 12:40:53 +00:00
    if('/' != tpath[tpath.length() - 1]){
        tpath += "/";
    }

    headers_t meta;
    meta["x-amz-meta-uid"]   = str(uid);
    meta["x-amz-meta-gid"]   = str(gid);
    meta["x-amz-meta-mode"]  = str(mode);
    meta["x-amz-meta-ctime"] = str(time);
    meta["x-amz-meta-mtime"] = str(time);

    S3fsCurl s3fscurl;
    return s3fscurl.PutRequest(tpath.c_str(), meta, -1);    // fd=-1 means creating a zero byte object.
2010-11-13 23:59:23 +00:00
}
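//
// [Illustrative sketch, not part of the original source]
// create_directory_object() always stores a directory as a zero byte object
// with a trailing slash, so creating "/mydir" puts the key "mydir/". The
// path and the mode value 0750 are only examples.
//
static int example_make_dir_object(uid_t uid, gid_t gid)
{
    return create_directory_object("/mydir", 0750, time(NULL), uid, gid);
}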
2019-02-18 13:36:48 +00:00
static int s3fs_mkdir(const char* _path, mode_t mode)
2013-01-19 16:05:07 +00:00
{
2020-08-22 12:40:53 +00:00
    WTF8_ENCODE(path)
    int result;
    struct fuse_context* pcxt;
2020-08-22 12:40:53 +00:00
    S3FS_PRN_INFO("[path=%s][mode=%04o]", path, mode);
2013-01-19 16:05:07 +00:00
2020-08-22 12:40:53 +00:00
    if(NULL == (pcxt = fuse_get_context())){
        return -EIO;
    }
2020-08-22 12:40:53 +00:00
// check parent directory attribute.
if(0 != (result = check_parent_object_access(path, W_OK | X_OK))){
return result;
}
if(-ENOENT != (result = check_object_access(path, F_OK, NULL))){
if(0 == result){
result = -EEXIST;
}
return result;
}
2020-08-22 12:40:53 +00:00
result = create_directory_object(path, mode, time(NULL), pcxt->uid, pcxt->gid);
StatCache::getStatCacheData()->DelStat(path);
S3FS_MALLOCTRIM(0);
2013-09-14 21:50:39 +00:00
2020-08-22 12:40:53 +00:00
return result;
2013-01-19 16:05:07 +00:00
}
2019-02-18 13:36:48 +00:00
static int s3fs_unlink(const char* _path)
2013-07-05 02:28:31 +00:00
{
2020-08-22 12:40:53 +00:00
WTF8_ENCODE(path)
int result;
2011-02-23 17:16:12 +00:00
2020-08-22 12:40:53 +00:00
S3FS_PRN_INFO("[path=%s]", path);
if(0 != (result = check_parent_object_access(path, W_OK | X_OK))){
return result;
}
S3fsCurl s3fscurl;
result = s3fscurl.DeleteRequest(path);
FdManager::DeleteCacheFile(path);
StatCache::getStatCacheData()->DelStat(path);
StatCache::getStatCacheData()->DelSymlink(path);
S3FS_MALLOCTRIM(0);
2010-11-13 23:59:23 +00:00
return result;
2010-11-13 23:59:23 +00:00
}
2013-07-05 02:28:31 +00:00
static int directory_empty(const char* path)
{
2020-08-22 12:40:53 +00:00
int result;
S3ObjList head;
2010-11-13 23:59:23 +00:00
2020-08-22 12:40:53 +00:00
if((result = list_bucket(path, head, "/", true)) != 0){
S3FS_PRN_ERR("list_bucket returns error.");
return result;
}
if(!head.IsEmpty()){
return -ENOTEMPTY;
}
return 0;
2011-08-30 19:08:01 +00:00
}
2019-02-18 13:36:48 +00:00
static int s3fs_rmdir(const char* _path)
2013-04-20 19:17:28 +00:00
{
2020-08-22 12:40:53 +00:00
WTF8_ENCODE(path)
int result;
2020-09-11 09:37:24 +00:00
std::string strpath;
2020-08-22 12:40:53 +00:00
struct stat stbuf;
S3FS_PRN_INFO("[path=%s]", path);
if(0 != (result = check_parent_object_access(path, W_OK | X_OK))){
return result;
}
// directory must be empty
if(directory_empty(path) != 0){
return -ENOTEMPTY;
}
strpath = path;
if('/' != strpath[strpath.length() - 1]){
strpath += "/";
}
S3fsCurl s3fscurl;
result = s3fscurl.DeleteRequest(strpath.c_str());
s3fscurl.DestroyCurlHandle();
StatCache::getStatCacheData()->DelStat(strpath.c_str());
// double check for the old version(before 1.63)
// The old version makes a "dir" object, the newer version makes "dir/".
// In the case that there is only "dir", the first object removed is "dir/".
// Then "dir/" does not exist, but curl_delete returns 0.
// So we need to check for "dir" and remove it too.
if('/' == strpath[strpath.length() - 1]){
strpath = strpath.substr(0, strpath.length() - 1);
}
if(0 == get_object_attribute(strpath.c_str(), &stbuf, NULL, false)){
if(S_ISDIR(stbuf.st_mode)){
// Found the "dir" object.
result = s3fscurl.DeleteRequest(strpath.c_str());
s3fscurl.DestroyCurlHandle();
StatCache::getStatCacheData()->DelStat(strpath.c_str());
}
}
// If there is neither a "dir" nor a "dir/" object(this case is made by s3cmd/s3sync),
// the cache key is "dir/". So we get an error only once(deleting "dir/").
2011-08-30 19:08:01 +00:00
2020-08-22 12:40:53 +00:00
// check for the "_$folder$" object.
// This processing is necessary for compatibility with other S3 clients.
if(is_special_name_folder_object(strpath.c_str())){
strpath += "_$folder$";
result = s3fscurl.DeleteRequest(strpath.c_str());
}
S3FS_MALLOCTRIM(0);
2011-08-30 19:08:01 +00:00
return result;
2010-11-13 23:59:23 +00:00
}
2019-02-18 13:36:48 +00:00
static int s3fs_symlink(const char* _from, const char* _to)
2013-07-05 02:28:31 +00:00
{
2020-08-22 12:40:53 +00:00
WTF8_ENCODE(from)
WTF8_ENCODE(to)
int result;
struct fuse_context* pcxt;
2010-12-19 22:27:56 +00:00
2020-08-22 12:40:53 +00:00
S3FS_PRN_INFO("[from=%s][to=%s]", from, to);
2010-11-13 23:59:23 +00:00
2020-08-22 12:40:53 +00:00
if(NULL == (pcxt = fuse_get_context())){
return -EIO;
}
if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){
return result;
}
if(-ENOENT != (result = check_object_access(to, F_OK, NULL))){
if(0 == result){
result = -EEXIST;
}
return result;
}
time_t now = time(NULL);
headers_t headers;
2020-09-11 09:37:24 +00:00
headers["Content-Type"] = std::string("application/octet-stream"); // Static
2020-08-22 12:40:53 +00:00
headers["x-amz-meta-mode"] = str(S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO);
headers["x-amz-meta-ctime"] = str(now);
headers["x-amz-meta-mtime"] = str(now);
headers["x-amz-meta-uid"] = str(pcxt->uid);
headers["x-amz-meta-gid"] = str(pcxt->gid);
// open tmpfile
2020-09-13 07:49:25 +00:00
std::string strFrom;
{ // scope for AutoFdEntity
AutoFdEntity autoent;
FdEntity* ent;
if(NULL == (ent = autoent.Open(to, &headers, 0, -1, true, true))){
S3FS_PRN_ERR("could not open tmpfile(errno=%d)", errno);
return -errno;
}
// write the link target(without surrounding spaces)
strFrom = trim(std::string(from));
ssize_t from_size = static_cast<ssize_t>(strFrom.length());
if(from_size != ent->Write(strFrom.c_str(), 0, from_size)){
S3FS_PRN_ERR("could not write tmpfile(errno=%d)", errno);
return -errno;
}
// upload
if(0 != (result = ent->Flush(true))){
S3FS_PRN_WARN("could not upload tmpfile(result=%d)", result);
}
}
2020-08-22 12:40:53 +00:00
StatCache::getStatCacheData()->DelStat(to);
2020-09-11 09:37:24 +00:00
if(!StatCache::getStatCacheData()->AddSymlink(std::string(to), strFrom)){
2020-08-22 12:40:53 +00:00
S3FS_PRN_ERR("failed to add symbolic link cache for %s", to);
}
S3FS_MALLOCTRIM(0);
return result;
2010-11-13 23:59:23 +00:00
}
2013-07-05 02:28:31 +00:00
static int rename_object(const char* from, const char* to)
{
2020-08-22 12:40:53 +00:00
int result;
2020-09-11 09:37:24 +00:00
std::string s3_realpath;
2020-08-22 12:40:53 +00:00
headers_t meta;
2010-12-17 04:40:15 +00:00
2020-08-22 12:40:53 +00:00
S3FS_PRN_INFO1("[from=%s][to=%s]", from, to);
2011-03-01 19:35:55 +00:00
2020-08-22 12:40:53 +00:00
if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){
// not permitted to write in the "to" object's parent dir.
return result;
}
if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){
// not permitted to remove from the "from" object's parent dir.
return result;
}
if(0 != (result = get_object_attribute(from, NULL, &meta))){
return result;
}
s3_realpath = get_realpath(from);
2010-12-19 22:27:56 +00:00
2020-08-22 12:40:53 +00:00
meta["x-amz-copy-source"] = urlEncode(service_path + bucket + s3_realpath);
2020-09-11 09:37:24 +00:00
meta["Content-Type"] = S3fsCurl::LookupMimeType(std::string(to));
2020-08-22 12:40:53 +00:00
meta["x-amz-metadata-directive"] = "REPLACE";
2010-11-13 23:59:23 +00:00
2020-08-22 12:40:53 +00:00
if(0 != (result = put_headers(to, meta, true))){
return result;
}
2015-01-12 22:46:24 +00:00
2020-08-22 12:40:53 +00:00
FdManager::get()->Rename(from, to);
2015-01-12 22:46:24 +00:00
2020-08-22 12:40:53 +00:00
// Remove file
result = s3fs_unlink(from);
2020-06-25 11:53:53 +00:00
2020-08-22 12:40:53 +00:00
StatCache::getStatCacheData()->DelStat(to);
FdManager::DeleteCacheFile(to);
2010-12-20 05:26:27 +00:00
2020-08-22 12:40:53 +00:00
return result;
2010-12-20 05:26:27 +00:00
}
2013-07-05 02:28:31 +00:00
static int rename_object_nocopy(const char* from, const char* to)
{
2020-08-22 12:40:53 +00:00
int result;
2013-01-19 16:05:07 +00:00
2020-08-22 12:40:53 +00:00
S3FS_PRN_INFO1("[from=%s][to=%s]", from, to);
2013-01-19 16:05:07 +00:00
2020-08-22 12:40:53 +00:00
if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){
// not permitted to write in the "to" object's parent dir.
return result;
}
if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){
// not permitted to remove from the "from" object's parent dir.
return result;
}
// open & load
2020-09-13 07:49:25 +00:00
{ // scope for AutoFdEntity
AutoFdEntity autoent;
FdEntity* ent;
if(NULL == (ent = get_local_fent(autoent, from, true))){
S3FS_PRN_ERR("could not open and read file(%s)", from);
return -EIO;
}
2020-05-30 06:45:43 +00:00
2020-09-13 07:49:25 +00:00
// Set header
if(!ent->SetContentType(to)){
S3FS_PRN_ERR("could not set content-type for %s", to);
return -EIO;
}
2020-08-22 12:40:53 +00:00
2020-09-13 07:49:25 +00:00
// upload
if(0 != (result = ent->RowFlush(to, true))){
S3FS_PRN_ERR("could not upload file(%s): result=%d", to, result);
return result;
}
FdManager::get()->Rename(from, to);
2020-08-22 12:40:53 +00:00
}
// Remove file
result = s3fs_unlink(from);
2013-01-19 16:05:07 +00:00
2020-08-22 12:40:53 +00:00
// Stats
StatCache::getStatCacheData()->DelStat(to);
FdManager::DeleteCacheFile(to);
2013-01-19 16:05:07 +00:00
2020-08-22 12:40:53 +00:00
return result;
2013-01-19 16:05:07 +00:00
}
2013-07-05 02:28:31 +00:00
static int rename_large_object(const char* from, const char* to)
{
2020-08-22 12:40:53 +00:00
int result;
struct stat buf;
headers_t meta;
2011-08-29 22:01:32 +00:00
2020-08-22 12:40:53 +00:00
S3FS_PRN_INFO1("[from=%s][to=%s]", from, to);
2011-08-29 22:01:32 +00:00
2020-08-22 12:40:53 +00:00
if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){
// not permitted to write in the "to" object's parent dir.
return result;
}
if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){
// not permitted to remove from the "from" object's parent dir.
return result;
}
if(0 != (result = get_object_attribute(from, &buf, &meta, false))){
return result;
}
2011-08-29 22:01:32 +00:00
2020-08-22 12:40:53 +00:00
S3fsCurl s3fscurl(true);
if(0 != (result = s3fscurl.MultipartRenameRequest(from, to, meta, buf.st_size))){
return result;
}
s3fscurl.DestroyCurlHandle();
2020-06-25 11:53:53 +00:00
2020-08-22 12:40:53 +00:00
// Remove file
result = s3fs_unlink(from);
2020-06-25 11:53:53 +00:00
2020-08-22 12:40:53 +00:00
StatCache::getStatCacheData()->DelStat(to);
FdManager::DeleteCacheFile(to);
2011-08-29 22:01:32 +00:00
2020-08-22 12:40:53 +00:00
return result;
2011-08-29 22:01:32 +00:00
}
2013-07-05 02:28:31 +00:00
static int clone_directory_object(const char* from, const char* to)
2013-01-19 16:05:07 +00:00
{
2020-08-22 12:40:53 +00:00
int result = -1;
struct stat stbuf;
2010-12-21 15:24:46 +00:00
2020-08-22 12:40:53 +00:00
S3FS_PRN_INFO1("[from=%s][to=%s]", from, to);
2011-06-26 00:37:52 +00:00
2020-08-22 12:40:53 +00:00
// get the source directory's attributes
if(0 != (result = get_object_attribute(from, &stbuf))){
return result;
}
result = create_directory_object(to, stbuf.st_mode, stbuf.st_mtime, stbuf.st_uid, stbuf.st_gid);
StatCache::getStatCacheData()->DelStat(to);
2010-12-21 15:24:46 +00:00
2020-08-22 12:40:53 +00:00
return result;
2010-12-21 15:24:46 +00:00
}
2013-07-05 02:28:31 +00:00
static int rename_directory(const char* from, const char* to)
2013-04-29 14:31:10 +00:00
{
2020-08-22 12:40:53 +00:00
S3ObjList head;
s3obj_list_t headlist;
2020-09-11 09:37:24 +00:00
std::string strfrom = from ? from : ""; // from is without "/".
std::string strto = to ? to : ""; // to is without "/" too.
std::string basepath = strfrom + "/";
std::string newpath; // should be from name(not used)
std::string nowcache; // now cache path(not used)
2020-08-22 12:40:53 +00:00
dirtype DirType;
bool normdir;
MVNODE* mn_head = NULL;
MVNODE* mn_tail = NULL;
MVNODE* mn_cur;
struct stat stbuf;
int result;
bool is_dir;
S3FS_PRN_INFO1("[from=%s][to=%s]", from, to);
//
// Initiate and add the base directory into the MVNODE list.
//
strto += "/";
if(0 == chk_dir_object_type(from, newpath, strfrom, nowcache, NULL, &DirType) && DIRTYPE_UNKNOWN != DirType){
if(DIRTYPE_NOOBJ != DirType){
normdir = false;
}else{
normdir = true;
strfrom = from; // the "from" directory is not removed, but its attributes are needed.
}
if(NULL == (add_mvnode(&mn_head, &mn_tail, strfrom.c_str(), strto.c_str(), true, normdir))){
return -ENOMEM;
}
2013-04-29 14:31:10 +00:00
} else {
2020-08-22 12:40:53 +00:00
// Something is wrong with the "from" directory.
2010-12-20 05:26:27 +00:00
}
2020-08-22 12:40:53 +00:00
//
// get a list of all the objects
//
// No delimiter is specified, so the result(head) contains all object keys.
// (CommonPrefixes is empty, but every object is listed in Key.)
if(0 != (result = list_bucket(basepath.c_str(), head, NULL))){
S3FS_PRN_ERR("list_bucket returns error.");
return result;
}
head.GetNameList(headlist); // get names without "/".
S3ObjList::MakeHierarchizedList(headlist, false); // add hierarchized dirs.
s3obj_list_t::const_iterator liter;
for(liter = headlist.begin(); headlist.end() != liter; ++liter){
// make the "from" and "to" object names.
2020-09-11 09:37:24 +00:00
std::string from_name = basepath + (*liter);
std::string to_name = strto + (*liter);
std::string etag = head.GetETag((*liter).c_str());
2020-08-22 12:40:53 +00:00
// Check subdirectory.
StatCache::getStatCacheData()->HasStat(from_name, etag.c_str()); // Check ETag
if(0 != get_object_attribute(from_name.c_str(), &stbuf, NULL)){
S3FS_PRN_WARN("failed to get %s object attribute.", from_name.c_str());
continue;
}
if(S_ISDIR(stbuf.st_mode)){
is_dir = true;
if(0 != chk_dir_object_type(from_name.c_str(), newpath, from_name, nowcache, NULL, &DirType) || DIRTYPE_UNKNOWN == DirType){
S3FS_PRN_WARN("failed to get %s%s object directory type.", basepath.c_str(), (*liter).c_str());
continue;
}
if(DIRTYPE_NOOBJ != DirType){
normdir = false;
}else{
normdir = true;
from_name = basepath + (*liter); // the "from" directory is not removed, but its attributes are needed.
}
}else{
is_dir = false;
normdir = false;
}
// push this one onto the stack
if(NULL == add_mvnode(&mn_head, &mn_tail, from_name.c_str(), to_name.c_str(), is_dir, normdir)){
return -ENOMEM;
}
2013-04-20 19:17:28 +00:00
}
2020-08-22 12:40:53 +00:00
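//
// The rename below proceeds in three passes over the MVNODE list:
// (1) clone every directory object to its new name, (2) copy/rename every
// file (copy first, delete after), and only then (3) remove the old
// directories bottom-up, so the old tree stays intact until its contents
// have been moved.
//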
//
// rename
//
// rename directory objects.
for(mn_cur = mn_head; mn_cur; mn_cur = mn_cur->next){
if(mn_cur->is_dir && mn_cur->old_path && '\0' != mn_cur->old_path[0]){
if(0 != (result = clone_directory_object(mn_cur->old_path, mn_cur->new_path))){
S3FS_PRN_ERR("clone_directory_object returned an error(%d)", result);
free_mvnodes(mn_head);
return -EIO;
}
}
2010-12-20 05:26:27 +00:00
}
2020-08-22 12:40:53 +00:00
// iterate over the list - copy the files with rename_object
// does a safe copy - copies first and then deletes the old one
for(mn_cur = mn_head; mn_cur; mn_cur = mn_cur->next){
if(!mn_cur->is_dir){
// TODO: call s3fs_rename instead?
if(!nocopyapi && !norenameapi){
result = rename_object(mn_cur->old_path, mn_cur->new_path);
}else{
result = rename_object_nocopy(mn_cur->old_path, mn_cur->new_path);
}
if(0 != result){
S3FS_PRN_ERR("rename_object returned an error(%d)", result);
free_mvnodes(mn_head);
return -EIO;
}
}
2010-12-20 05:26:27 +00:00
}
2020-08-22 12:40:53 +00:00
// Iterate over the old directories, bottom up, and remove them
for(mn_cur = mn_tail; mn_cur; mn_cur = mn_cur->prev){
if(mn_cur->is_dir && mn_cur->old_path && '\0' != mn_cur->old_path[0]){
if(!(mn_cur->is_normdir)){
if(0 != (result = s3fs_rmdir(mn_cur->old_path))){
S3FS_PRN_ERR("s3fs_rmdir returned an error(%d)", result);
free_mvnodes(mn_head);
return -EIO;
}
}else{
// clear the cache.
StatCache::getStatCacheData()->DelStat(mn_cur->old_path);
}
2013-04-29 14:31:10 +00:00
}
2010-12-20 05:26:27 +00:00
}
2020-08-22 12:40:53 +00:00
free_mvnodes(mn_head);
2010-12-20 05:26:27 +00:00
2020-08-22 12:40:53 +00:00
return 0;
2010-12-20 05:26:27 +00:00
}
2019-02-18 13:36:48 +00:00
static int s3fs_rename(const char* _from, const char* _to)
2013-07-05 02:28:31 +00:00
{
2020-08-22 12:40:53 +00:00
WTF8_ENCODE(from)
WTF8_ENCODE(to)
struct stat buf;
int result;
2011-03-01 19:35:55 +00:00
2020-08-22 12:40:53 +00:00
S3FS_PRN_INFO("[from=%s][to=%s]", from, to);
2010-12-20 05:26:27 +00:00
2020-08-22 12:40:53 +00:00
if(0 != (result = check_parent_object_access(to, W_OK | X_OK))){
// not permitted to write in the "to" object's parent dir.
return result;
}
if(0 != (result = check_parent_object_access(from, W_OK | X_OK))){
// not permitted to remove from the "from" object's parent dir.
return result;
}
if(0 != (result = get_object_attribute(from, &buf, NULL))){
return result;
}
// flush pending writes if the file is open
2020-09-13 07:49:25 +00:00
{ // scope for AutoFdEntity
AutoFdEntity autoent;
FdEntity* ent;
if(NULL != (ent = autoent.ExistOpen(from))){
if(0 != (result = ent->Flush(true))){
S3FS_PRN_ERR("could not upload file(%s): result=%d", to, result);
return result;
}
StatCache::getStatCacheData()->DelStat(from);
2020-08-22 12:40:53 +00:00
}
}
// files larger than 5GB must be modified via the multipart interface
if(S_ISDIR(buf.st_mode)){
result = rename_directory(from, to);
}else if(!nomultipart && buf.st_size >= singlepart_copy_limit){
result = rename_large_object(from, to);
2013-01-19 16:05:07 +00:00
} else {
2020-08-22 12:40:53 +00:00
if(!nocopyapi && !norenameapi){
result = rename_object(from, to);
}else{
result = rename_object_nocopy(from, to);
}
2013-01-19 16:05:07 +00:00
}
2020-08-22 12:40:53 +00:00
S3FS_MALLOCTRIM(0);
2013-09-14 21:50:39 +00:00
2020-08-22 12:40:53 +00:00
return result;
2010-11-13 23:59:23 +00:00
}
2019-02-18 13:36:48 +00:00
static int s3fs_link(const char* _from, const char* _to)
2013-03-30 13:37:14 +00:00
{
2020-08-22 12:40:53 +00:00
WTF8_ENCODE(from)
WTF8_ENCODE(to)
S3FS_PRN_INFO("[from=%s][to=%s]", from, to);
return -ENOTSUP;
2010-11-13 23:59:23 +00:00
}
2019-02-18 13:36:48 +00:00
static int s3fs_chmod(const char* _path, mode_t mode)
2013-04-20 19:17:28 +00:00
{
2020-08-22 12:40:53 +00:00
WTF8_ENCODE(path)
int result;
2020-09-11 09:37:24 +00:00
std::string strpath;
std::string newpath;
std::string nowcache;
2020-08-22 12:40:53 +00:00
headers_t meta;
struct stat stbuf;
dirtype nDirType = DIRTYPE_UNKNOWN;
2010-12-19 22:27:56 +00:00
2020-08-22 12:40:53 +00:00
S3FS_PRN_INFO("[path=%s][mode=%04o]", path, mode);
2011-02-11 03:30:02 +00:00
2020-08-22 12:40:53 +00:00
if(0 == strcmp(path, "/")){
S3FS_PRN_ERR("Could not change mode for mount point.");
return -EIO;
}
if(0 != (result = check_parent_object_access(path, X_OK))){
return result;
}
if(0 != (result = check_object_owner(path, &stbuf))){
return result;
Summary of Changes(1.63 -> 1.64)
* This new version was made to fix a big issue with directory objects.
Please be careful and review the new s3fs.
==========================
List of Changes
==========================
1) Fixed bugs
Fixed some memory leaks and un-freed curl handles.
Fixed code with latent bugs that had not been found yet.
Fixed a bug where s3fs could not update an object's mtime while it held an open file descriptor for the object.
Please let us know when you find a new bug or a memory leak.
2) Changed codes
Changed the code of s3fs_readdir() and list_bucket() etc.
Changed the code so that the get_realpath() function returns std::string.
Changed the code around the exit() function. Because exit() was called directly from many fuse callback functions, these functions now call fuse_exit() and return with an error instead.
Changed the code so that the case of the characters in the "x-amz-meta" response header is ignored.
3) Added an option
Added the norenameapi option for storage compatible with S3 but without the copy API.
This option is a subset of the nocopyapi option.
Please read the man page or call s3fs with the --help option.
4) Object for directory
This is a very big and important change.
The directory object is changed to "dir/" instead of "dir" to be compatible with other S3 client applications.
This version also understands directory objects made by old versions.
If the new s3fs changes the attributes, owner/group, or mtime of a directory object, it automatically converts the object from the old name("dir") to the new one("dir/").
If you need to change an old object name("dir") to a new one("dir/") manually, you can use the shell script(mergedir.sh) in the test directory.
* About the directory object name
AWS S3 allows both "dir" and "dir/" as object names.
s3fs before this version understood only "dir" as a directory object name and did not understand the "dir/" object name.
The new version understands both the "dir" and "dir/" object names.
s3fs users need to be careful about the special situations mentioned later.
The new version deletes the old "dir" object and makes a new "dir/" object when the user changes the permission, owner/group, or mtime of the directory object.
This operation happens automatically in the background.
If you need to merge manually, you can use the mergedir.sh shell script in the test directory.
This script runs chmod/chown/touch commands after finding a directory.
When another S3 client application makes a directory object("dir/") without the meta information that s3fs needs, this script can add that meta information to the directory object.
If this script's behavior is insufficient for you, you can read and modify its code yourself.
Please use the shell script carefully because it changes the objects.
If you find a bug in this script, please let me know.
* Details
** The directory object made by an old version
The directory object made by an old version is not understood by other S3 client applications.
The new s3fs version was updated to keep compatibility with other clients.
You can use mergedir.sh in the test directory to merge an old directory object("dir") into a new one("dir/").
The directory object name is changed from "dir" to "dir/" after mergedir.sh runs, and this changed "dir/" object is understood by other S3 clients.
This script runs chmod/chown/chgrp/touch/etc commands against the old directory object("dir"), and the new s3fs then merges that directory automatically.
If you need to change a directory object from old to new manually, you can do it by running commands which change the directory attributes(mode/owner/group/mtime).
** The directory object made by the new version
The directory object name made by the new version is "dir/".
Because the name includes "/", other S3 client applications understand it as a directory.
I tested the new directory objects with s3cmd/tntDrive/DragonDisk/Gladinet as other S3 clients, and the compatibility was good.
You need to know that the compatibility has small problems due to differences in specifications between clients.
And you need to be careful that an old s3fs cannot understand a directory object made by the new s3fs.
You should update every s3fs that accesses the same bucket.
** The directory object made by another S3 client application
When s3fs determines an object as a directory, it makes and uses special meta information, namely "x-amz-meta-***" and "Content-Type" HTTP headers.
s3fs sets and uses the HTTP headers listed below for directory objects(a short sketch of building these headers follows this note).
Content-Type: application/x-directory
x-amz-meta-mode: <mode>
x-amz-meta-uid: <UID>
x-amz-meta-gid: <GID>
x-amz-meta-mtime: <unix time of modified file>
Another S3 client application builds the directory object without the attributes that s3fs needs.
When the "ls" command is run on an s3fs-fuse file system which has directories/files made by other S3 clients, the result looks like this:
d--------- 1 root root 0 Feb 27 11:21 dir
---------- 1 root root 1024 Mar 14 02:15 file
Because the objects don't have the "x-amz-meta-mode" meta information, the mode is 0000.
In this case, the directory object is shown with only "d", because s3fs determines an object as a directory when its name ends with "/" or it has the "Content-Type: application/x-directory" header.
(s3fs sets "Content-Type: application/x-directory" on directory objects, but other S3 clients set "binary/octet-stream".)
As a result, nobody except root is allowed to operate on the object.
The owner and group are "root"(UID=0) because the object doesn't have "x-amz-meta-uid/gid".
If the object doesn't have "x-amz-meta-mtime", s3fs uses the "Last-Modified" HTTP header.
Therefore the object's mtime is the "Last-Modified" value.(This logic is the same as in the old version.)
As already explained, if you need to change the object attributes, you can do it manually or with mergedir.sh.
* Examples of compatibility with s3cmd etc.
** Case A) Only a "dir/file" object
In one case, there is only a "dir/file" object without a "dir/" object; such an object is made by s3cmd or similar tools.
In this case, the response of the REST API(list bucket) with the "delimiter=/" parameter has "CommonPrefixes", and "dir/" is listed in "CommonPrefixes/Prefix", but the "dir/" object is not a real object.
s3fs needs to determine this object as a directory, but there is no real directory object("dir" or "dir/").
Both the new s3fs and the old one do NOT understand this "dir/" in "CommonPrefixes", because s3fs fails to get the meta information from "dir" or "dir/".
In this case, the result of the "ls" command looks like this:
??????????? ? ? ? ? ? dir
This "dir" cannot be operated on by anyone or any process, because s3fs does not understand this object's permissions.
And the "dir/file" object cannot be shown or operated on either.
Some other S3 clients(tntDrive/Gladinet/etc) cannot understand this object, just like s3fs.
If you need to operate on the "dir/file" object, you need to make the "dir/" object as a directory.
To make the "dir/" directory object, you need to do the following.
Because there is already the "dir" object, which is not a real object, you cannot make the "dir/" directory.
(s3cmd does not make a "dir/" object because the object name has "/".)
You should make a directory with another name(ex: "dir2/") and move the "dir/file" objects into the new directory.
Finally, you can rename the directory from "dir2/" to "dir/".
** Case B) Both "dir" and "dir/file" objects
In this case there are "dir" and "dir/file" objects which were made by s3cmd/etc.
s3cmd and s3fs understand the "dir" object as a normal(file) object, because this object has neither meta information nor a name ending with "/".
But the result of the REST API(list bucket) has the "dir/" name in "CommonPrefixes/Prefix".
s3fs checks "dir/" and "dir" as a directory, but the "dir" object is not a directory object.
(Because the new s3fs needs to be compatible with the old version, s3fs checks for a directory object in the order "dir/", "dir".)
In this case, the result of the "ls" command looks like this:
---------- 1 root root 0 Feb 27 02:48 dir
As a result, "dir/file" cannot be shown or operated on because the "dir" object is a file.
If you want to treat "dir" as a directory, you need to add meta information to the "dir" object with s3cmd.
** Case C) Both "dir" and "dir/" objects
The last case is that there are "dir" and "dir/" objects which were made by other S3 clients.
(For example: first you upload an object "dir/" as a directory with the new s3fs, then you upload an object "dir" with s3cmd.)
The new s3fs determines "dir/" as the directory, because it searches in the order "dir/", "dir".
As a result, the "dir" object cannot be shown or operated on.
** Compatibility between S3 clients
Neither the new nor the old s3fs understands both "dir" and "dir/" at the same time; tntDrive and Gladinet behave the same as s3fs.
If there are "dir/" and "dir" objects, s3fs gives priority to "dir/".
But s3cmd and DragonDisk understand both objects.
git-svn-id: http://s3fs.googlecode.com/svn/trunk@392 df820570-a93a-0410-bd06-b72b767a4274
2013-03-23 14:04:07 +00:00
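As referenced in the header list above, a minimal sketch of building those directory-object headers: the 0755/getuid()/getgid()/time(NULL) values are illustrative assumptions only, and str() is the numeric-to-string helper used elsewhere in this file.
headers_t dirmeta;
dirmeta["Content-Type"] = "application/x-directory"; // marks the object as a directory
dirmeta["x-amz-meta-mode"] = str(S_IFDIR | 0755); // <mode>
dirmeta["x-amz-meta-uid"] = str(getuid()); // <UID>
dirmeta["x-amz-meta-gid"] = str(getgid()); // <GID>
dirmeta["x-amz-meta-mtime"] = str(time(NULL)); // <unix time of modified file>
// a PUT of a zero-length object named "dir/" with these headers creates the directory object.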
}
2011-08-29 22:01:32 +00:00
2020-08-22 12:40:53 +00:00
if(S_ISDIR(stbuf.st_mode)){
result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType);
}else{
strpath = path;
nowcache = strpath;
result = get_object_attribute(strpath.c_str(), NULL, &meta);
}
if(0 != result){
return result;
}
2016-09-11 13:09:23 +00:00
2020-08-22 12:40:53 +00:00
if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){
// Should rebuild the directory object(except the new type)
// Need to remove the old dir("dir" etc) and make a new dir("dir/")
// At first, remove the old directory object
if(0 != (result = remove_old_type_dir(strpath, nDirType))){
return result;
2020-08-02 13:37:06 +00:00
}
StatCache::getStatCacheData()->DelStat(nowcache);
2020-08-22 12:40:53 +00:00
// Make the new directory object("dir/")
if(0 != (result = create_directory_object(newpath.c_str(), mode, stbuf.st_mtime, stbuf.st_uid, stbuf.st_gid))){
return result;
}
2020-08-02 13:37:06 +00:00
} else {
2020-08-22 12:40:53 +00:00
// normal object or a directory object of the newer version
headers_t updatemeta;
updatemeta["x-amz-meta-ctime"] = str(time(NULL));
updatemeta["x-amz-meta-mode"] = str(mode);
updatemeta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str()));
updatemeta["x-amz-metadata-directive"] = "REPLACE";
// check for an opened file handle.
//
// If the file has started a multipart upload because the disk capacity is insufficient,
// we need to put these headers after the upload finishes.
// Or if the file is only open, we must update FdEntity's internal meta.
//
2020-09-13 07:49:25 +00:00
AutoFdEntity autoent;
FdEntity* ent;
if(NULL != (ent = autoent.ExistOpen(path, -1, true))){
2020-08-22 12:40:53 +00:00
// the file is opened now.
if(ent->MergeOrgMeta(updatemeta)){
// now uploading
// the meta is pending and accumulated to be put after the upload is complete.
S3FS_PRN_INFO("meta pending until upload is complete");
}else{
// allowed to put headers
// updatemeta has already merged the orgmeta of the opened files.
if(0 != put_headers(strpath.c_str(), updatemeta, true)){
return -EIO;
}
StatCache::getStatCacheData()->DelStat(nowcache);
}
}else{
// the file is not opened, so put the headers
merge_headers(meta, updatemeta, true);
if(0 != put_headers(strpath.c_str(), meta, true)){
return -EIO;
}
StatCache::getStatCacheData()->DelStat(nowcache);
}
2016-09-11 13:09:23 +00:00
}
2020-08-22 12:40:53 +00:00
S3FS_MALLOCTRIM(0);
2011-02-11 03:30:02 +00:00
2020-08-22 12:40:53 +00:00
return 0;
2010-11-13 23:59:23 +00:00
}
2019-02-18 13:36:48 +00:00
static int s3fs_chmod_nocopy(const char* _path, mode_t mode)
2013-07-05 02:28:31 +00:00
{
2020-08-22 12:40:53 +00:00
WTF8_ENCODE(path)
int result;
2020-09-11 09:37:24 +00:00
std::string strpath;
std::string newpath;
std::string nowcache;
2020-08-22 12:40:53 +00:00
struct stat stbuf;
dirtype nDirType = DIRTYPE_UNKNOWN;
S3FS_PRN_INFO1("[path=%s][mode=%04o]", path, mode);
if(0 == strcmp(path, "/")){
S3FS_PRN_ERR("Could not change mode for mount point.");
return -EIO;
2013-01-19 16:05:07 +00:00
}
2020-08-22 12:40:53 +00:00
if(0 != (result = check_parent_object_access(path, X_OK))){
return result;
}
if(0 != (result = check_object_owner(path, &stbuf))){
return result;
2013-04-29 14:31:10 +00:00
}
2013-01-19 16:05:07 +00:00
2020-08-22 12:40:53 +00:00
// Get attributes
if(S_ISDIR(stbuf.st_mode)){
result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType);
}else{
strpath = path;
nowcache = strpath;
result = get_object_attribute(strpath.c_str(), NULL, NULL);
}
if(0 != result){
return result;
Summary of Changes(1.63 -> 1.64)
* This new version was made for fixing big issue about directory object.
Please be careful and review new s3fs.
==========================
List of Changes
==========================
1) Fixed bugs
Fixed some memory leak and un-freed curl handle.
Fixed codes with a bug which is not found yet.
Fixed a bug that the s3fs could not update object's mtime when the s3fs had a opened file descriptor.
Please let us know a bug, when you find new bug of a memory leak.
2) Changed codes
Changed codes of s3fs_readdir() and list_bucket() etc.
Changed codes so that the get_realpath() function returned std::string.
Changed codes about exit() function. Because the exit() function is called from many fuse callback function directly, these function called fuse_exit() function and retuned with error.
Changed codes so that the case of the characters for the "x-amz-meta" response header is ignored.
3) Added a option
Added the norenameapi option for the storage compatible with S3 without copy API.
This option is subset of nocopyapi option.
Please read man page or call with --help option.
4) Object for directory
This is very big and important change.
The object of directory is changed "dir/" instead of "dir" for being compatible with other S3 client applications.
And this version understands the object of directory which is made by old version.
If the new s3fs changes the attributes or owner/group or mtime of the directory object, the s3fs automatically changes the object from old object name("dir") to new("dir/").
If you need to change old object name("dir") to new("dir/") manually, you can use shell script(mergedir.sh) in test directory.
* About the directory object name
AWS S3 allows the object name as both "dir" and "dir/".
The s3fs before this version understood only "dir" as directory object name, but old version did not understand the "dir/" object name.
The new version understands both of "dir" and "dir/" object name.
The s3fs user needs to be care for the special situation that I mentioned later.
The new version deletes old "dir" object and makes new "dir/" object, when the user operates the directory object for changing the permission or owner/group or mtime.
This operation does on background and automatically.
If you need to merge manually, you can use shell script which is mergedir.sh in test directory.
This script runs chmod/chown/touch commands after finding a directory.
Other S3 client application makes a directory object("dir/") without meta information which is needed to understand by the s3fs, this script can add meta information for a directory object.
If this script function is insufficient for you, you can read and modify the codes by yourself.
Please use the shell script carefully because of changing the object.
If you find a bug in this script, please let me know.
* Details
** The directory object made by old versions
Directory objects made by old versions are not understood by other S3 client applications.
The new s3fs version was updated to keep compatibility with other clients.
You can use mergedir.sh in the test directory to migrate old directory objects ("dir") to new ones ("dir/").
After mergedir.sh runs, the directory object name is changed from "dir" to "dir/", and the resulting "dir/" object is understood by other S3 clients.
The script runs chmod/chown/chgrp/touch/etc. commands against the old directory object ("dir"), and the new s3fs then migrates that directory automatically.
If you need to migrate a directory object manually, you can do so by running any of these commands yourself, since they change the directory attributes (mode/owner/group/mtime).
** The directory object made by the new version
Directory objects made by the new version are named "dir/".
Because the name ends with "/", other S3 client applications recognize it as a directory.
I tested the new directory format against s3cmd, tntDrive, DragonDisk, and Gladinet as other S3 clients, and compatibility was good.
Be aware that there are small compatibility problems caused by differences in behavior between clients.
Also note that old versions of s3fs cannot understand directory objects made by the new s3fs, so you should upgrade every s3fs instance that accesses the same bucket.
** The directory object made by other S3 client applications
So that an object can be identified as a directory, s3fs creates and uses special meta information in the "x-amz-meta-***" and "Content-Type" HTTP headers.
s3fs sets and uses the following HTTP headers on a directory object:
Content-Type: application/x-directory
x-amz-meta-mode: <mode>
x-amz-meta-uid: <UID>
x-amz-meta-gid: <GID>
x-amz-meta-mtime: <unix time of last modification>
(A sketch of assembling these headers follows after this subsection.)
Other S3 client applications create directory objects without the attributes that s3fs needs.
When "ls" is run on an s3fs file system containing directories/files made by other S3 clients, the result looks like this:
d--------- 1 root root    0 Feb 27 11:21 dir
---------- 1 root root 1024 Mar 14 02:15 file
Because the objects have no "x-amz-meta-mode" meta information, the mode is shown as 0000.
The directory object still shows "d" because s3fs treats an object as a directory when its name ends with "/" or it has a "Content-Type: application/x-directory" header.
(s3fs sets "Content-Type: application/x-directory" on directory objects, but other S3 clients set "binary/octet-stream".)
With this result, nobody except root is allowed to operate on the object.
The owner and group are "root" (UID=0) because the object has no "x-amz-meta-uid/gid".
If the object has no "x-amz-meta-mtime", s3fs falls back to the "Last-Modified" HTTP header, so the object's mtime is the "Last-Modified" value. (This logic is the same as in old versions.)
As already explained, if you need to change such an object's attributes, you can do it manually or with mergedir.sh.
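A minimal sketch of building those headers, assuming only the standard library; the map type and helper name are hypothetical stand-ins (in s3fs the real work happens in create_directory_object() with the headers_t type):

#include <map>
#include <string>
#include <sstream>
#include <ctime>
#include <sys/types.h>
#include <sys/stat.h>

typedef std::map<std::string, std::string> dirmeta_t;   // stand-in for s3fs's headers_t

// Hypothetical helper: assemble the meta headers listed above for a "dir/" object.
static dirmeta_t build_directory_meta(mode_t mode, uid_t uid, gid_t gid, time_t mtime)
{
    dirmeta_t          meta;
    std::ostringstream s;
    meta["Content-Type"] = "application/x-directory";
    s << mode;  meta["x-amz-meta-mode"]  = s.str();  s.str("");
    s << uid;   meta["x-amz-meta-uid"]   = s.str();  s.str("");
    s << gid;   meta["x-amz-meta-gid"]   = s.str();  s.str("");
    s << mtime; meta["x-amz-meta-mtime"] = s.str();
    return meta;
}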
* Examples of compatibility with s3cmd etc.
** Case A) Only a "dir/file" object
In this case there is only a "dir/file" object, made by s3cmd or a similar tool, and no "dir/" object.
The response of the REST API (list bucket) with the "delimiter=/" parameter contains "CommonPrefixes", and "dir/" is listed in "CommonPrefixes/Prefix", but no real "dir/" object exists.
s3fs would need to treat this entry as a directory even though no real directory object ("dir" or "dir/") exists.
However, neither the new nor the old s3fs understands this "dir/" entry in "CommonPrefixes", because s3fs fails to get meta information for "dir" or "dir/".
In this case, the result of "ls" looks like this:
??????????? ? ? ? ? ? dir
This "dir" cannot be operated on by any user or process, because s3fs cannot determine the object's permissions.
The "dir/file" object cannot be shown or operated on either.
Some other S3 clients (tntDrive/Gladinet/etc.) fail to understand this object in the same way as s3fs.
If you need to operate on the "dir/file" object, you must create the "dir/" object as a directory.
To create the "dir/" directory object, proceed as follows.
Because the phantom "dir" entry already exists, you cannot create a "dir/" directory directly.
(s3cmd does not create a "dir/" object because the name ends with "/".)
Create a directory with another name (e.g. "dir2/"), move the "dir/file" objects into the new directory, and finally rename the directory from "dir2/" back to "dir/".
** Case B) Both "dir" and "dir/file" objects
In this case there are "dir" and "dir/file" objects, made by s3cmd or a similar tool.
s3cmd and s3fs treat the "dir" object as a normal (file) object, because it has no meta information and its name does not end with "/".
However, the REST API (list bucket) result contains "dir/" in "CommonPrefixes/Prefix".
s3fs checks both "dir/" and "dir" as directory candidates, but the "dir" object is not a directory object.
(Because the new s3fs must stay compatible with old versions, it checks for a directory object in the order "dir/", then "dir".)
In this case, the result of "ls" looks like this:
---------- 1 root root 0 Feb 27 02:48 dir
As a result, "dir/file" cannot be shown or operated on, because the "dir" object is a file.
If you want "dir" to be treated as a directory, you need to add the meta information to the "dir" object with s3cmd.
** Case C) Both "dir" and "dir/" objects
In the last case, there are both "dir" and "dir/" objects, made by different S3 clients.
(Example: first you upload a "dir/" object as a directory with the new s3fs, then you upload a "dir" object with s3cmd.)
The new s3fs treats "dir/" as the directory, because it searches in the order "dir/", then "dir".
As a result, the "dir" object cannot be shown or operated on.
** Compatibility between S3 clients
Neither the new nor the old s3fs understands both "dir" and "dir/" at the same time; tntDrive and Gladinet behave the same as s3fs.
If both "dir/" and "dir" objects exist, s3fs gives priority to "dir/" (see the sketch below).
s3cmd and DragonDisk, however, understand both objects.
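To make that lookup order concrete, here is a hedged sketch; the function and its probe callback are hypothetical stand-ins for the real chk_dir_object_type()/get_object_attribute() logic:

#include <functional>
#include <string>

enum dir_probe_result { PROBE_DIR_SLASH, PROBE_DIR_PLAIN, PROBE_COMMON_PREFIX, PROBE_NONE };

// Hypothetical sketch: resolve a name as a directory, checking "dir/" before
// "dir" as described above. has_object(key) stands in for a HEAD request.
static dir_probe_result resolve_directory(const std::string& name,
                                          const std::function<bool(const std::string&)>& has_object,
                                          bool listed_in_common_prefixes)
{
    if(has_object(name + "/")){
        return PROBE_DIR_SLASH;        // new-style "dir/" object wins
    }
    if(has_object(name)){
        return PROBE_DIR_PLAIN;        // old-style "dir" object
    }
    if(listed_in_common_prefixes){
        return PROBE_COMMON_PREFIX;    // phantom directory, Case A above
    }
    return PROBE_NONE;
}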
git-svn-id: http://s3fs.googlecode.com/svn/trunk@392 df820570-a93a-0410-bd06-b72b767a4274
2013-03-23 14:04:07 +00:00
}
Changes codes for performance(part 3)
* Summary
This revision includes a big change to the handling of temporary files and local cache files.
With this change, s3fs performs well when it opens/closes/syncs/reads objects.
To implement this, I made major changes to how the temporary file and the local cache file are handled.
* Detail
1) About the temporary file (local file)
s3fs uses a temporary file on the local file system when it downloads/uploads/opens/seeks an object on S3.
As of this revision, s3fs calls ftruncate() when it creates the temporary file.
This way s3fs can set the file to exactly the object's length without downloading anything, as sketched below.
(Notice: ftruncate() is specified for XSI-compliant systems, so you may have problems on non-XSI-compliant systems.)
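A minimal sketch of that idea, assuming only POSIX (the function name is hypothetical):

#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>

// Hypothetical sketch: create a temporary file and extend it to the object's
// exact size with ftruncate(), without downloading any data. On most local
// filesystems the file becomes sparse, so no data blocks are written yet.
static int make_presized_tempfile(const char* tmppath, off_t object_size)
{
    int fd = open(tmppath, O_RDWR | O_CREAT | O_TRUNC, 0600);
    if(-1 == fd){
        return -1;
    }
    if(-1 == ftruncate(fd, object_size)){
        close(fd);
        return -1;
    }
    return fd;   // caller is responsible for close()
}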
With this change, s3fs can download part of an object by sending a "Range" HTTP header, effectively downloading the object block by block.
The default block (part) size is 50MB, which comes from the default parallel request count (5) multiplied by the default multipart upload size (10MB).
If you need to change this block size, use the new "fd_page_size" option, which accepts any value from 1MB (1024 * 1024) upward.
Note also that fdcache.cpp (and fdcache.h) changed substantially.
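Here is a hedged sketch of one such ranged GET with libcurl (which s3fs already links against); the function name is hypothetical, and the real request would also need the S3 authorization headers, which are omitted here:

#include <cstdio>
#include <string>
#include <sys/types.h>
#include <curl/curl.h>

// Hypothetical sketch: download one block of an object with an HTTP "Range"
// request. With libcurl's default write callback, the response body is
// written to the FILE* passed via CURLOPT_WRITEDATA.
static bool fetch_block(const std::string& url, off_t start, off_t length, FILE* out)
{
    CURL* curl = curl_easy_init();
    if(NULL == curl){
        return false;
    }
    char range[64];
    snprintf(range, sizeof(range), "%lld-%lld",
             static_cast<long long>(start),
             static_cast<long long>(start + length - 1));
    curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
    curl_easy_setopt(curl, CURLOPT_RANGE, range);   // e.g. "0-52428799" for a 50MB block
    curl_easy_setopt(curl, CURLOPT_WRITEDATA, out);
    CURLcode res = curl_easy_perform(curl);
    curl_easy_cleanup(curl);
    return (CURLE_OK == res);
}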
2) About the local cache
Local cache files in the directory specified by the "use_cache" option no longer necessarily contain all of an object's data.
This is because s3fs uses ftruncate() and reads (writes) the temporary file block by block.
s3fs tracks the status of each block unit: "downloaded" or "not downloaded".
For this status, s3fs creates a new status file in the cache directory specified by "use_cache"; these status files live in a directory named "<use_cache directory>/.<bucket_name>/".
When s3fs opens a status file, it locks the file for exclusive control by calling flock() (see the sketch below). Be aware that the status files therefore cannot be placed on a network drive (such as NFS).
This revision also changes the file open mode: s3fs now always opens a local cache file and its status file in writable mode.
Finally, this revision adds the new "del_cache" option, which makes s3fs delete all local cache files when it starts and exits.
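A small sketch of that exclusive control, assuming only POSIX (the function name is hypothetical):

#include <fcntl.h>
#include <unistd.h>
#include <sys/file.h>

// Hypothetical sketch: open a cache-status file and take an exclusive
// advisory lock with flock(2). As noted above, flock() is generally not
// reliable over NFS, which is why the status files must stay on a local disk.
static int open_locked_status_file(const char* statuspath)
{
    int fd = open(statuspath, O_RDWR | O_CREAT, 0600);
    if(-1 == fd){
        return -1;
    }
    if(-1 == flock(fd, LOCK_EX)){   // blocks until the lock can be taken
        close(fd);
        return -1;
    }
    return fd;   // closing the descriptor releases the lock
}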
3) Uploading
When s3fs writes data to a file descriptor through a FUSE request, old revisions downloaded the whole object first. The new revision does not; it downloads only the small partial area (some block units) covering the data being written.
When s3fs closes or flushes the file descriptor, it downloads the remaining areas that have not yet been fetched from the server, and then uploads all of the data.
Revision r456 already added parallel upload, so this revision together with r456 and r457 is a very big change for performance.
4) Downloading
Thanks to the changes to the temporary file and the local cache file, when s3fs downloads an object it fetches only the required range (some block units).
s3fs downloads these units with parallel GET requests, just as in the upload case. (The maximum parallel request count and the size of each download are controlled by the same parameters as for uploading.)
In the new revision, when s3fs opens a file it returns a file descriptor immediately, because it only opens (creates) the file descriptor without downloading any data. When s3fs then reads data, it downloads only the block units covering the requested range.
This is good for performance.
5) Renamed an option
The "parallel_upload" option added in r456 is renamed to "parallel_count", because the value is no longer used only for uploading objects but for downloading as well. (For a while, the old option name "parallel_upload" still works for compatibility.)
git-svn-id: http://s3fs.googlecode.com/svn/trunk@458 df820570-a93a-0410-bd06-b72b767a4274
2013-07-23 16:01:48 +00:00
2020-08-22 12:40:53 +00:00
    if(S_ISDIR(stbuf.st_mode)){
        // Should rebuild all directory object
        // Need to remove old dir("dir" etc) and make new dir("dir/")
2019-04-30 08:37:17 +00:00
2020-08-22 12:40:53 +00:00
        // At first, remove directory old object
        if(0 != (result = remove_old_type_dir(strpath, nDirType))){
            return result;
        }
        StatCache::getStatCacheData()->DelStat(nowcache);
2015-10-18 17:03:41 +00:00
2020-08-22 12:40:53 +00:00
        // Make new directory object("dir/")
        if(0 != (result = create_directory_object(newpath.c_str(), mode, stbuf.st_mtime, stbuf.st_uid, stbuf.st_gid))){
            return result;
        }
    }else{
        // normal object or directory object of newer version
        // open & load
2020-09-13 07:49:25 +00:00
        AutoFdEntity autoent;
        FdEntity*    ent;
        if(NULL == (ent = get_local_fent(autoent, strpath.c_str(), true))){
2020-08-22 12:40:53 +00:00
            S3FS_PRN_ERR("could not open and read file(%s)", strpath.c_str());
            return -EIO;
        }
        ent->SetCtime(time(NULL));
2020-08-22 12:40:53 +00:00
        // Change file mode
        ent->SetMode(mode);
2013-09-14 21:50:39 +00:00
2020-08-22 12:40:53 +00:00
        // upload
        if(0 != (result = ent->Flush(true))){
            S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result);
            return result;
        }
        StatCache::getStatCacheData()->DelStat(nowcache);
    }
    S3FS_MALLOCTRIM(0);
    return result;
2013-01-19 16:05:07 +00:00
}
2019-02-18 13:36:48 +00:00
static int s3fs_chown(const char* _path, uid_t uid, gid_t gid)
2013-07-05 02:28:31 +00:00
{
2020-08-22 12:40:53 +00:00
    WTF8_ENCODE(path)
    int result;
2020-09-11 09:37:24 +00:00
    std::string strpath;
    std::string newpath;
    std::string nowcache;
2020-08-22 12:40:53 +00:00
    headers_t meta;
    struct stat stbuf;
    dirtype nDirType = DIRTYPE_UNKNOWN;
2010-11-13 23:59:23 +00:00
2020-08-22 12:40:53 +00:00
    S3FS_PRN_INFO("[path=%s][uid=%u][gid=%u]", path, (unsigned int)uid, (unsigned int)gid);
2010-11-13 23:59:23 +00:00
2020-08-22 12:40:53 +00:00
    if(0 == strcmp(path, "/")){
        S3FS_PRN_ERR("Could not change owner for mount point.");
        return -EIO;
    }
    if(0 != (result = check_parent_object_access(path, X_OK))){
        return result;
    }
    if(0 != (result = check_object_owner(path, &stbuf))){
        return result;
}
2011-02-11 03:30:02 +00:00
2020-08-22 12:40:53 +00:00
    if((uid_t)(-1) == uid){
        uid = stbuf.st_uid;
    }
    if((gid_t)(-1) == gid){
        gid = stbuf.st_gid;
    }
    if(S_ISDIR(stbuf.st_mode)){
        result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType);
    }else{
        strpath  = path;
        nowcache = strpath;
        result   = get_object_attribute(strpath.c_str(), NULL, &meta);
    }
    if(0 != result){
        return result;
}
2020-08-22 12:40:53 +00:00
    if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){
        // Should rebuild directory object(except new type)
        // Need to remove old dir("dir" etc) and make new dir("dir/")
        // At first, remove directory old object
        if(0 != (result = remove_old_type_dir(strpath, nDirType))){
            return result;
2020-08-02 13:37:06 +00:00
        }
        StatCache::getStatCacheData()->DelStat(nowcache);
2020-08-22 12:40:53 +00:00
        // Make new directory object("dir/")
        if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, stbuf.st_mtime, uid, gid))){
            return result;
        }
2020-08-02 13:37:06 +00:00
    }else{
2020-08-22 12:40:53 +00:00
        headers_t updatemeta;
        updatemeta["x-amz-meta-ctime"]         = str(time(NULL));
        updatemeta["x-amz-meta-uid"]           = str(uid);
        updatemeta["x-amz-meta-gid"]           = str(gid);
        updatemeta["x-amz-copy-source"]        = urlEncode(service_path + bucket + get_realpath(strpath.c_str()));
        updatemeta["x-amz-metadata-directive"] = "REPLACE";

        // check opened file handle.
        //
        // If the file starts uploading by multipart when the disk capacity is insufficient,
        // we need to put these header after finishing upload.
        // Or if the file is only open, we must update to FdEntity's internal meta.
        //
2020-09-13 07:49:25 +00:00
        AutoFdEntity autoent;
        FdEntity*    ent;
        if(NULL != (ent = autoent.ExistOpen(path, -1, true))){
2020-08-22 12:40:53 +00:00
            // the file is opened now.
            if(ent->MergeOrgMeta(updatemeta)){
                // now uploading
                // the meta is pending and accumulated to be put after the upload is complete.
                S3FS_PRN_INFO("meta pending until upload is complete");
            }else{
                // allow to put header
                // updatemeta already merged the orgmeta of the opened files.
                if(0 != put_headers(strpath.c_str(), updatemeta, true)){
                    return -EIO;
                }
                StatCache::getStatCacheData()->DelStat(nowcache);
            }
        }else{
            // not opened file, then put headers
            merge_headers(meta, updatemeta, true);
            if(0 != put_headers(strpath.c_str(), meta, true)){
                return -EIO;
            }
            StatCache::getStatCacheData()->DelStat(nowcache);
        }
2013-04-06 17:39:22 +00:00
}
2020-08-22 12:40:53 +00:00
    S3FS_MALLOCTRIM(0);
2011-02-11 03:30:02 +00:00
2020-08-22 12:40:53 +00:00
    return 0;
2010-11-13 23:59:23 +00:00
}
2019-02-18 13:36:48 +00:00
static int s3fs_chown_nocopy(const char* _path, uid_t uid, gid_t gid)
2013-07-05 02:28:31 +00:00
{
2020-08-22 12:40:53 +00:00
    WTF8_ENCODE(path)
    int result;
2020-09-11 09:37:24 +00:00
    std::string strpath;
    std::string newpath;
    std::string nowcache;
2020-08-22 12:40:53 +00:00
    struct stat stbuf;
    dirtype nDirType = DIRTYPE_UNKNOWN;

    S3FS_PRN_INFO1("[path=%s][uid=%u][gid=%u]", path, (unsigned int)uid, (unsigned int)gid);

    if(0 == strcmp(path, "/")){
        S3FS_PRN_ERR("Could not change owner for mount point.");
        return -EIO;
    }
    if(0 != (result = check_parent_object_access(path, X_OK))){
        return result;
    }
    if(0 != (result = check_object_owner(path, &stbuf))){
        return result;
2013-04-29 14:31:10 +00:00
}
2020-08-22 12:40:53 +00:00
    if((uid_t)(-1) == uid){
        uid = stbuf.st_uid;
    }
    if((gid_t)(-1) == gid){
        gid = stbuf.st_gid;
2013-01-19 16:05:07 +00:00
}
2011-08-31 20:17:53 +00:00
2020-08-22 12:40:53 +00:00
    // Get attributes
    if(S_ISDIR(stbuf.st_mode)){
        result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType);
    }else{
        strpath  = path;
        nowcache = strpath;
        result   = get_object_attribute(strpath.c_str(), NULL, NULL);
    }
    if(0 != result){
        return result;
2013-01-19 16:05:07 +00:00
}
2020-08-22 12:40:53 +00:00
    if(S_ISDIR(stbuf.st_mode)){
        // Should rebuild all directory object
        // Need to remove old dir("dir" etc) and make new dir("dir/")
2019-04-30 08:37:17 +00:00
2020-08-22 12:40:53 +00:00
        // At first, remove directory old object
        if(0 != (result = remove_old_type_dir(strpath, nDirType))){
            return result;
        }
        StatCache::getStatCacheData()->DelStat(nowcache);
2015-10-18 17:03:41 +00:00
2020-08-22 12:40:53 +00:00
        // Make new directory object("dir/")
        if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, stbuf.st_mtime, uid, gid))){
            return result;
        }
    }else{
        // normal object or directory object of newer version
        // open & load
2020-09-13 07:49:25 +00:00
        AutoFdEntity autoent;
        FdEntity*    ent;
        if(NULL == (ent = get_local_fent(autoent, strpath.c_str(), true))){
2020-08-22 12:40:53 +00:00
            S3FS_PRN_ERR("could not open and read file(%s)", strpath.c_str());
            return -EIO;
        }
        ent->SetCtime(time(NULL));
2020-08-22 12:40:53 +00:00
        // Change owner
        ent->SetUId(uid);
        ent->SetGId(gid);

        // upload
        if(0 != (result = ent->Flush(true))){
            S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result);
            return result;
        }
        StatCache::getStatCacheData()->DelStat(nowcache);
    }
    S3FS_MALLOCTRIM(0);
    return result;
2013-01-19 16:05:07 +00:00
}
2019-02-18 13:36:48 +00:00
static int s3fs_utimens(const char* _path, const struct timespec ts[2])
2013-07-05 02:28:31 +00:00
{
2020-08-22 12:40:53 +00:00
    WTF8_ENCODE(path)
    int result;
2020-09-11 09:37:24 +00:00
    std::string strpath;
    std::string newpath;
    std::string nowcache;
2020-08-22 12:40:53 +00:00
    headers_t meta;
    struct stat stbuf;
    dirtype nDirType = DIRTYPE_UNKNOWN;
2013-07-05 02:28:31 +00:00
2020-08-22 12:40:53 +00:00
    S3FS_PRN_INFO("[path=%s][mtime=%lld]", path, static_cast<long long>(ts[1].tv_sec));
2013-07-05 02:28:31 +00:00
2020-08-22 12:40:53 +00:00
    if(0 == strcmp(path, "/")){
        S3FS_PRN_ERR("Could not change mtime for mount point.");
        return -EIO;
    }
    if(0 != (result = check_parent_object_access(path, X_OK))){
        return result;
    }
    if(0 != (result = check_object_access(path, W_OK, &stbuf))){
        if(0 != check_object_owner(path, &stbuf)){
            return result;
        }
2013-07-05 02:28:31 +00:00
}
2020-08-22 12:40:53 +00:00
    if(S_ISDIR(stbuf.st_mode)){
        result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType);
    }else{
        strpath  = path;
        nowcache = strpath;
        result   = get_object_attribute(strpath.c_str(), NULL, &meta);
    }
    if(0 != result){
        return result;
2013-07-05 02:28:31 +00:00
}
2020-08-22 12:40:53 +00:00
    if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){
        // Should rebuild directory object(except new type)
        // Need to remove old dir("dir" etc) and make new dir("dir/")
        // At first, remove directory old object
        if(0 != (result = remove_old_type_dir(strpath, nDirType))){
            return result;
2020-08-02 13:37:06 +00:00
        }
        StatCache::getStatCacheData()->DelStat(nowcache);
2020-08-22 12:40:53 +00:00
        // Make new directory object("dir/")
        if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, ts[1].tv_sec, stbuf.st_uid, stbuf.st_gid))){
            return result;
        }
2020-08-02 13:37:06 +00:00
    }else{
2020-08-22 12:40:53 +00:00
        headers_t updatemeta;
        updatemeta["x-amz-meta-mtime"]         = str(ts[1].tv_sec);
        updatemeta["x-amz-copy-source"]        = urlEncode(service_path + bucket + get_realpath(strpath.c_str()));
        updatemeta["x-amz-metadata-directive"] = "REPLACE";

        // check opened file handle.
        //
        // If the file starts uploading by multipart when the disk capacity is insufficient,
        // we need to put these header after finishing upload.
        // Or if the file is only open, we must update to FdEntity's internal meta.
        //
2020-09-13 07:49:25 +00:00
        AutoFdEntity autoent;
        FdEntity*    ent;
        if(NULL != (ent = autoent.ExistOpen(path, -1, true))){
2020-08-22 12:40:53 +00:00
            // the file is opened now.
            if(ent->MergeOrgMeta(updatemeta)){
                // now uploading
                // the meta is pending and accumulated to be put after the upload is complete.
                S3FS_PRN_INFO("meta pending until upload is complete");
            }else{
                // allow to put header
                // updatemeta already merged the orgmeta of the opened files.
                if(0 != put_headers(strpath.c_str(), updatemeta, true)){
                    return -EIO;
                }
                StatCache::getStatCacheData()->DelStat(nowcache);
            }
        }else{
            // not opened file, then put headers
            merge_headers(meta, updatemeta, true);
            if(0 != put_headers(strpath.c_str(), meta, true)){
                return -EIO;
            }
            StatCache::getStatCacheData()->DelStat(nowcache);
        }
2013-07-05 02:28:31 +00:00
}
2020-08-22 12:40:53 +00:00
    S3FS_MALLOCTRIM(0);
2013-07-05 02:28:31 +00:00
2020-08-22 12:40:53 +00:00
    return 0;
2013-07-05 02:28:31 +00:00
}
2019-02-18 13:36:48 +00:00
static int s3fs_utimens_nocopy(const char* _path, const struct timespec ts[2])
2013-07-05 02:28:31 +00:00
{
2020-08-22 12:40:53 +00:00
    WTF8_ENCODE(path)
    int result;
2020-09-11 09:37:24 +00:00
    std::string strpath;
    std::string newpath;
    std::string nowcache;
2020-08-22 12:40:53 +00:00
    struct stat stbuf;
    dirtype nDirType = DIRTYPE_UNKNOWN;

    S3FS_PRN_INFO1("[path=%s][mtime=%lld]", path, static_cast<long long>(ts[1].tv_sec));

    if(0 == strcmp(path, "/")){
        S3FS_PRN_ERR("Could not change mtime for mount point.");
        return -EIO;
2013-07-05 02:28:31 +00:00
}
2020-08-22 12:40:53 +00:00
    if(0 != (result = check_parent_object_access(path, X_OK))){
        return result;
2013-07-05 02:28:31 +00:00
}
2020-08-22 12:40:53 +00:00
    if(0 != (result = check_object_access(path, W_OK, &stbuf))){
        if(0 != check_object_owner(path, &stbuf)){
            return result;
        }
2013-07-05 02:28:31 +00:00
}
2020-08-22 12:40:53 +00:00
    // Get attributes
    if(S_ISDIR(stbuf.st_mode)){
        result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType);
    }else{
        strpath  = path;
        nowcache = strpath;
        result   = get_object_attribute(strpath.c_str(), NULL, NULL);
2013-07-05 02:28:31 +00:00
}
2020-08-22 12:40:53 +00:00
    if(0 != result){
        return result;
2013-07-05 02:28:31 +00:00
}
    if(S_ISDIR(stbuf.st_mode)){
        // Should rebuild the directory object (except new type)
        // Need to remove the old dir("dir" etc) and make a new dir("dir/")
        // First, remove the old directory object
        if(0 != (result = remove_old_type_dir(strpath, nDirType))){
            return result;
        }
        StatCache::getStatCacheData()->DelStat(nowcache);

        // Make new directory object("dir/")
        if(0 != (result = create_directory_object(newpath.c_str(), stbuf.st_mode, ts[1].tv_sec, stbuf.st_uid, stbuf.st_gid))){
            return result;
        }
    }else{
        // normal object or directory object of newer version

        // open & load
        AutoFdEntity autoent;
        FdEntity*    ent;
        if(NULL == (ent = get_local_fent(autoent, strpath.c_str(), true))){
            S3FS_PRN_ERR("could not open and read file(%s)", strpath.c_str());
            return -EIO;
        }

        // set mtime
        if(0 != (result = ent->SetMtime(ts[1].tv_sec))){
            S3FS_PRN_ERR("could not set mtime to file(%s): result=%d", strpath.c_str(), result);
            return result;
        }

        // upload
        if(0 != (result = ent->Flush(true))){
            S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result);
            return result;
        }
        StatCache::getStatCacheData()->DelStat(nowcache);
    }
    S3FS_MALLOCTRIM(0);

    return result;
}

static int s3fs_truncate(const char* _path, off_t size)
{
    WTF8_ENCODE(path)
    int          result;
    headers_t    meta;
    AutoFdEntity autoent;
    FdEntity*    ent = NULL;

    S3FS_PRN_INFO("[path=%s][size=%lld]", path, static_cast<long long>(size));

    if(size < 0){
        size = 0;
    }
    if(0 != (result = check_parent_object_access(path, X_OK))){
        return result;
    }
    if(0 != (result = check_object_access(path, W_OK, NULL))){
        return result;
    }
    // Get file information
    if(0 == (result = get_object_attribute(path, NULL, &meta))){
        // Exists -> Get file(with size)
        if(NULL == (ent = autoent.Open(path, &meta, size, -1, false, true))){
            S3FS_PRN_ERR("could not open file(%s): errno=%d", path, errno);
            return -EIO;
        }
        if(0 != (result = ent->Load(0, size))){
            S3FS_PRN_ERR("could not download file(%s): result=%d", path, result);
            return result;
        }
    }else{
        // Not found -> Make tmpfile(with size)
        struct fuse_context* pcxt;
        if(NULL == (pcxt = fuse_get_context())){
            return -EIO;
        }

        time_t now = time(NULL);
        meta["Content-Type"]     = std::string("application/octet-stream");   // Static
        meta["x-amz-meta-mode"]  = str(S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO);
        meta["x-amz-meta-ctime"] = str(now);
        meta["x-amz-meta-mtime"] = str(now);
        meta["x-amz-meta-uid"]   = str(pcxt->uid);
        meta["x-amz-meta-gid"]   = str(pcxt->gid);

        if(NULL == (ent = autoent.Open(path, &meta, size, -1, true, true))){
            S3FS_PRN_ERR("could not open file(%s): errno=%d", path, errno);
            return -EIO;
        }
    }

    // upload
    if(0 != (result = ent->Flush(true))){
        S3FS_PRN_ERR("could not upload file(%s): result=%d", path, result);
        return result;
    }
    StatCache::getStatCacheData()->DelStat(path);
    S3FS_MALLOCTRIM(0);

    return result;
}
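
// [NOTE] (illustrative summary of s3fs_truncate above, not additional behavior)
// truncate(path, size) is implemented as:
//   1. permission checks on the parent (X_OK) and on the object (W_OK);
//   2. if the object exists  : open it at the new size and Load() bytes [0, size);
//      if it does not exist  : build default meta and open a new temporary file;
//   3. Flush(true) uploads the (re)sized object, then the stat cache entry is dropped.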

static int s3fs_open(const char* _path, struct fuse_file_info* fi)
{
    WTF8_ENCODE(path)
    int result;
    struct stat st;
    bool needs_flush = false;

    S3FS_PRN_INFO("[path=%s][flags=0x%x]", path, fi->flags);

    // Clear the cached stat so that a fresh one is read.
    // (If the object's stat has changed, it is refreshed here, so s3fs always
    // gets the current stat when it opens the object.)
    if(StatCache::getStatCacheData()->HasStat(path)){
        // flush any dirty data so that a subsequent stat gets the correct size
        if(0 != (result = s3fs_flush(_path, fi))){
            S3FS_PRN_ERR("could not flush(%s): result=%d", path, result);
        }
        StatCache::getStatCacheData()->DelStat(path);
    }

    int mask = (O_RDONLY != (fi->flags & O_ACCMODE) ? W_OK : R_OK);
    if(0 != (result = check_parent_object_access(path, X_OK))){
        return result;
    }

    result = check_object_access(path, mask, &st);
    if(-ENOENT == result){
        if(0 != (result = check_parent_object_access(path, W_OK))){
            return result;
        }
    }else if(0 != result){
        return result;
    }
    if((unsigned int)fi->flags & O_TRUNC){
        if(0 != st.st_size){
            st.st_size  = 0;
            needs_flush = true;
        }
    }
    if(!S_ISREG(st.st_mode) || S_ISLNK(st.st_mode)){
        st.st_mtime = -1;
    }

    AutoFdEntity autoent;
    FdEntity*    ent;
    headers_t    meta;
    get_object_attribute(path, NULL, &meta, true, NULL, true);    // no truncate cache

    if(NULL == (ent = autoent.Open(path, &meta, st.st_size, st.st_mtime, false, true))){
        StatCache::getStatCacheData()->DelStat(path);
        return -EIO;
    }

    if(needs_flush){
        if(0 != (result = ent->RowFlush(path, true))){
            S3FS_PRN_ERR("could not upload file(%s): result=%d", path, result);
            StatCache::getStatCacheData()->DelStat(path);
            return result;
        }
    }
    autoent.Detach();       // KEEP fdentity open
    fi->fh = ent->GetFd();

    S3FS_MALLOCTRIM(0);
    return 0;
}
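
// [NOTE] (illustrative sketch of the fd lifecycle, as used by the handlers above and below)
//   s3fs_open()    : AutoFdEntity::Open() creates/attaches an FdEntity, Detach() keeps it
//                    alive past this scope, and its descriptor is stored in fi->fh.
//   s3fs_read()/s3fs_write() : ExistOpen(path, fi->fh) re-attach to the same entity.
//   s3fs_release() : GetFdEntity(path, fi->fh, false) finds it without taking another
//                    reference, so destroying the AutoFdEntity there releases the
//                    reference taken at open time.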

static int s3fs_read(const char* _path, char* buf, size_t size, off_t offset, struct fuse_file_info* fi)
{
    WTF8_ENCODE(path)
    ssize_t res;
    S3FS_PRN_DBG("[path=%s][size=%zu][offset=%lld][fd=%llu]", path, size, static_cast<long long>(offset), (unsigned long long)(fi->fh));
    AutoFdEntity autoent;
    FdEntity*    ent;
    if(NULL == (ent = autoent.ExistOpen(path, static_cast<int>(fi->fh)))){
        S3FS_PRN_ERR("could not find opened fd(%s)", path);
        return -EIO;
    }
    if(ent->GetFd() != static_cast<int>(fi->fh)){
        S3FS_PRN_WARN("different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fi->fh));
    }

    // check real file size
    off_t realsize = 0;
    if(!ent->GetSize(realsize) || 0 == realsize){
        S3FS_PRN_DBG("file size is 0, so break to read.");
        return 0;
    }

    if(0 > (res = ent->Read(buf, offset, size, false))){
        S3FS_PRN_WARN("failed to read file(%s). result=%zd", path, res);
    }
    return static_cast<int>(res);
}

static int s3fs_write(const char* _path, const char* buf, size_t size, off_t offset, struct fuse_file_info* fi)
{
    WTF8_ENCODE(path)
    ssize_t res;

    S3FS_PRN_DBG("[path=%s][size=%zu][offset=%lld][fd=%llu]", path, size, static_cast<long long int>(offset), (unsigned long long)(fi->fh));

    AutoFdEntity autoent;
    FdEntity*    ent;
    if(NULL == (ent = autoent.ExistOpen(path, static_cast<int>(fi->fh)))){
        S3FS_PRN_ERR("could not find opened fd(%s)", path);
        return -EIO;
    }
    if(ent->GetFd() != static_cast<int>(fi->fh)){
        S3FS_PRN_WARN("different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fi->fh));
    }

    if(0 > (res = ent->Write(buf, offset, size))){
        S3FS_PRN_WARN("failed to write file(%s). result=%zd", path, res);
    }
    return static_cast<int>(res);
}

static int s3fs_statfs(const char* _path, struct statvfs* stbuf)
{
    // WTF8_ENCODE(path)
    // 256TB
    stbuf->f_bsize   = 0x1000000;
    stbuf->f_blocks  = 0x1000000;
    stbuf->f_bfree   = 0x1000000;
    stbuf->f_bavail  = 0x1000000;
    stbuf->f_namemax = NAME_MAX;
    return 0;
}
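
// [NOTE] (illustrative arithmetic, derived from the constants above)
// s3fs_statfs() reports a fixed virtual capacity:
//   f_bsize  = 0x1000000 bytes per block  (16 MiB)
//   f_blocks = 0x1000000 blocks           (16,777,216)
//   => 2^24 * 2^24 = 2^48 bytes = 256 TiB total/free/available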

static int s3fs_flush(const char* _path, struct fuse_file_info* fi)
{
    WTF8_ENCODE(path)
    int result;

    S3FS_PRN_INFO("[path=%s][fd=%llu]", path, (unsigned long long)(fi->fh));

    int mask = (O_RDONLY != (fi->flags & O_ACCMODE) ? W_OK : R_OK);
    if(0 != (result = check_parent_object_access(path, X_OK))){
        return result;
    }
    result = check_object_access(path, mask, NULL);
    if(-ENOENT == result){
        if(0 != (result = check_parent_object_access(path, W_OK))){
            return result;
        }
    }else if(0 != result){
        return result;
    }
    AutoFdEntity autoent;
    FdEntity*    ent;
    if(NULL != (ent = autoent.ExistOpen(path, static_cast<int>(fi->fh)))){
        ent->UpdateMtime();
        result = ent->Flush(false);
    }
    S3FS_MALLOCTRIM(0);

    return result;
}

// [NOTICE]
// The fd is assumed to be valid.
//
static int s3fs_fsync(const char* _path, int datasync, struct fuse_file_info* fi)
{
    WTF8_ENCODE(path)
    int result = 0;

    S3FS_PRN_INFO("[path=%s][fd=%llu]", path, (unsigned long long)(fi->fh));

    AutoFdEntity autoent;
    FdEntity*    ent;
    if(NULL != (ent = autoent.ExistOpen(path, static_cast<int>(fi->fh)))){
        if(0 == datasync){
            ent->UpdateMtime();
        }
        result = ent->Flush(false);
    }
    S3FS_MALLOCTRIM(0);

    // Issue 320: Delete stat cache entry because st_size may have changed.
    StatCache::getStatCacheData()->DelStat(path);

    return result;
}
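
// [NOTE] (clarifying the datasync flag handling above)
// For fsync(2) with datasync != 0, only the file data must be synchronized, so
// UpdateMtime() is skipped and Flush(false) uploads the data alone; with
// datasync == 0 the mtime metadata is refreshed before the upload as well.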

static int s3fs_release(const char* _path, struct fuse_file_info* fi)
{
    WTF8_ENCODE(path)
    S3FS_PRN_INFO("[path=%s][fd=%llu]", path, (unsigned long long)(fi->fh));

    // [NOTE]
    // Every opened file has its stat cached with the no-truncate flag set.
    // Thus we unset it here.
    StatCache::getStatCacheData()->ChangeNoTruncateFlag(std::string(path), false);

    // [NOTICE]
    // First, remove the stats cache entry.
    // This is because fuse does not wait for the response from the "release"
    // function and runs the next command before this function returns. :-(
    // Thus we delete the stats cache entry ASAP.
    //
    if((fi->flags & O_RDWR) || (fi->flags & O_WRONLY)){
        StatCache::getStatCacheData()->DelStat(path);
    }

    {   // scope for AutoFdEntity
        AutoFdEntity autoent;
        FdEntity*    ent;

        // [NOTE]
        // The reference count of the FdEntity corresponding to fi->fh was already
        // incremented when it was opened. Therefore, when the existing FdEntity is
        // found here, the reference count must not be incremented again. The
        // reference taken at open time is released when this AutoFdEntity object
        // is destroyed.
        //
        if(NULL == (ent = autoent.GetFdEntity(path, static_cast<int>(fi->fh), false))){
            S3FS_PRN_ERR("could not find fd(file=%s)", path);
            return -EIO;
        }
        if(ent->GetFd() != static_cast<int>(fi->fh)){
            S3FS_PRN_WARN("different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fi->fh));
        }
    }

    // check - for debug
    if(IS_S3FS_LOG_DBG()){
        AutoFdEntity autoent;
        FdEntity*    ent;
        if(NULL != (ent = autoent.GetFdEntity(path, static_cast<int>(fi->fh)))){
            S3FS_PRN_WARN("file(%s),fd(%d) is still opened.", path, ent->GetFd());
        }
    }
    S3FS_MALLOCTRIM(0);
    return 0;
}

static int s3fs_opendir(const char* _path, struct fuse_file_info* fi)
{
    WTF8_ENCODE(path)
    int result;
    int mask = (O_RDONLY != (fi->flags & O_ACCMODE) ? W_OK : R_OK) | X_OK;
    S3FS_PRN_INFO("[path=%s][flags=0x%x]", path, fi->flags);
    if(0 == (result = check_object_access(path, mask, NULL))){
        result = check_parent_object_access(path, mask);
    }
    S3FS_MALLOCTRIM(0);

    return result;
}

static bool multi_head_callback(S3fsCurl* s3fscurl)
{
    if(!s3fscurl){
        return false;
    }
    std::string saved_path = s3fscurl->GetSpacialSavedPath();
    if(!StatCache::getStatCacheData()->AddStat(saved_path, *(s3fscurl->GetResponseHeaders()))){
        S3FS_PRN_ERR("failed adding stat cache [path=%s]", saved_path.c_str());
        return false;
    }
    return true;
}

static S3fsCurl* multi_head_retry_callback(S3fsCurl* s3fscurl)
{
    if(!s3fscurl){
        return NULL;
    }
    int ssec_key_pos = s3fscurl->GetLastPreHeadSeecKeyPos();
    int retry_count  = s3fscurl->GetMultipartRetryCount();

    // Retry with the next SSE-C key.
    // When the last SSE-C key has been tried, count it as one master retry.
    ssec_key_pos = (ssec_key_pos < 0 ? 0 : ssec_key_pos + 1);
    if(0 == S3fsCurl::GetSseKeyCount() || S3fsCurl::GetSseKeyCount() <= ssec_key_pos){
        if(s3fscurl->IsOverMultipartRetryCount()){
            S3FS_PRN_ERR("Over retry count(%d) limit(%s).", s3fscurl->GetMultipartRetryCount(), s3fscurl->GetSpacialSavedPath().c_str());
            return NULL;
        }
        ssec_key_pos = -1;
        retry_count++;
    }

    S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe());
    std::string path       = s3fscurl->GetPath();
    std::string base_path  = s3fscurl->GetBasePath();
    std::string saved_path = s3fscurl->GetSpacialSavedPath();

    if(!newcurl->PreHeadRequest(path, base_path, saved_path, ssec_key_pos)){
        S3FS_PRN_ERR("Could not duplicate curl object(%s).", saved_path.c_str());
        delete newcurl;
        return NULL;
    }
    newcurl->SetMultipartRetryCount(retry_count);

    return newcurl;
}
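
// [NOTE] (illustrative walk-through of the SSE-C retry progression above,
// assuming two registered SSE-C keys)
//   initial HEAD : ssec_key_pos = -1 (no SSE-C header)
//   1st retry    : ssec_key_pos =  0 (first SSE-C key)
//   2nd retry    : ssec_key_pos =  1 (second SSE-C key)
//   3rd retry    : past the last key, so ssec_key_pos resets to -1 and
//                  retry_count is bumped; NULL stops the retries once the
//                  multipart retry limit is exceeded.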

static int readdir_multi_head(const char* path, const S3ObjList& head, void* buf, fuse_fill_dir_t filler)
{
    S3fsMultiCurl curlmulti(S3fsCurl::GetMaxMultiRequest());
    s3obj_list_t  headlist;
    s3obj_list_t  fillerlist;
    int           result = 0;

    // Make base path list.
    head.GetNameList(headlist, true, false);        // get name with "/".

    S3FS_PRN_INFO1("[path=%s][list=%zu]", path, headlist.size());

    // Initialize S3fsMultiCurl
    curlmulti.SetSuccessCallback(multi_head_callback);
    curlmulti.SetRetryCallback(multi_head_retry_callback);

    s3obj_list_t::iterator iter;
    fillerlist.clear();
    // Make single head request(with max).
    for(iter = headlist.begin(); headlist.end() != iter; iter = headlist.erase(iter)){
        std::string disppath = path + (*iter);
        std::string etag     = head.GetETag((*iter).c_str());
        std::string fillpath = disppath;
        if('/' == disppath[disppath.length() - 1]){
            fillpath = fillpath.substr(0, fillpath.length() - 1);
        }
        fillerlist.push_back(fillpath);

        if(StatCache::getStatCacheData()->HasStat(disppath, etag.c_str())){
            continue;
        }

        // First, check the object without "SSE-C".
        // If that check fails while running in SSE-C mode, the retry callback
        // checks again with each SSE-C key.
        S3fsCurl* s3fscurl = new S3fsCurl();
        if(!s3fscurl->PreHeadRequest(disppath, (*iter), disppath)){    // target path = cache key path.(ex "dir/")
            S3FS_PRN_WARN("Could not make curl object for head request(%s).", disppath.c_str());
            delete s3fscurl;
            continue;
        }

        if(!curlmulti.SetS3fsCurlObject(s3fscurl)){
            S3FS_PRN_WARN("Could not make curl object into multi curl(%s).", disppath.c_str());
            delete s3fscurl;
            continue;
        }
    }
    // Multi request
    if(0 != (result = curlmulti.Request())){
        // If the result is -EIO, some error occurred.
        // This case includes objects encrypted with SSE for which s3fs has no keys.
        // So s3fs resets the result to 0 in order to continue processing.
        if(-EIO == result){
            S3FS_PRN_WARN("error occurred in multi request(errno=%d), but continue...", result);
            result = 0;
        }else{
            S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result);
            return result;
        }
    }
    // Populate the fuse buffer.
    // This is the best place to do it, because the stat cache may hold fewer
    // entries than there are files in the directory.
    //
    for(iter = fillerlist.begin(); fillerlist.end() != iter; ++iter){
        struct stat st;
        bool in_cache = StatCache::getStatCacheData()->GetStat((*iter), &st);
        std::string bpath = mybasename((*iter));
        if(use_wtf8){
            bpath = s3fs_wtf8_decode(bpath);
        }
        if(in_cache){
            filler(buf, bpath.c_str(), &st, 0);
        }else{
            S3FS_PRN_INFO2("Could not find %s file in stat cache.", (*iter).c_str());
            filler(buf, bpath.c_str(), 0, 0);
        }
    }

    return result;
}
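
// [NOTE] (illustrative summary of readdir_multi_head above, not additional behavior)
// readdir is served in two phases:
//   1. one parallel HEAD request per entry (up to GetMaxMultiRequest() in flight)
//      warms the stat cache via multi_head_callback();
//   2. the filler loop then reports each entry from the stat cache, falling back
//      to a stat-less filler() call for entries whose HEAD request failed.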

static int s3fs_readdir(const char* _path, void* buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info* fi)
{
    WTF8_ENCODE(path)
    S3ObjList head;
    int       result;

    S3FS_PRN_INFO("[path=%s]", path);

    if(0 != (result = check_object_access(path, X_OK, NULL))){
        return result;
    }

    // get a list of all the objects
    if(0 != (result = list_bucket(path, head, "/"))){
        S3FS_PRN_ERR("list_bucket returns error(%d).", result);
        return result;
    }

    // force to add "." and ".." name.
    filler(buf, ".", 0, 0);
    filler(buf, "..", 0, 0);
    if(head.IsEmpty()){
        return 0;
    }

    // Send multi head request for stats caching.
    std::string strpath = path;
    if(strcmp(path, "/") != 0){
        strpath += "/";
    }
    if(0 != (result = readdir_multi_head(strpath.c_str(), head, buf, filler))){
        S3FS_PRN_ERR("readdir_multi_head returns error(%d).", result);
    }
    S3FS_MALLOCTRIM(0);
Summary of Changes(1.63 -> 1.64)
* This new version was made to fix a big issue with directory objects.
Please be careful and review the new s3fs.
==========================
List of Changes
==========================
1) Fixed bugs
Fixed some memory leaks and an un-freed curl handle.
Fixed code containing latent bugs that had not yet been observed.
Fixed a bug where s3fs could not update an object's mtime while it held an open file descriptor for that object.
Please let us know if you find a new bug or memory leak.
2) Changed codes
Changed the code of s3fs_readdir(), list_bucket(), etc.
Changed the get_realpath() function so that it returns std::string.
Changed the use of the exit() function. Because exit() was called directly from many fuse callback functions, these functions now call fuse_exit() and return with an error instead.
Changed the code so that the case of the characters in the "x-amz-meta" response header is ignored.
3) Added an option
Added the norenameapi option for storage compatible with S3 but without the copy API.
This option is a subset of the nocopyapi option.
Please read the man page or call s3fs with the --help option.
4) Object for directory
This is a very big and important change.
The directory object is now stored as "dir/" instead of "dir" to be compatible with other S3 client applications.
This version also understands directory objects made by the old version.
When the new s3fs changes the attributes, owner/group, or mtime of a directory object, it automatically renames the object from the old name("dir") to the new one("dir/").
If you need to change an old object name("dir") to the new one("dir/") manually, you can use the shell script(mergedir.sh) in the test directory.
* About the directory object name
AWS S3 allows both "dir" and "dir/" as object names.
The s3fs before this version understood only "dir" as a directory object name; it did not understand the "dir/" object name.
The new version understands both the "dir" and "dir/" object names.
The s3fs user needs to take care in the special situations mentioned later.
The new version deletes the old "dir" object and makes a new "dir/" object when the user changes the permission, owner/group, or mtime of the directory object.
This operation happens in the background and automatically.
If you need to merge manually, you can use the mergedir.sh shell script in the test directory.
This script runs chmod/chown/touch commands after finding a directory.
When another S3 client application makes a directory object("dir/") without the meta information that s3fs needs, this script can add that meta information to the directory object.
If this script is insufficient for you, you can read and modify the code yourself.
Please use the shell script carefully, because it changes the objects.
If you find a bug in this script, please let me know.
* Details
** The directory object made by the old version
The directory object made by the old version is not understood by other S3 client applications.
The new s3fs version was updated to keep compatibility with other clients.
You can use mergedir.sh in the test directory to merge old directory objects("dir") into new ones("dir/").
The directory object name is changed from "dir" to "dir/" after mergedir.sh is run, and the resulting "dir/" object is understood by other S3 clients.
The script runs chmod/chown/chgrp/touch/etc commands against the old directory object("dir"), and then the new s3fs merges that directory automatically.
If you need to change a directory object from old to new manually, you can do so by running any command that changes the directory attributes(mode/owner/group/mtime).
** The directory object made by the new version
The directory object name made by the new version is "dir/".
Because the name includes "/", other S3 client applications understand it as a directory.
I tested the new directory with s3cmd/tntDrive/DragonDisk/Gladinet as other S3 clients, and the compatibility was good.
You need to know that there are small compatibility problems due to differences in the clients' specifications.
And you need to be careful, because the old s3fs cannot understand a directory object made by the new s3fs.
You should upgrade every s3fs instance that accesses the same bucket.
** The directory object made by another S3 client application
So that an object can be determined to be a directory, s3fs makes and uses special meta information, sent as "x-amz-meta-***" and "Content-Type" HTTP headers.
The s3fs sets and uses the following HTTP headers for a directory object:
Content-Type: application/x-directory
x-amz-meta-mode: <mode>
x-amz-meta-uid: <UID>
x-amz-meta-gid: <GID>
x-amz-meta-mtime: <unix time of last modification>
Other S3 client applications build directory objects without the attributes that s3fs needs.
When the "ls" command is run on an s3fs-fuse file system that has directories/files made by other S3 clients, the result looks like this:
d--------- 1 root root 0 Feb 27 11:21 dir
---------- 1 root root 1024 Mar 14 02:15 file
Because the objects have no meta information("x-amz-meta-mode"), the mode is 0000.
In this case the directory object still shows the "d", because s3fs determines an object to be a directory when its name ends with "/" or it has the "Content-Type: application/x-directory" header.
(The s3fs sets "Content-Type: application/x-directory" on directory objects, but other S3 clients set "binary/octet-stream".)
As a result, nobody except root is allowed to operate on the object.
The owner and group are "root"(UID=0) because the object has no "x-amz-meta-uid/gid".
If the object has no "x-amz-meta-mtime", s3fs uses the "Last-Modified" HTTP header.
Therefore the object's mtime is the "Last-Modified" value.(This logic is the same as in the old version.)
As already explained, if you need to change the object attributes, you can do so manually or with mergedir.sh.
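As a rough illustration of the header set above (a hypothetical sketch, not the actual s3fs code), the directory meta information could be assembled like this, using a plain std::map in place of s3fs's headers_t type:

    #include <map>
    #include <string>
    #include <ctime>
    #include <sys/types.h>
    #include <sys/stat.h>

    // Hypothetical helper: build the HTTP headers that mark "dir/" as a directory.
    std::map<std::string, std::string> build_dir_meta(mode_t mode, uid_t uid, gid_t gid, time_t mtime)
    {
        std::map<std::string, std::string> meta;
        meta["Content-Type"]     = "application/x-directory";
        meta["x-amz-meta-mode"]  = std::to_string(mode);   // e.g. "16877" for drwxr-xr-x
        meta["x-amz-meta-uid"]   = std::to_string(uid);
        meta["x-amz-meta-gid"]   = std::to_string(gid);
        meta["x-amz-meta-mtime"] = std::to_string(mtime);  // unix time
        return meta;
    }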
* Example of the compatibility with s3cmd etc
** Case A) Only a "dir/file" object
In one case there is only a "dir/file" object, without a "dir/" object; such an object is made by s3cmd or similar tools.
In this case, the response of the REST API(list bucket) with the "delimiter=/" parameter has "CommonPrefixes", and "dir/" is listed in "CommonPrefixes/Prefix", but the "dir/" object is not a real object.
The s3fs needs to determine this object to be a directory, but there is no real directory object("dir" or "dir/").
Neither the new s3fs nor the old one understands this "dir/" in "CommonPrefixes", because s3fs fails to get meta information from "dir" or "dir/".
In this case, the result of the "ls" command looks like this:
??????????? ? ? ? ? ? dir
This "dir" cannot be operated on by anyone or any process, because s3fs does not understand the object's permissions.
And the "dir/file" object cannot be shown or operated on either.
Some other S3 clients(tntDrive/Gladinet/etc) cannot understand this object, just like s3fs.
If you need to operate on the "dir/file" object, you need to make the "dir/" object a directory.
To make the "dir/" directory object, do the following.
Because there is already the "dir" entry, which is not a real object, you cannot make the "dir/" directory directly.
(s3cmd does not make a "dir/" object because the object name ends with "/".)
You should make a directory with another name(ex: "dir2/") and move the "dir/file" objects into the new directory.
Finally, you can rename the directory from "dir2/" to "dir/".
** Case B) Both "dir" and "dir/file" objects
In this case there are "dir" and "dir/file" objects which were made by s3cmd or similar tools.
s3cmd and s3fs understand the "dir" object as a normal(file) object, because this object has no meta information and no name ending with "/".
But the result of the REST API(list bucket) has the "dir/" name in "CommonPrefixes/Prefix".
The s3fs checks "dir/" and "dir" as a directory, but the "dir" object is not a directory object.
(Because the new s3fs needs to be compatible with the old version, it checks for a directory object in the order "dir/", then "dir".)
In this case, the result of the "ls" command looks like this:
---------- 1 root root 0 Feb 27 02:48 dir
As a result, "dir/file" cannot be shown or operated on, because the "dir" object is a file.
If you want "dir" to be treated as a directory, you need to add meta information to the "dir" object with s3cmd.
** Case C) Both "dir" and "dir/" objects
In the last case there are "dir" and "dir/" objects which were made by other S3 clients.
(Example: first you upload an object "dir/" as a directory with the new s3fs, and then you upload an object "dir" with s3cmd.)
The new s3fs determines "dir/" to be the directory, because it searches in the order "dir/", then "dir".
As a result, the "dir" object cannot be shown or operated on.
** Compatibility between S3 clients
Neither the new nor the old s3fs understands both "dir" and "dir/" at the same time; tntDrive and Gladinet behave the same as s3fs.
If both "dir/" and "dir" objects exist, s3fs gives priority to "dir/".
But s3cmd and DragonDisk understand both objects.
git-svn-id: http://s3fs.googlecode.com/svn/trunk@392 df820570-a93a-0410-bd06-b72b767a4274
2013-03-23 14:04:07 +00:00
2020-08-22 12:40:53 +00:00
return result ;
}
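// [Illustrative sketch, not part of s3fs]
// A minimal, self-contained model of the fill pattern used by s3fs_readdir()
// above: always emit "." and "..", then emit each listed name with a stat
// when it is in the cache and without one otherwise. The fill_dir_t typedef
// mirrors the FUSE 2.x fuse_fill_dir_t signature used here; the names and
// the fake cache are hypothetical.
#include <cstdio>
#include <cstring>
#include <map>
#include <string>
#include <vector>
#include <sys/types.h>
#include <sys/stat.h>

typedef int (*fill_dir_t)(void* buf, const char* name, const struct stat* st, off_t off);

static int print_filler(void* /*buf*/, const char* name, const struct stat* st, off_t /*off*/)
{
    std::printf("%s%s\n", name, st ? " (cached stat)" : "");
    return 0;
}

static void fill_directory(const std::vector<std::string>& names, const std::map<std::string, struct stat>& cache, void* buf, fill_dir_t filler)
{
    filler(buf, ".", NULL, 0);
    filler(buf, "..", NULL, 0);
    for(size_t i = 0; i < names.size(); ++i){
        std::map<std::string, struct stat>::const_iterator it = cache.find(names[i]);
        filler(buf, names[i].c_str(), (it != cache.end() ? &it->second : NULL), 0);
    }
}

int main()
{
    std::vector<std::string> names;
    names.push_back("file1");
    names.push_back("subdir");

    struct stat st;
    std::memset(&st, 0, sizeof(st));
    st.st_mode = S_IFREG | 0644;

    std::map<std::string, struct stat> cache;
    cache["file1"] = st;   // "subdir" is deliberately left uncached

    fill_directory(names, cache, NULL, print_filler);
    return 0;
}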
2020-08-22 12:40:53 +00:00
static int list_bucket ( const char * path , S3ObjList & head , const char * delimiter , bool check_content_only )
2013-04-06 17:39:22 +00:00
{
2020-09-11 09:37:24 +00:00
std : : string s3_realpath ;
std : : string query_delimiter ;
std : : string query_prefix ;
std : : string query_maxkey ;
std : : string next_marker ;
bool truncated = true ;
2020-08-22 12:40:53 +00:00
S3fsCurl s3fscurl ;
xmlDocPtr doc ;
2013-04-06 17:39:22 +00:00
2020-08-22 12:40:53 +00:00
S3FS_PRN_INFO1 ( " [path=%s] " , path ) ;
2011-07-02 02:11:54 +00:00
2020-08-22 12:40:53 +00:00
if ( delimiter & & 0 < strlen ( delimiter ) ) {
query_delimiter + = " delimiter= " ;
query_delimiter + = delimiter ;
query_delimiter + = " & " ;
}
2011-07-02 02:11:54 +00:00
2020-08-22 12:40:53 +00:00
query_prefix + = " &prefix= " ;
s3_realpath = get_realpath ( path ) ;
if ( 0 = = s3_realpath . length ( ) | | ' / ' ! = s3_realpath [ s3_realpath . length ( ) - 1 ] ) {
// the prefix must end with "/"
query_prefix + = urlEncode ( s3_realpath . substr ( 1 ) + " / " ) ;
} else {
query_prefix + = urlEncode ( s3_realpath . substr ( 1 ) ) ;
}
if ( check_content_only ) {
// Just need to know if there are child objects in dir
// For dir with children, expect "dir/" and "dir/child"
query_maxkey + = " max-keys=2 " ;
} else {
query_maxkey + = " max-keys= " + str ( max_keys_list_object ) ;
}
2013-04-06 17:39:22 +00:00
2020-08-22 12:40:53 +00:00
while ( truncated ) {
2020-09-11 09:37:24 +00:00
std : : string each_query = query_delimiter ;
2020-08-22 12:40:53 +00:00
if ( ! next_marker . empty ( ) ) {
each_query + = " marker= " + urlEncode ( next_marker ) + " & " ;
next_marker = " " ;
}
each_query + = query_maxkey ;
each_query + = query_prefix ;
// request
int result ;
if ( 0 ! = ( result = s3fscurl . ListBucketRequest ( path , each_query . c_str ( ) ) ) ) {
S3FS_PRN_ERR ( " ListBucketRequest returns with error. " ) ;
return result ;
}
BodyData * body = s3fscurl . GetBodyData ( ) ;
2011-07-02 02:11:54 +00:00
2020-08-22 12:40:53 +00:00
// xmlDocPtr
if ( NULL = = ( doc = xmlReadMemory ( body - > str ( ) , static_cast < int > ( body - > size ( ) ) , " " , NULL , 0 ) ) ) {
S3FS_PRN_ERR ( " xmlReadMemory returns with error. " ) ;
return - 1 ;
}
if ( 0 ! = append_objects_from_xml ( path , doc , head ) ) {
S3FS_PRN_ERR ( " append_objects_from_xml returns with error. " ) ;
xmlFreeDoc ( doc ) ;
return - 1 ;
}
if ( true = = ( truncated = is_truncated ( doc ) ) ) {
xmlChar * tmpch = get_next_marker ( doc ) ;
if ( tmpch ) {
next_marker = ( char * ) tmpch ;
xmlFree ( tmpch ) ;
} else {
// If "delimiter" was not specified, S3 does not return "NextMarker".
// In that case, the last object name can be used as the next marker.
//
2020-09-11 09:37:24 +00:00
std : : string lastname ;
2020-08-22 12:40:53 +00:00
if ( ! head . GetLastName ( lastname ) ) {
S3FS_PRN_WARN ( " Could not find next marker, thus break loop. " ) ;
truncated = false ;
} else {
next_marker = s3_realpath . substr ( 1 ) ;
if ( 0 = = s3_realpath . length ( ) | | ' / ' ! = s3_realpath [ s3_realpath . length ( ) - 1 ] ) {
next_marker + = " / " ;
}
next_marker + = lastname ;
}
}
}
S3FS_XMLFREEDOC ( doc ) ;
2011-07-02 02:11:54 +00:00
2020-08-22 12:40:53 +00:00
// reset(initialize) curl object
s3fscurl . DestroyCurlHandle ( ) ;
2020-08-22 12:40:53 +00:00
if ( check_content_only ) {
break ;
}
}
S3FS_MALLOCTRIM ( 0 ) ;
2020-08-22 12:40:53 +00:00
return 0 ;
2011-07-02 02:11:54 +00:00
}
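// [Illustrative sketch, not part of s3fs]
// The loop in list_bucket() above is the standard S3 listing pagination
// pattern: fetch a page, collect its keys, and while the response is
// truncated, repeat with the marker advanced past the last key seen.
// This self-contained model replaces the real ListBucketRequest with a
// fake sorted in-memory "bucket"; fetch_page() and its page size are
// hypothetical stand-ins.
#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

struct Page
{
    std::vector<std::string> keys;
    bool truncated;
};

static Page fetch_page(const std::vector<std::string>& bucket, const std::string& marker, size_t max_keys)
{
    Page page;
    page.truncated = false;
    // keys strictly greater than the marker, like S3's "marker" parameter
    std::vector<std::string>::const_iterator it = std::upper_bound(bucket.begin(), bucket.end(), marker);
    for(; it != bucket.end(); ++it){
        if(page.keys.size() == max_keys){
            page.truncated = true;
            break;
        }
        page.keys.push_back(*it);
    }
    return page;
}

int main()
{
    std::vector<std::string> bucket;   // must stay sorted for upper_bound
    bucket.push_back("dir/");
    bucket.push_back("dir/a");
    bucket.push_back("dir/b");
    bucket.push_back("dir/c");

    std::string next_marker;
    bool truncated = true;
    while(truncated){
        Page page = fetch_page(bucket, next_marker, 2);   // tiny max-keys to force pagination
        for(size_t i = 0; i < page.keys.size(); ++i){
            std::printf("%s\n", page.keys[i].c_str());
        }
        truncated = page.truncated;
        if(truncated && !page.keys.empty()){
            next_marker = page.keys.back();   // the last name works as the next marker
        }
    }
    return 0;
}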
2020-08-22 12:40:53 +00:00
static int remote_mountpath_exists ( const char * path )
2013-07-05 02:28:31 +00:00
{
2020-08-22 12:40:53 +00:00
struct stat stbuf ;
2013-09-14 21:50:39 +00:00
2020-08-22 12:40:53 +00:00
S3FS_PRN_INFO1 ( " [path=%s] " , path ) ;
2011-07-02 02:11:54 +00:00
2020-08-22 12:40:53 +00:00
// getattr will prefix the path with the remote mountpoint
if ( 0 ! = get_object_attribute ( " / " , & stbuf , NULL ) ) {
return - 1 ;
2013-01-19 16:05:07 +00:00
}
2020-08-22 12:40:53 +00:00
if ( ! S_ISDIR ( stbuf . st_mode ) ) {
return - 1 ;
}
return 0 ;
2010-11-13 23:59:23 +00:00
}
2015-06-06 16:39:39 +00:00
static void free_xattrs ( xattrs_t & xattrs )
{
2020-08-22 12:40:53 +00:00
for ( xattrs_t : : iterator iter = xattrs . begin ( ) ; iter ! = xattrs . end ( ) ; + + iter ) {
delete iter - > second ;
}
xattrs . clear ( ) ;
2015-06-06 16:39:39 +00:00
}
2020-09-11 09:37:24 +00:00
static bool parse_xattr_keyval ( const std : : string & xattrpair , std : : string & key , PXATTRVAL & pval )
2015-04-20 17:24:57 +00:00
{
2020-08-22 12:40:53 +00:00
// parse key and value
size_t pos ;
2020-09-11 09:37:24 +00:00
std : : string tmpval ;
if ( std : : string : : npos = = ( pos = xattrpair . find_first_of ( ' : ' ) ) ) {
2020-08-22 12:40:53 +00:00
S3FS_PRN_ERR ( " one of xattr pair(%s) is wrong format. " , xattrpair . c_str ( ) ) ;
return false ;
}
key = xattrpair . substr ( 0 , pos ) ;
tmpval = xattrpair . substr ( pos + 1 ) ;
2015-04-20 17:24:57 +00:00
2020-08-22 12:40:53 +00:00
if ( ! takeout_str_dquart ( key ) | | ! takeout_str_dquart ( tmpval ) ) {
S3FS_PRN_ERR ( " one of xattr pair(%s) is wrong format. " , xattrpair . c_str ( ) ) ;
return false ;
}
2015-06-06 16:39:39 +00:00
2020-08-22 12:40:53 +00:00
pval = new XATTRVAL ;
pval - > length = 0 ;
pval - > pvalue = s3fs_decode64 ( tmpval . c_str ( ) , & pval - > length ) ;
2015-06-06 16:39:39 +00:00
2020-08-22 12:40:53 +00:00
return true ;
2015-04-20 17:24:57 +00:00
}
static size_t parse_xattrs ( const std : : string & strxattrs , xattrs_t & xattrs )
{
2020-08-22 12:40:53 +00:00
xattrs . clear ( ) ;
// decode
2020-09-11 09:37:24 +00:00
std : : string jsonxattrs = urlDecode ( strxattrs ) ;
2020-08-22 12:40:53 +00:00
// get from "{" to "}"
2020-09-11 09:37:24 +00:00
std : : string restxattrs ;
2020-08-22 12:40:53 +00:00
{
size_t startpos ;
2020-09-11 09:37:24 +00:00
size_t endpos = std : : string : : npos ;
if ( std : : string : : npos ! = ( startpos = jsonxattrs . find_first_of ( ' { ' ) ) ) {
2020-08-22 12:40:53 +00:00
endpos = jsonxattrs . find_last_of ( ' } ' ) ;
}
2020-09-11 09:37:24 +00:00
if ( startpos = = std : : string : : npos | | endpos = = std : : string : : npos | | endpos < = startpos ) {
2020-08-22 12:40:53 +00:00
S3FS_PRN_WARN ( " xattr header(%s) is not json format. " , jsonxattrs . c_str ( ) ) ;
return 0 ;
}
restxattrs = jsonxattrs . substr ( startpos + 1 , endpos - ( startpos + 1 ) ) ;
}
// parse each key:val
2020-09-11 09:37:24 +00:00
for ( size_t pair_nextpos = restxattrs . find_first_of ( ' , ' ) ; 0 < restxattrs . length ( ) ; restxattrs = ( pair_nextpos ! = std : : string : : npos ? restxattrs . substr ( pair_nextpos + 1 ) : std : : string ( " " ) ) , pair_nextpos = restxattrs . find_first_of ( ' , ' ) ) {
std : : string pair = pair_nextpos ! = std : : string : : npos ? restxattrs . substr ( 0 , pair_nextpos ) : restxattrs ;
std : : string key ;
2020-08-22 12:40:53 +00:00
PXATTRVAL pval = NULL ;
if ( ! parse_xattr_keyval ( pair , key , pval ) ) {
// malformed pair, so skip it.
continue ;
}
xattrs [ key ] = pval ;
}
return xattrs . size ( ) ;
2015-04-20 17:24:57 +00:00
}
static std : : string build_xattrs ( const xattrs_t & xattrs )
{
2020-09-11 09:37:24 +00:00
std : : string strxattrs ( " { " ) ;
2015-06-06 16:39:39 +00:00
2020-08-22 12:40:53 +00:00
bool is_set = false ;
for ( xattrs_t : : const_iterator iter = xattrs . begin ( ) ; iter ! = xattrs . end ( ) ; + + iter ) {
if ( is_set ) {
strxattrs + = ' , ' ;
} else {
is_set = true ;
}
strxattrs + = ' \" ' ;
strxattrs + = iter - > first ;
strxattrs + = " \" : \" " ;
if ( iter - > second ) {
char * base64val = s3fs_base64 ( ( iter - > second ) - > pvalue , ( iter - > second ) - > length ) ;
if ( base64val ) {
strxattrs + = base64val ;
delete [ ] base64val ;
}
}
strxattrs + = ' \" ' ;
2015-06-06 16:39:39 +00:00
}
2020-08-22 12:40:53 +00:00
strxattrs + = ' } ' ;
2015-04-20 17:24:57 +00:00
2020-08-22 12:40:53 +00:00
strxattrs = urlEncode ( strxattrs ) ;
2015-06-06 16:39:39 +00:00
2020-08-22 12:40:53 +00:00
return strxattrs ;
2015-04-20 17:24:57 +00:00
}
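// [Illustrative sketch, not part of s3fs]
// parse_xattrs()/build_xattrs() above keep every xattr in one header value
// shaped like urlEncode("{\"key1\":\"<base64 value>\",\"key2\":\"...\"}").
// This sketch assembles only the inner JSON-like body from values that are
// assumed to be base64 already; the real code additionally base64-encodes
// each value (s3fs_base64) and URL-encodes the whole string (urlEncode).
#include <cstdio>
#include <map>
#include <string>

static std::string join_xattr_body(const std::map<std::string, std::string>& xattrs)
{
    std::string body("{");
    for(std::map<std::string, std::string>::const_iterator it = xattrs.begin(); it != xattrs.end(); ++it){
        if(it != xattrs.begin()){
            body += ',';
        }
        body += '"';
        body += it->first;
        body += "\":\"";
        body += it->second;   // base64 of the raw value
        body += '"';
    }
    body += '}';
    return body;
}

int main()
{
    std::map<std::string, std::string> xattrs;
    xattrs["user.greeting"] = "aGVsbG8=";   // base64("hello")
    std::printf("%s\n", join_xattr_body(xattrs).c_str());   // {"user.greeting":"aGVsbG8="}
    return 0;
}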
static int set_xattrs_to_header ( headers_t & meta , const char * name , const char * value , size_t size , int flags )
{
2020-09-11 09:37:24 +00:00
std : : string strxattrs ;
2020-08-22 12:40:53 +00:00
xattrs_t xattrs ;
2015-04-20 17:24:57 +00:00
2020-08-22 12:40:53 +00:00
headers_t : : iterator iter ;
if ( meta . end ( ) = = ( iter = meta . find ( " x-amz-meta-xattr " ) ) ) {
2018-03-02 20:58:41 +00:00
# if defined(XATTR_REPLACE)
2020-08-22 12:40:53 +00:00
if ( XATTR_REPLACE = = ( flags & XATTR_REPLACE ) ) {
// there is no xattr header but XATTR_REPLACE is specified, so fail.
return - ENOATTR ;
}
2018-03-02 20:58:41 +00:00
# endif
2020-08-22 12:40:53 +00:00
} else {
2018-03-02 20:58:41 +00:00
# if defined(XATTR_CREATE)
2020-08-22 12:40:53 +00:00
if ( XATTR_CREATE = = ( flags & XATTR_CREATE ) ) {
// found an xattr header but XATTR_CREATE is specified, so fail.
return - EEXIST ;
}
2018-03-02 20:58:41 +00:00
# endif
2020-08-22 12:40:53 +00:00
strxattrs = iter - > second ;
}
2015-04-20 17:24:57 +00:00
2020-08-22 12:40:53 +00:00
// get map as xattrs_t
parse_xattrs ( strxattrs , xattrs ) ;
2015-04-20 17:24:57 +00:00
2020-08-22 12:40:53 +00:00
// add the name (overwriting an existing entry and empty names/values are allowed)
xattrs_t : : iterator xiter ;
2020-09-11 09:37:24 +00:00
if ( xattrs . end ( ) ! = ( xiter = xattrs . find ( std : : string ( name ) ) ) ) {
2020-08-22 12:40:53 +00:00
// found the same key, so free the old value.
delete xiter - > second ;
}
2015-06-06 16:39:39 +00:00
2020-08-22 12:40:53 +00:00
PXATTRVAL pval = new XATTRVAL ;
pval - > length = size ;
if ( 0 < size ) {
pval - > pvalue = new unsigned char [ size ] ;
memcpy ( pval - > pvalue , value , size ) ;
} else {
pval - > pvalue = NULL ;
}
2020-09-11 09:37:24 +00:00
xattrs [ std : : string ( name ) ] = pval ;
2015-04-20 17:24:57 +00:00
2020-08-22 12:40:53 +00:00
// build the new strxattrs (URL-encoded by build_xattrs) and set it in headers_t
meta [ " x-amz-meta-xattr " ] = build_xattrs ( xattrs ) ;
2015-04-20 17:24:57 +00:00
2020-08-22 12:40:53 +00:00
free_xattrs ( xattrs ) ;
2015-06-06 16:39:39 +00:00
2020-08-22 12:40:53 +00:00
return 0 ;
2015-04-20 17:24:57 +00:00
}
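// [Illustrative sketch, not part of s3fs]
// set_xattrs_to_header() above implements the standard setxattr(2) flag
// semantics: XATTR_REPLACE fails with ENOATTR when the attribute does not
// exist yet, and XATTR_CREATE fails with EEXIST when it already does.
// A minimal stand-alone model of just that decision, with hypothetical
// flag constants in place of the <sys/xattr.h> values:
#include <cerrno>
#include <map>
#include <string>

#ifndef ENOATTR
#define ENOATTR ENODATA
#endif

static const int MY_XATTR_CREATE  = 0x1;   // hypothetical stand-in
static const int MY_XATTR_REPLACE = 0x2;   // hypothetical stand-in

static int check_xattr_flags(const std::map<std::string, std::string>& xattrs, const std::string& name, int flags)
{
    bool exists = (xattrs.end() != xattrs.find(name));
    if(!exists && 0 != (flags & MY_XATTR_REPLACE)){
        return -ENOATTR;   // replace requested, but nothing to replace
    }
    if(exists && 0 != (flags & MY_XATTR_CREATE)){
        return -EEXIST;    // create requested, but the name already exists
    }
    return 0;              // flags allow the operation
}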
2015-06-25 19:55:47 +00:00
# if defined(__APPLE__)
static int s3fs_setxattr ( const char * path , const char * name , const char * value , size_t size , int flags , uint32_t position )
# else
2015-04-20 17:24:57 +00:00
static int s3fs_setxattr ( const char * path , const char * name , const char * value , size_t size , int flags )
2015-06-25 19:55:47 +00:00
# endif
2015-04-20 17:24:57 +00:00
{
2020-08-22 12:40:53 +00:00
S3FS_PRN_INFO ( " [path=%s][name=%s][value=%p][size=%zu][flags=0x%x] " , path , name , value , size , flags ) ;
2015-06-06 16:39:39 +00:00
2020-08-22 12:40:53 +00:00
if ( ( value & & 0 = = size ) | | ( ! value & & 0 < size ) ) {
S3FS_PRN_ERR ( " Wrong parameter: value(%p), size(%zu) " , value , size ) ;
return 0 ;
}
2015-04-20 17:24:57 +00:00
2015-06-25 19:55:47 +00:00
# if defined(__APPLE__)
2020-08-22 12:40:53 +00:00
if ( position ! = 0 ) {
// No resource fork support
return - EINVAL ;
}
2015-06-25 19:55:47 +00:00
# endif
2020-08-22 12:40:53 +00:00
int result ;
2020-09-11 09:37:24 +00:00
std : : string strpath ;
std : : string newpath ;
std : : string nowcache ;
2020-08-22 12:40:53 +00:00
headers_t meta ;
struct stat stbuf ;
dirtype nDirType = DIRTYPE_UNKNOWN ;
if ( 0 = = strcmp ( path , " / " ) ) {
S3FS_PRN_ERR ( " Could not change mode for mount point. " ) ;
return - EIO ;
}
if ( 0 ! = ( result = check_parent_object_access ( path , X_OK ) ) ) {
return result ;
2020-08-02 13:37:06 +00:00
}
2020-08-22 12:40:53 +00:00
if ( 0 ! = ( result = check_object_owner ( path , & stbuf ) ) ) {
return result ;
2020-08-02 13:37:06 +00:00
}
2020-08-22 12:40:53 +00:00
if ( S_ISDIR ( stbuf . st_mode ) ) {
result = chk_dir_object_type ( path , newpath , strpath , nowcache , & meta , & nDirType ) ;
2020-08-02 13:37:06 +00:00
} else {
2020-08-22 12:40:53 +00:00
strpath = path ;
nowcache = strpath ;
result = get_object_attribute ( strpath . c_str ( ) , NULL , & meta ) ;
}
if ( 0 ! = result ) {
return result ;
2020-08-02 13:37:06 +00:00
}
2020-08-22 12:40:53 +00:00
if ( S_ISDIR ( stbuf . st_mode ) & & IS_REPLACEDIR ( nDirType ) ) {
// Should rebuild the directory object (except for the new type)
// Need to remove the old dir ("dir" etc) and make a new dir ("dir/")
2020-08-02 13:37:06 +00:00
2020-08-22 12:40:53 +00:00
// First, remove the old directory object
if ( 0 ! = ( result = remove_old_type_dir ( strpath , nDirType ) ) ) {
return result ;
}
StatCache : : getStatCacheData ( ) - > DelStat ( nowcache ) ;
2020-08-02 13:37:06 +00:00
2020-08-22 12:40:53 +00:00
// Make new directory object("dir/")
if ( 0 ! = ( result = create_directory_object ( newpath . c_str ( ) , stbuf . st_mode , stbuf . st_mtime , stbuf . st_uid , stbuf . st_gid ) ) ) {
return result ;
}
// need to set xattr header for directory.
strpath = newpath ;
nowcache = strpath ;
2020-08-02 13:37:06 +00:00
}
2015-04-20 17:24:57 +00:00
2020-08-22 12:40:53 +00:00
// set the xattr (common to all object types)
headers_t updatemeta ;
updatemeta [ " x-amz-meta-ctime " ] = str ( time ( NULL ) ) ;
updatemeta [ " x-amz-copy-source " ] = urlEncode ( service_path + bucket + get_realpath ( strpath . c_str ( ) ) ) ;
updatemeta [ " x-amz-metadata-directive " ] = " REPLACE " ;
// check for an opened file handle.
//
// If the file has started a multipart upload because the disk capacity is insufficient,
// we need to put these headers after the upload finishes.
// Or, if the file is merely open, we must update FdEntity's internal meta.
//
2020-09-13 07:49:25 +00:00
AutoFdEntity autoent ;
FdEntity * ent ;
if ( NULL ! = ( ent = autoent . ExistOpen ( path , - 1 , true ) ) ) {
2020-08-22 12:40:53 +00:00
// the file is opened now.
// get xattr and make new xattr
2020-09-11 09:37:24 +00:00
std : : string strxattr ;
2020-08-22 12:40:53 +00:00
if ( ent - > GetXattr ( strxattr ) ) {
updatemeta [ " x-amz-meta-xattr " ] = strxattr ;
} else {
// [NOTE]
// Set an empty xattr.
// The key must be present in order to add an xattr.
ent - > SetXattr ( strxattr ) ;
}
if ( 0 ! = ( result = set_xattrs_to_header ( updatemeta , name , value , size , flags ) ) ) {
return result ;
}
if ( ent - > MergeOrgMeta ( updatemeta ) ) {
// now uploading
// the meta is pending and accumulated to be put after the upload is complete.
S3FS_PRN_INFO ( " meta pending until upload is complete " ) ;
} else {
// allowed to put headers
// updatemeta has already been merged with the orgmeta of the opened file.
if ( 0 ! = put_headers ( strpath . c_str ( ) , updatemeta , true ) ) {
return - EIO ;
}
StatCache : : getStatCacheData ( ) - > DelStat ( nowcache ) ;
}
} else {
// not opened file, then put headers
merge_headers ( meta , updatemeta , true ) ;
// NOTICE: modify xattr from base meta
if ( 0 ! = ( result = set_xattrs_to_header ( meta , name , value , size , flags ) ) ) {
return result ;
}
if ( 0 ! = put_headers ( strpath . c_str ( ) , meta , true ) ) {
return - EIO ;
}
StatCache : : getStatCacheData ( ) - > DelStat ( nowcache ) ;
}
return 0 ;
2015-04-20 17:24:57 +00:00
}
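// [Illustrative sketch, not part of s3fs]
// From the client side, the handler above is reached through the ordinary
// Linux setxattr(2) call on a path inside the s3fs mount point; the mount
// path and attribute below are hypothetical. (On macOS the call takes an
// extra position argument, which is why the handler has two signatures.)
#include <sys/xattr.h>
#include <cstdio>
#include <cstring>

int main()
{
    const char* path  = "/mnt/s3bucket/somefile";   // hypothetical mounted file
    const char* value = "hello";
    if(0 != setxattr(path, "user.greeting", value, std::strlen(value), 0)){
        std::perror("setxattr");
        return 1;
    }
    return 0;
}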
2015-06-25 19:55:47 +00:00
# if defined(__APPLE__)
static int s3fs_getxattr ( const char * path , const char * name , char * value , size_t size , uint32_t position )
# else
2015-04-20 17:24:57 +00:00
static int s3fs_getxattr ( const char * path , const char * name , char * value , size_t size )
2015-06-25 19:55:47 +00:00
# endif
2015-04-20 17:24:57 +00:00
{
2020-08-22 12:40:53 +00:00
S3FS_PRN_INFO ( " [path=%s][name=%s][value=%p][size=%zu] " , path , name , value , size ) ;
2015-04-20 17:24:57 +00:00
2020-08-22 12:40:53 +00:00
if ( ! path | | ! name ) {
return - EIO ;
}
2015-04-20 17:24:57 +00:00
2016-01-11 08:39:17 +00:00
# if defined(__APPLE__)
2020-08-22 12:40:53 +00:00
if ( position ! = 0 ) {
// No resource fork support
return - EINVAL ;
}
2015-06-25 19:55:47 +00:00
# endif
2020-08-22 12:40:53 +00:00
int result ;
headers_t meta ;
xattrs_t xattrs ;
2015-04-20 17:24:57 +00:00
2020-08-22 12:40:53 +00:00
// check parent directory attribute.
if ( 0 ! = ( result = check_parent_object_access ( path , X_OK ) ) ) {
return result ;
}
2015-04-20 17:24:57 +00:00
2020-08-22 12:40:53 +00:00
// get headers
if ( 0 ! = ( result = get_object_attribute ( path , NULL , & meta ) ) ) {
return result ;
}
// get xattrs
headers_t : : iterator hiter = meta . find ( " x-amz-meta-xattr " ) ;
if ( meta . end ( ) = = hiter ) {
// object does not have xattrs
return - ENOATTR ;
}
2020-09-11 09:37:24 +00:00
std : : string strxattrs = hiter - > second ;
2020-08-22 12:40:53 +00:00
parse_xattrs ( strxattrs , xattrs ) ;
// search name
2020-09-11 09:37:24 +00:00
std : : string strname = name ;
xattrs_t : : iterator xiter = xattrs . find ( strname ) ;
2020-08-22 12:40:53 +00:00
if ( xattrs . end ( ) = = xiter ) {
// the name was not found in xattrs
free_xattrs ( xattrs ) ;
return - ENOATTR ;
}
// decode
size_t length = 0 ;
unsigned char * pvalue = NULL ;
if ( NULL ! = xiter - > second ) {
length = xiter - > second - > length ;
pvalue = xiter - > second - > pvalue ;
}
if ( 0 < size ) {
if ( static_cast < size_t > ( size ) < length ) {
// over buffer size
free_xattrs ( xattrs ) ;
return - ERANGE ;
}
if ( pvalue ) {
memcpy ( value , pvalue , length ) ;
}
}
2015-06-06 16:39:39 +00:00
free_xattrs ( xattrs ) ;
2020-08-22 12:40:53 +00:00
return static_cast < int > ( length ) ;
2015-04-20 17:24:57 +00:00
}
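// [Illustrative sketch, not part of s3fs]
// The handler above supports the usual two-call getxattr(2) protocol: call
// once with size 0 to learn the value length (the handler returns length
// without copying), allocate a buffer, then call again. The path and
// attribute name are hypothetical.
#include <sys/types.h>
#include <sys/xattr.h>
#include <cstdio>
#include <vector>

int main()
{
    const char* path = "/mnt/s3bucket/somefile";   // hypothetical mounted file
    ssize_t len = getxattr(path, "user.greeting", NULL, 0);
    if(len < 0){
        std::perror("getxattr(size query)");
        return 1;
    }
    std::vector<char> buf(len > 0 ? len : 1);
    len = getxattr(path, "user.greeting", &buf[0], buf.size());
    if(len < 0){
        std::perror("getxattr");
        return 1;
    }
    std::printf("%.*s\n", static_cast<int>(len), &buf[0]);
    return 0;
}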
static int s3fs_listxattr ( const char * path , char * list , size_t size )
{
2020-08-22 12:40:53 +00:00
S3FS_PRN_INFO ( " [path=%s][list=%p][size=%zu] " , path , list , size ) ;
2015-04-20 17:24:57 +00:00
2020-08-22 12:40:53 +00:00
if ( ! path ) {
return - EIO ;
}
2015-04-20 17:24:57 +00:00
2020-08-22 12:40:53 +00:00
int result ;
headers_t meta ;
xattrs_t xattrs ;
2015-04-20 17:24:57 +00:00
2020-08-22 12:40:53 +00:00
// check parent directory attribute.
if ( 0 ! = ( result = check_parent_object_access ( path , X_OK ) ) ) {
return result ;
}
2015-04-20 17:24:57 +00:00
2020-08-22 12:40:53 +00:00
// get headers
if ( 0 ! = ( result = get_object_attribute ( path , NULL , & meta ) ) ) {
return result ;
}
2015-04-20 17:24:57 +00:00
2020-08-22 12:40:53 +00:00
// get xattrs
headers_t : : iterator iter ;
if ( meta . end ( ) = = ( iter = meta . find ( " x-amz-meta-xattr " ) ) ) {
// object does not have xattrs
return 0 ;
}
2020-09-11 09:37:24 +00:00
std : : string strxattrs = iter - > second ;
2015-06-06 16:39:39 +00:00
2020-08-22 12:40:53 +00:00
parse_xattrs ( strxattrs , xattrs ) ;
2015-04-20 17:24:57 +00:00
2020-08-22 12:40:53 +00:00
// calculate total name length
size_t total = 0 ;
for ( xattrs_t : : const_iterator xiter = xattrs . begin ( ) ; xiter ! = xattrs . end ( ) ; + + xiter ) {
if ( 0 < xiter - > first . length ( ) ) {
total + = xiter - > first . length ( ) + 1 ;
}
2015-04-21 16:18:05 +00:00
}
2015-06-06 16:39:39 +00:00
2020-08-22 12:40:53 +00:00
if ( 0 = = total ) {
free_xattrs ( xattrs ) ;
return 0 ;
}
2015-04-20 17:24:57 +00:00
2020-08-22 12:40:53 +00:00
// check parameters
if ( 0 = = size ) {
free_xattrs ( xattrs ) ;
return total ;
}
if ( ! list | | size < total ) {
free_xattrs ( xattrs ) ;
return - ERANGE ;
}
2015-04-20 17:24:57 +00:00
2020-08-22 12:40:53 +00:00
// copy to list
char * setpos = list ;
for ( xattrs_t : : const_iterator xiter = xattrs . begin ( ) ; xiter ! = xattrs . end ( ) ; + + xiter ) {
if ( 0 < xiter - > first . length ( ) ) {
strcpy ( setpos , xiter - > first . c_str ( ) ) ;
setpos = & setpos [ strlen ( setpos ) + 1 ] ;
}
2015-04-21 16:18:05 +00:00
}
2020-08-22 12:40:53 +00:00
free_xattrs ( xattrs ) ;
2015-06-06 16:39:39 +00:00
2020-08-22 12:40:53 +00:00
return total ;
2015-04-20 17:24:57 +00:00
}
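// [Illustrative sketch, not part of s3fs]
// s3fs_listxattr() above fills "list" with NUL-terminated names laid end to
// end and, for a zero size, returns the total length required. Walking such
// a buffer looks like this; the two names are hypothetical input.
#include <cstddef>
#include <cstdio>
#include <cstring>

int main()
{
    const char list[] = "user.one\0user.two\0";   // as filled by the handler
    size_t total = sizeof(list) - 1;              // drop the string literal's own trailing NUL
    for(const char* p = list; p < list + total; p += std::strlen(p) + 1){
        std::printf("%s\n", p);
    }
    return 0;
}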
static int s3fs_removexattr ( const char * path , const char * name )
{
2020-08-22 12:40:53 +00:00
S3FS_PRN_INFO ( " [path=%s][name=%s] " , path , name ) ;
if ( ! path | | ! name ) {
2020-08-02 13:37:06 +00:00
return - EIO ;
}
2020-08-22 12:40:53 +00:00
int result ;
2020-09-11 09:37:24 +00:00
std : : string strpath ;
std : : string newpath ;
std : : string nowcache ;
2020-08-22 12:40:53 +00:00
headers_t meta ;
xattrs_t xattrs ;
struct stat stbuf ;
dirtype nDirType = DIRTYPE_UNKNOWN ;
if ( 0 = = strcmp ( path , " / " ) ) {
S3FS_PRN_ERR ( " Could not change mode for mount point. " ) ;
return - EIO ;
2020-08-02 13:37:06 +00:00
}
2020-08-22 12:40:53 +00:00
if ( 0 ! = ( result = check_parent_object_access ( path , X_OK ) ) ) {
return result ;
2020-08-02 13:37:06 +00:00
}
2020-08-22 12:40:53 +00:00
if ( 0 ! = ( result = check_object_owner ( path , & stbuf ) ) ) {
return result ;
2015-12-15 23:20:40 +00:00
}
2013-01-19 16:05:07 +00:00
2020-08-22 12:40:53 +00:00
if ( S_ISDIR ( stbuf . st_mode ) ) {
result = chk_dir_object_type ( path , newpath , strpath , nowcache , & meta , & nDirType ) ;
} else {
strpath = path ;
nowcache = strpath ;
result = get_object_attribute ( strpath . c_str ( ) , NULL , & meta ) ;
2016-05-06 04:37:32 +00:00
}
2020-08-22 12:40:53 +00:00
if ( 0 ! = result ) {
return result ;
2016-06-13 07:27:04 +00:00
}
2015-02-24 13:17:59 +00:00
2020-08-22 12:40:53 +00:00
// get xattrs
headers_t : : iterator hiter = meta . find ( " x-amz-meta-xattr " ) ;
if ( meta . end ( ) = = hiter ) {
// object does not have xattrs
return - ENOATTR ;
2017-05-13 07:35:55 +00:00
}
2020-09-11 09:37:24 +00:00
std : : string strxattrs = hiter - > second ;
2016-02-06 18:59:13 +00:00
2020-08-22 12:40:53 +00:00
parse_xattrs ( strxattrs , xattrs ) ;
2018-09-11 05:21:51 +00:00
2020-08-22 12:40:53 +00:00
// check name xattrs
2020-09-11 09:37:24 +00:00
std : : string strname = name ;
xattrs_t : : iterator xiter = xattrs . find ( strname ) ;
2020-08-22 12:40:53 +00:00
if ( xattrs . end ( ) = = xiter ) {
free_xattrs ( xattrs ) ;
return - ENOATTR ;
}
2020-06-28 08:00:41 +00:00
2020-08-22 12:40:53 +00:00
// make new header_t after deleting name xattr
delete xiter - > second ;
xattrs . erase ( xiter ) ;
2010-11-13 23:59:23 +00:00
2020-08-22 12:40:53 +00:00
if ( S_ISDIR ( stbuf . st_mode ) & & IS_REPLACEDIR ( nDirType ) ) {
// Should rebuild directory object(except new type)
// Need to remove old dir("dir" etc) and make new dir("dir/")
2013-01-19 16:05:07 +00:00
2020-08-22 12:40:53 +00:00
// At first, remove directory old object
if ( 0 ! = ( result = remove_old_type_dir ( strpath , nDirType ) ) ) {
return result ;
}
StatCache : : getStatCacheData ( ) - > DelStat ( nowcache ) ;
2020-06-28 08:00:41 +00:00
2020-08-22 12:40:53 +00:00
// Make new directory object("dir/")
if ( 0 ! = ( result = create_directory_object ( newpath . c_str ( ) , stbuf . st_mode , stbuf . st_mtime , stbuf . st_uid , stbuf . st_gid ) ) ) {
free_xattrs ( xattrs ) ;
return result ;
}
2010-11-13 23:59:23 +00:00
2020-08-22 12:40:53 +00:00
// need to set xattr header for directory.
strpath = newpath ;
nowcache = strpath ;
}
2010-11-13 23:59:23 +00:00
2020-08-22 12:40:53 +00:00
// set xattr all object
headers_t updatemeta ;
updatemeta [ " x-amz-copy-source " ] = urlEncode ( service_path + bucket + get_realpath ( strpath . c_str ( ) ) ) ;
updatemeta [ " x-amz-metadata-directive " ] = " REPLACE " ;
if ( ! xattrs . empty ( ) ) {
updatemeta [ " x-amz-meta-xattr " ] = build_xattrs ( xattrs ) ;
} else {
2020-09-11 09:37:24 +00:00
updatemeta [ " x-amz-meta-xattr " ] = std : : string ( " " ) ; // This is a special case. If empty, this header will eventually be removed.
2020-08-22 12:40:53 +00:00
}
free_xattrs ( xattrs ) ;
2013-11-11 13:45:35 +00:00
2020-08-22 12:40:53 +00:00
// check opened file handle.
//
// If the file starts uploading by multipart when the disk capacity is insufficient,
// we need to put these header after finishing upload.
// Or if the file is only open, we must update to FdEntity's internal meta.
//
2020-09-13 07:49:25 +00:00
AutoFdEntity autoent ;
FdEntity * ent ;
if ( NULL ! = ( ent = autoent . ExistOpen ( path , - 1 , true ) ) ) {
2020-08-22 12:40:53 +00:00
// the file is opened now.
if ( ent - > MergeOrgMeta ( updatemeta ) ) {
// now uploading
// the meta is pending and accumulated to be put after the upload is complete.
S3FS_PRN_INFO ( " meta pending until upload is complete " ) ;
} else {
// allow to put header
// updatemeta already merged the orgmeta of the opened files.
if ( updatemeta [ " x-amz-meta-xattr " ] . empty ( ) ) {
updatemeta . erase ( " x-amz-meta-xattr " ) ;
}
if ( 0 ! = put_headers ( strpath . c_str ( ) , updatemeta , true ) ) {
return - EIO ;
}
StatCache : : getStatCacheData ( ) - > DelStat ( nowcache ) ;
}
} else {
// not opened file, then put headers
if ( updatemeta [ " x-amz-meta-xattr " ] . empty ( ) ) {
updatemeta . erase ( " x-amz-meta-xattr " ) ;
}
merge_headers ( meta , updatemeta , true ) ;
if ( 0 ! = put_headers ( strpath . c_str ( ) , meta , true ) ) {
return - EIO ;
}
StatCache : : getStatCacheData ( ) - > DelStat ( nowcache ) ;
}
2013-11-11 13:45:35 +00:00
2020-08-22 12:40:53 +00:00
return 0 ;
2013-11-11 13:45:35 +00:00
}

// s3fs_init calls this function to exit cleanly from the fuse event loop.
//
// There's no way to pass an exit status to the high-level event loop API, so
// this function stores the exit value in a global for main().
static void s3fs_exit_fuseloop(int exit_status)
{
    S3FS_PRN_ERR("Exiting FUSE event loop due to errors\n");
    s3fs_init_deferred_exit_status = exit_status;
    struct fuse_context* ctx = fuse_get_context();
    if(NULL != ctx){
        fuse_exit(ctx->fuse);
    }
}

static void* s3fs_init(struct fuse_conn_info* conn)
{
    S3FS_PRN_INIT_INFO("init v%s(commit:%s) with %s", VERSION, COMMIT_HASH_VAL, s3fs_crypt_lib_name());

    // cache(remove cache dirs at first)
    if(is_remove_cache && (!CacheFileStat::DeleteCacheFileStatDirectory() || !FdManager::DeleteCacheDirectory())){
        S3FS_PRN_DBG("Could not remove cache directory.");
    }

    // check loading IAM role name
    if(load_iamrole){
        // load IAM role name from http://169.254.169.254/latest/meta-data/iam/security-credentials
        //
        S3fsCurl s3fscurl;
        if(!s3fscurl.LoadIAMRoleFromMetaData()){
            S3FS_PRN_CRIT("could not load IAM role name from meta data.");
            s3fs_exit_fuseloop(EXIT_FAILURE);
            return NULL;
        }
        S3FS_PRN_INFO("loaded IAM role name = %s", S3fsCurl::GetIAMRole());
    }

    if(create_bucket){
        int result = do_create_bucket();
        if(result != 0){
            s3fs_exit_fuseloop(result);
            return NULL;
        }
    }

    // Check Bucket
    {
        int result;
        if(EXIT_SUCCESS != (result = s3fs_check_service())){
            s3fs_exit_fuseloop(result);
            return NULL;
        }
    }

    // Investigate system capabilities
#ifndef __APPLE__
    if((unsigned int)conn->capable & FUSE_CAP_ATOMIC_O_TRUNC){
        conn->want |= FUSE_CAP_ATOMIC_O_TRUNC;
    }
#endif

    if((unsigned int)conn->capable & FUSE_CAP_BIG_WRITES){
        conn->want |= FUSE_CAP_BIG_WRITES;
    }

    // Signal object
    if(!S3fsSignals::Initialize()){
        S3FS_PRN_ERR("Failed to initialize signal object, but continue...");
    }

    return NULL;
}

static void s3fs_destroy(void*)
{
    S3FS_PRN_INFO("destroy");

    // Signal object
    if(!S3fsSignals::Destroy()){
        S3FS_PRN_WARN("Failed to clean up signal object.");
    }

    // cache(remove at last)
    if(is_remove_cache && (!CacheFileStat::DeleteCacheFileStatDirectory() || !FdManager::DeleteCacheDirectory())){
        S3FS_PRN_WARN("Could not remove cache directory.");
    }
}

static int s3fs_access(const char* path, int mask)
{
    S3FS_PRN_INFO("[path=%s][mask=%s%s%s%s]", path,
            ((mask & R_OK) == R_OK) ? "R_OK " : "",
            ((mask & W_OK) == W_OK) ? "W_OK " : "",
            ((mask & X_OK) == X_OK) ? "X_OK " : "",
            (mask == F_OK) ? "F_OK" : "");

    int result = check_object_access(path, mask, NULL);
    S3FS_MALLOCTRIM(0);
    return result;
}

//
// If s3fs is called with the wrong region, it receives the following error
// body with a 400 status code:
// "<Error>
//    <Code>AuthorizationHeaderMalformed</Code>
//    <Message>The authorization header is malformed; the region 'us-east-1' is wrong; expecting 'ap-northeast-1'</Message>
//    <Region>ap-northeast-1</Region>
//    <RequestId>...</RequestId>
//    <HostId>...</HostId>
// </Error>"
//
// This is a crude check, but it lets s3fs discover the correct region
// automatically.
//
static bool check_region_error(const char* pbody, size_t len, std::string& expectregion)
{
    if(!pbody){
        return false;
    }

    std::string code;
    if(!simple_parse_xml(pbody, len, "Code", code) || code != "AuthorizationHeaderMalformed"){
        return false;
    }
    if(!simple_parse_xml(pbody, len, "Region", expectregion)){
        return false;
    }
    return true;
}

static int s3fs_check_service()
{
    S3FS_PRN_INFO("check services.");

    // On the first access to S3, verify the IAM role credentials if a role is set.
    if(!S3fsCurl::CheckIAMCredentialUpdate()){
        S3FS_PRN_CRIT("Failed to check IAM role name(%s).", S3fsCurl::GetIAMRole());
        return EXIT_FAILURE;
    }

    S3fsCurl s3fscurl;
    int      res;
    if(0 > (res = s3fscurl.CheckBucket())){
        // get response code
        long responseCode = s3fscurl.GetLastResponseCode();

        // check for a wrong endpoint, and automatically switch the endpoint
        if(300 <= responseCode && responseCode < 500){
            // check region error(for putting message or retrying)
            BodyData*   body = s3fscurl.GetBodyData();
            std::string expectregion;

            if(check_region_error(body->str(), body->size(), expectregion)){
                // [NOTE]
                // If the endpoint is not specified(using us-east-1 region) and
                // an error is encountered accessing a different region, we
                // will retry the check on the expected region.
                // see) https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
                //
                if(is_specified_endpoint){
                    const char* tmp_expect_ep = expectregion.c_str();
                    S3FS_PRN_CRIT("The bucket region is not '%s', it is '%s'. You should specify the 'endpoint=%s' option.", endpoint.c_str(), tmp_expect_ep, tmp_expect_ep);
                }else{
                    // the current endpoint is wrong, so try to connect to the expected region.
                    S3FS_PRN_CRIT("Failed to connect to region '%s'(default), so retrying with region '%s'.", endpoint.c_str(), expectregion.c_str());
                    endpoint = expectregion;
                    if(S3fsCurl::IsSignatureV4()){
                        if(s3host == "http://s3.amazonaws.com"){
                            s3host = "http://s3-" + endpoint + ".amazonaws.com";
                        }else if(s3host == "https://s3.amazonaws.com"){
                            s3host = "https://s3-" + endpoint + ".amazonaws.com";
                        }
                    }
                    // retry the check with the new endpoint
                    s3fscurl.DestroyCurlHandle();
                    res          = s3fscurl.CheckBucket();
                    responseCode = s3fscurl.GetLastResponseCode();
                }
            }
        }

        // try signature v2
        if(0 > res && (responseCode == 400 || responseCode == 403) && S3fsCurl::IsSignatureV4()){
            // switch to sigv2
            S3FS_PRN_CRIT("Failed to connect with signature v4, so retrying with signature version 2.");
            S3fsCurl::SetSignatureV4(false);

            // retry the check with sigv2
            s3fscurl.DestroyCurlHandle();
            res          = s3fscurl.CheckBucket();
            responseCode = s3fscurl.GetLastResponseCode();
        }

        // check errors(after retrying)
        if(0 > res && responseCode != 200 && responseCode != 301){
            if(responseCode == 400){
                S3FS_PRN_CRIT("Bad Request(host=%s) - result of checking service.", s3host.c_str());
            }else if(responseCode == 403){
                S3FS_PRN_CRIT("invalid credentials(host=%s) - result of checking service.", s3host.c_str());
            }else if(responseCode == 404){
                S3FS_PRN_CRIT("bucket not found(host=%s) - result of checking service.", s3host.c_str());
            }else{
                // another error
                S3FS_PRN_CRIT("unable to connect(host=%s) - result of checking service.", s3host.c_str());
            }
            return EXIT_FAILURE;
        }
    }
    s3fscurl.DestroyCurlHandle();

    // make sure the remote mountpath exists and is a directory
    if(!mount_prefix.empty()){
        if(remote_mountpath_exists(mount_prefix.c_str()) != 0){
            S3FS_PRN_CRIT("remote mountpath %s not found.", mount_prefix.c_str());
            return EXIT_FAILURE;
        }
    }
    S3FS_MALLOCTRIM(0);

    return EXIT_SUCCESS;
}

//
// Read and Parse passwd file
//
// Each line of the password file has one of the following formats:
// (1) "accesskey:secretkey"        : AWS format for the default(all) access key/secret key
// (2) "bucket:accesskey:secretkey" : AWS format for a bucket's access key/secret key
// (3) "key=value"                  : Content-dependent KeyValue contents
//
// This function stores the result in a bucketkvmap_t, mapping each bucket
// name to its key/value pairs. If the bucket name is empty(format 1 or 3),
// the bucket name used for the mapping is "\t" or "".
//
// Return:  1 - OK(could parse and set mapping etc.)
//          0 - NG(could not read any value)
//         -1 - Should shutdown immediately
//
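// For illustration only, a passwd file mixing these formats might contain
// lines like the following (the keys are the AWS documentation placeholders,
// not real credentials, and the key=value names assume aws_accesskeyid and
// aws_secretkey are bound to "AWSAccessKeyId" and "AWSSecretKey"):
//
//      # format (2): per-bucket key pair
//      mybucket:AKIAIOSFODNN7EXAMPLE:wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
//      # format (3): key=value contents
//      AWSAccessKeyId=AKIAIOSFODNN7EXAMPLE
//      AWSSecretKey=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
//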
static int parse_passwd_file(bucketkvmap_t& resmap)
{
    std::string          line;
    size_t               first_pos;
    readline_t           linelist;
    readline_t::iterator iter;

    // open passwd file
    std::ifstream PF(passwd_file.c_str());
    if(!PF.good()){
        S3FS_PRN_EXIT("could not open passwd file : %s", passwd_file.c_str());
        return -1;
    }

    // read each line
    while(getline(PF, line)){
        line = trim(line);
        if(line.empty()){
            continue;
        }
        if('#' == line[0]){
            continue;
        }
        if(std::string::npos != line.find_first_of(" \t")){
            S3FS_PRN_EXIT("invalid line in passwd file, found whitespace character.");
            return -1;
        }
        if('[' == line[0]){
            S3FS_PRN_EXIT("invalid line in passwd file, found a bracket \"[\" character.");
            return -1;
        }
        linelist.push_back(line);
    }

    // read '=' type
    kvmap_t kv;
    for(iter = linelist.begin(); iter != linelist.end(); ++iter){
        first_pos = iter->find_first_of("=");
        if(first_pos == std::string::npos){
            continue;
        }
        // formatted by "key=val"
        std::string key = trim(iter->substr(0, first_pos));
        std::string val = trim(iter->substr(first_pos + 1, std::string::npos));
        if(key.empty()){
            continue;
        }
        if(kv.end() != kv.find(key)){
            S3FS_PRN_WARN("same key name(%s) found in passwd file, skip this.", key.c_str());
            continue;
        }
        kv[key] = val;
    }
    // set special key name
    resmap[std::string(keyval_fields_type)] = kv;

    // read ':' type
    for(iter = linelist.begin(); iter != linelist.end(); ++iter){
        first_pos       = iter->find_first_of(":");
        size_t last_pos = iter->find_last_of(":");
        if(first_pos == std::string::npos){
            continue;
        }
        std::string bucketname;
        std::string accesskey;
        std::string secret;
        if(first_pos != last_pos){
            // formatted by "bucket:accesskey:secretkey"
            bucketname = trim(iter->substr(0, first_pos));
            accesskey  = trim(iter->substr(first_pos + 1, last_pos - first_pos - 1));
            secret     = trim(iter->substr(last_pos + 1, std::string::npos));
        }else{
            // formatted by "accesskey:secretkey"
            bucketname = allbucket_fields_type;
            accesskey  = trim(iter->substr(0, first_pos));
            secret     = trim(iter->substr(first_pos + 1, std::string::npos));
        }
        if(resmap.end() != resmap.find(bucketname)){
            S3FS_PRN_EXIT("there are multiple entries for the same bucket(%s) in the passwd file.", (bucketname.empty() ? "default" : bucketname.c_str()));
            return -1;
        }
        kv.clear();
        kv[std::string(aws_accesskeyid)] = accesskey;
        kv[std::string(aws_secretkey)]   = secret;
        resmap[bucketname]               = kv;
    }
    return (resmap.empty() ? 0 : 1);
}

//
// Return:  1 - OK(could read and set access key etc.)
//          0 - NG(could not read)
//         -1 - Should shutdown immediately
//
static int check_for_aws_format(const kvmap_t& kvmap)
{
    std::string str1(aws_accesskeyid);
    std::string str2(aws_secretkey);

    if(kvmap.empty()){
        return 0;
    }
    kvmap_t::const_iterator str1_it = kvmap.find(str1);
    kvmap_t::const_iterator str2_it = kvmap.find(str2);
    if(kvmap.end() == str1_it && kvmap.end() == str2_it){
        return 0;
    }
    if(kvmap.end() == str1_it || kvmap.end() == str2_it){
        S3FS_PRN_EXIT("AWSAccesskey or AWSSecretkey is not specified.");
        return -1;
    }
    if(!S3fsCurl::SetAccessKey(str1_it->second.c_str(), str2_it->second.c_str())){
        S3FS_PRN_EXIT("failed to set access key/secret key.");
        return -1;
    }
    return 1;
}

//
// check_passwd_file_perms
//
// expects that the global passwd_file variable contains
// a non-empty value and is readable by the current user
//
// Checks for overly permissive access to the file, to
// help save users from themselves via a security hole
//
// only two options: return success or error out
//
static int check_passwd_file_perms()
{
    struct stat info;

    // let's get the file info
    if(stat(passwd_file.c_str(), &info) != 0){
        S3FS_PRN_EXIT("unexpected error from stat(%s).", passwd_file.c_str());
        return EXIT_FAILURE;
    }

    // return an error if the file has any "others" permissions
    if((info.st_mode & S_IROTH) ||
       (info.st_mode & S_IWOTH) ||
       (info.st_mode & S_IXOTH))  {
        S3FS_PRN_EXIT("credentials file %s should not have others permissions.", passwd_file.c_str());
        return EXIT_FAILURE;
    }

    // Any local file should not have any group permissions
    // /etc/passwd-s3fs can have group permissions
    if(passwd_file != "/etc/passwd-s3fs"){
        if((info.st_mode & S_IRGRP) ||
           (info.st_mode & S_IWGRP) ||
           (info.st_mode & S_IXGRP))  {
            S3FS_PRN_EXIT("credentials file %s should not have group permissions.", passwd_file.c_str());
            return EXIT_FAILURE;
        }
    }else{
        // "/etc/passwd-s3fs" does not allow group write.
        if((info.st_mode & S_IWGRP)){
            S3FS_PRN_EXIT("credentials file %s should not have group writable permissions.", passwd_file.c_str());
            return EXIT_FAILURE;
        }
    }
    if((info.st_mode & S_IXUSR) || (info.st_mode & S_IXGRP)){
        S3FS_PRN_EXIT("credentials file %s should not have executable permissions.", passwd_file.c_str());
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}
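
// The parser below expects the standard AWS CLI credentials layout. For
// illustration only (placeholder keys; the profile name is whatever
// aws_profile holds, "default" unless overridden):
//
//      [default]
//      aws_access_key_id     = AKIAIOSFODNN7EXAMPLE
//      aws_secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
//      aws_session_token     = <optional session token>
//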
static int read_aws_credentials_file(const std::string& filename)
{
    // open passwd file
    std::ifstream PF(filename.c_str());
    if(!PF.good()){
        return -1;
    }

    std::string profile;
    std::string accesskey;
    std::string secret;
    std::string session_token;

    // read each line
    std::string line;
    while(getline(PF, line)){
        line = trim(line);
        if(line.empty()){
            continue;
        }
        if('#' == line[0]){
            continue;
        }

        if(line.size() > 2 && line[0] == '[' && line[line.size() - 1] == ']'){
            if(profile == aws_profile){
                // already collected the requested profile; stop at the next section
                break;
            }
            profile = line.substr(1, line.size() - 2);
            accesskey.clear();
            secret.clear();
            session_token.clear();
        }

        size_t pos = line.find_first_of('=');
        if(pos == std::string::npos){
            continue;
        }
        std::string key   = trim(line.substr(0, pos));
        std::string value = trim(line.substr(pos + 1, std::string::npos));
        if(key == "aws_access_key_id"){
            accesskey = value;
        }else if(key == "aws_secret_access_key"){
            secret = value;
        }else if(key == "aws_session_token"){
            session_token = value;
        }
    }

    if(profile != aws_profile){
        return EXIT_FAILURE;
    }
    if(session_token.empty()){
        if(is_use_session_token){
            S3FS_PRN_EXIT("AWS session token was expected but wasn't provided in aws/credentials file for profile: %s.", aws_profile.c_str());
            return EXIT_FAILURE;
        }
        if(!S3fsCurl::SetAccessKey(accesskey.c_str(), secret.c_str())){
            S3FS_PRN_EXIT("failed to set internal data for access key/secret key from aws credential file.");
            return EXIT_FAILURE;
        }
    }else{
        if(!S3fsCurl::SetAccessKeyWithSessionToken(accesskey.c_str(), secret.c_str(), session_token.c_str())){
            S3FS_PRN_EXIT("session token is invalid.");
            return EXIT_FAILURE;
        }
    }
    return EXIT_SUCCESS;
}

//
// read_passwd_file
//
// Support for per-bucket credentials
//
// Format for the credentials file:
// [bucket:]AccessKeyId:SecretAccessKey
//
// Lines beginning with # are considered comments
// and ignored, as are empty lines
//
// Uncommented lines without the ":" character are flagged as
// an error, as are lines with spaces or tabs
//
// only one default key pair is allowed, but it is not required
//
static int read_passwd_file()
{
    bucketkvmap_t bucketmap;
    kvmap_t       keyval;
    int           result;

    // if you got here, the password file
    // exists and is readable by the
    // current user, check for permissions
    if(EXIT_SUCCESS != check_passwd_file_perms()){
        return EXIT_FAILURE;
    }

    //
    // parse passwd file
    //
    result = parse_passwd_file(bucketmap);
    if(-1 == result){
        return EXIT_FAILURE;
    }

    //
    // check key=value type format.
    //
    bucketkvmap_t::iterator it = bucketmap.find(keyval_fields_type);
    if(bucketmap.end() != it){
        // aws format
        result = check_for_aws_format(it->second);
        if(-1 == result){
            return EXIT_FAILURE;
        }else if(1 == result){
            // succeeded in setting the keys
            return EXIT_SUCCESS;
        }
    }

    std::string bucket_key = allbucket_fields_type;
    if(!bucket.empty() && bucketmap.end() != bucketmap.find(bucket)){
        bucket_key = bucket;
    }
    it = bucketmap.find(bucket_key);
    if(bucketmap.end() == it){
        S3FS_PRN_EXIT("Could not find the access key/secret key in the passwd file.");
        return EXIT_FAILURE;
    }
    keyval = it->second;
    kvmap_t::iterator aws_accesskeyid_it = keyval.find(aws_accesskeyid);
    kvmap_t::iterator aws_secretkey_it   = keyval.find(aws_secretkey);
    if(keyval.end() == aws_accesskeyid_it || keyval.end() == aws_secretkey_it){
        S3FS_PRN_EXIT("Could not find the access key/secret key in the passwd file.");
        return EXIT_FAILURE;
    }
    if(!S3fsCurl::SetAccessKey(aws_accesskeyid_it->second.c_str(), aws_secretkey_it->second.c_str())){
        S3FS_PRN_EXIT("failed to set internal data for access key/secret key from passwd file.");
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}

//
// get_access_keys
//
// called only when we are not mounting a
// public bucket
//
// Here is the order of precedence for getting the
// keys:
//
// 1 - from the command line (security risk)
// 2 - from a password file specified on the command line
// 3 - from environment variables
// 3a - from the AWS_CREDENTIAL_FILE environment variable
// 3b - from ${HOME}/.aws/credentials
// 4 - from the user's ~/.passwd-s3fs
// 5 - from /etc/passwd-s3fs
//
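// For example, route 3 with placeholder values (illustrative only):
//
//      AWSACCESSKEYID=AKIAIOSFODNN7EXAMPLE \
//      AWSSECRETACCESSKEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY \
//      s3fs mybucket /mnt/s3
//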
static int get_access_keys()
{
    // should be redundant
    if(S3fsCurl::IsPublicBucket()){
        return EXIT_SUCCESS;
    }

    // access key loading is deferred
    if(load_iamrole || is_ecs){
        return EXIT_SUCCESS;
    }

    // 1 - keys specified on the command line
    if(S3fsCurl::IsSetAccessKeys()){
        return EXIT_SUCCESS;
    }

    // 2 - a passwd file was specified on the command line
    if(!passwd_file.empty()){
        std::ifstream PF(passwd_file.c_str());
        if(PF.good()){
            PF.close();
            return read_passwd_file();
        }else{
            S3FS_PRN_EXIT("specified passwd_file is not readable.");
            return EXIT_FAILURE;
        }
    }

    // 3 - environment variables
    char* AWSACCESSKEYID     = getenv("AWSACCESSKEYID");
    char* AWSSECRETACCESSKEY = getenv("AWSSECRETACCESSKEY");
    char* AWSSESSIONTOKEN    = getenv("AWSSESSIONTOKEN");
    if(AWSACCESSKEYID != NULL || AWSSECRETACCESSKEY != NULL){
        if((AWSACCESSKEYID == NULL && AWSSECRETACCESSKEY != NULL) ||
           (AWSACCESSKEYID != NULL && AWSSECRETACCESSKEY == NULL)) {
            S3FS_PRN_EXIT("if environment variable AWSACCESSKEYID is set then AWSSECRETACCESSKEY must be set too.");
            return EXIT_FAILURE;
        }
        S3FS_PRN_INFO2("access key from env variables");
        if(AWSSESSIONTOKEN != NULL){
            S3FS_PRN_INFO2("session token is available");
            if(!S3fsCurl::SetAccessKeyWithSessionToken(AWSACCESSKEYID, AWSSECRETACCESSKEY, AWSSESSIONTOKEN)){
                S3FS_PRN_EXIT("session token is invalid.");
                return EXIT_FAILURE;
            }
        }else{
            S3FS_PRN_INFO2("session token is not available");
            if(is_use_session_token){
                S3FS_PRN_EXIT("environment variable AWSSESSIONTOKEN is expected to be set.");
                return EXIT_FAILURE;
            }
        }
        if(!S3fsCurl::SetAccessKey(AWSACCESSKEYID, AWSSECRETACCESSKEY)){
            S3FS_PRN_EXIT("if one access key is specified, both keys need to be specified.");
            return EXIT_FAILURE;
        }
        return EXIT_SUCCESS;
    }

    // 3a - from the AWS_CREDENTIAL_FILE environment variable
    char* AWS_CREDENTIAL_FILE = getenv("AWS_CREDENTIAL_FILE");
    if(AWS_CREDENTIAL_FILE != NULL){
        passwd_file.assign(AWS_CREDENTIAL_FILE);
        if(!passwd_file.empty()){
            std::ifstream PF(passwd_file.c_str());
            if(PF.good()){
                PF.close();
                return read_passwd_file();
            }else{
                S3FS_PRN_EXIT("AWS_CREDENTIAL_FILE: \"%s\" is not readable.", passwd_file.c_str());
                return EXIT_FAILURE;
            }
        }
    }

    // 3b - check ${HOME}/.aws/credentials
    std::string aws_credentials = std::string(getpwuid(getuid())->pw_dir) + "/.aws/credentials";
    if(read_aws_credentials_file(aws_credentials) == EXIT_SUCCESS){
        return EXIT_SUCCESS;
    }else if(aws_profile != "default"){
        S3FS_PRN_EXIT("Could not find profile: %s in file: %s", aws_profile.c_str(), aws_credentials.c_str());
        return EXIT_FAILURE;
    }

    // 4 - from the default location in the user's home directory
    char* HOME = getenv("HOME");
    if(HOME != NULL){
        passwd_file.assign(HOME);
        passwd_file.append("/.passwd-s3fs");
        std::ifstream PF(passwd_file.c_str());
        if(PF.good()){
            PF.close();
            if(EXIT_SUCCESS != read_passwd_file()){
                return EXIT_FAILURE;
            }
            // It is possible that the user's file was there but
            // contained no key pairs i.e. commented out
            // in that case, go look in the final location
            if(S3fsCurl::IsSetAccessKeys()){
                return EXIT_SUCCESS;
            }
        }
    }

    // 5 - from the system default location
    passwd_file.assign("/etc/passwd-s3fs");
    std::ifstream PF(passwd_file.c_str());
    if(PF.good()){
        PF.close();
        return read_passwd_file();
    }

    S3FS_PRN_EXIT("could not determine how to establish security credentials.");
    return EXIT_FAILURE;
}

//
// Check & Set attributes for mount point.
//
static bool set_mountpoint_attribute(struct stat& mpst)
{
    mp_uid  = geteuid();
    mp_gid  = getegid();
    mp_mode = S_IFDIR | (allow_other ? (is_mp_umask ? (~mp_umask & (S_IRWXU | S_IRWXG | S_IRWXO)) : (S_IRWXU | S_IRWXG | S_IRWXO)) : S_IRWXU);

    S3FS_PRN_INFO2("PROC(uid=%u, gid=%u) - MountPoint(uid=%u, gid=%u, mode=%04o)",
          (unsigned int)mp_uid, (unsigned int)mp_gid, (unsigned int)(mpst.st_uid), (unsigned int)(mpst.st_gid), mpst.st_mode);

    // check owner
    if(0 == mp_uid || mpst.st_uid == mp_uid){
        return true;
    }
    // check group permission
    if(mpst.st_gid == mp_gid || 1 == is_uid_include_group(mp_uid, mpst.st_gid)){
        if(S_IRWXG == (mpst.st_mode & S_IRWXG)){
            return true;
        }
    }
    // check other permission
    if(S_IRWXO == (mpst.st_mode & S_IRWXO)){
        return true;
    }
    return false;
}
//
// Set bucket and mount_prefix based on passed bucket name.
//
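// For example (hypothetical arguments), set_bucket("mybucket:/backup/")
// yields bucket = "mybucket" and mount_prefix = "/backup" (the trailing
// slash is stripped), while set_bucket("mybucket") leaves mount_prefix empty.
//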
static int set_bucket(const char* arg)
{
    char* bucket_name = (char*)arg;
    if(strstr(arg, ":")){
        if(strstr(arg, "://")){
            S3FS_PRN_EXIT("bucket name and path(\"%s\") is wrong, it must be \"bucket[:/path]\".", arg);
            return -1;
        }
        bucket = strtok(bucket_name, ":");
        char* pmount_prefix = strtok(NULL, "");
        if(pmount_prefix){
            if(0 == strlen(pmount_prefix) || '/' != pmount_prefix[0]){
                S3FS_PRN_EXIT("path(%s) must be prefixed with \"/\".", pmount_prefix);
                return -1;
            }
            mount_prefix = pmount_prefix;
            // remove trailing slash
            if(mount_prefix.at(mount_prefix.size() - 1) == '/'){
                mount_prefix = mount_prefix.substr(0, mount_prefix.size() - 1);
            }
        }
    }else{
        bucket = arg;
    }
    return 0;
}

// This is repeatedly called by the fuse option parser.
// If the key is equal to FUSE_OPT_KEY_OPT, it's an option passed in, prefixed
// by '-' or '--', e.g.: -f -d -ousecache=/tmp
//
// If the key is equal to FUSE_OPT_KEY_NONOPT, it's either the bucket name
// or the mountpoint. The bucket name will always come before the mountpoint.
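//
// For instance, with the hypothetical invocation "s3fs mybucket /mnt/s3 -o uid=1000",
// this callback sees "mybucket" and "/mnt/s3" as FUSE_OPT_KEY_NONOPT (in that
// order) and "uid=1000" as FUSE_OPT_KEY_OPT.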
2013-07-05 02:28:31 +00:00
static int my_fuse_opt_proc ( void * data , const char * arg , int key , struct fuse_args * outargs )
2013-04-16 08:05:24 +00:00
{
2020-08-22 12:40:53 +00:00
int ret ;
if ( key = = FUSE_OPT_KEY_NONOPT ) {
// the first NONOPT option is the bucket name
if ( bucket . empty ( ) ) {
if ( ( ret = set_bucket ( arg ) ) ) {
return ret ;
}
return 0 ;
} else if ( ! strcmp ( arg , " s3fs " ) ) {
return 0 ;
}
2010-11-13 23:59:23 +00:00
2020-08-22 12:40:53 +00:00
// the second NONOPT option is the mountpoint(not utility mode)
if ( mountpoint . empty ( ) & & NO_UTILITY_MODE = = utility_mode ) {
// save the mountpoint and do some basic error checking
mountpoint = arg ;
struct stat stbuf ;
2011-06-26 00:37:52 +00:00
2020-08-22 12:40:53 +00:00
if ( stat ( arg , & stbuf ) = = - 1 ) {
S3FS_PRN_EXIT ( " unable to access MOUNTPOINT %s: %s " , mountpoint . c_str ( ) , strerror ( errno ) ) ;
return - 1 ;
}
if ( ! ( S_ISDIR ( stbuf . st_mode ) ) ) {
S3FS_PRN_EXIT ( " MOUNTPOINT: %s is not a directory. " , mountpoint . c_str ( ) ) ;
return - 1 ;
}
if ( ! set_mountpoint_attribute ( stbuf ) ) {
S3FS_PRN_EXIT ( " MOUNTPOINT: %s permission denied. " , mountpoint . c_str ( ) ) ;
return - 1 ;
}
2013-11-11 13:45:35 +00:00
2020-08-22 12:40:53 +00:00
if ( ! nonempty ) {
struct dirent * ent ;
DIR * dp = opendir ( mountpoint . c_str ( ) ) ;
if ( dp = = NULL ) {
S3FS_PRN_EXIT ( " failed to open MOUNTPOINT: %s: %s " , mountpoint . c_str ( ) , strerror ( errno ) ) ;
return - 1 ;
}
while ( ( ent = readdir ( dp ) ) ! = NULL ) {
if ( strcmp ( ent - > d_name , " . " ) ! = 0 & & strcmp ( ent - > d_name , " .. " ) ! = 0 ) {
closedir ( dp ) ;
S3FS_PRN_EXIT ( " MOUNTPOINT directory %s is not empty. if you are sure this is safe, can use the 'nonempty' mount option. " , mountpoint . c_str ( ) ) ;
return - 1 ;
}
}
closedir ( dp ) ;
}
return 1 ;
2013-11-11 13:45:35 +00:00
}
2020-08-22 12:40:53 +00:00
// Unknown option
if ( NO_UTILITY_MODE = = utility_mode ) {
S3FS_PRN_EXIT ( " specified unknown third option(%s). " , arg ) ;
2014-07-19 19:02:55 +00:00
} else {
2020-08-22 12:40:53 +00:00
S3FS_PRN_EXIT ( " specified unknown second option(%s). you don't need to specify second option(mountpoint) for utility mode(-u). " , arg ) ;
2015-10-06 14:46:14 +00:00
}
2020-08-22 12:40:53 +00:00
return - 1 ;
2015-10-06 14:46:14 +00:00
2020-08-22 12:40:53 +00:00
} else if ( key = = FUSE_OPT_KEY_OPT ) {
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " uid= " ) ) {
2020-08-22 12:40:53 +00:00
s3fs_uid = get_uid ( strchr ( arg , ' = ' ) + sizeof ( char ) ) ;
if ( 0 ! = geteuid ( ) & & 0 = = s3fs_uid ) {
S3FS_PRN_EXIT ( " root user can only specify uid=0. " ) ;
return - 1 ;
}
is_s3fs_uid = true ;
return 1 ; // continue for fuse option
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " gid= " ) ) {
2020-08-22 12:40:53 +00:00
s3fs_gid = get_gid ( strchr ( arg , ' = ' ) + sizeof ( char ) ) ;
if ( 0 ! = getegid ( ) & & 0 = = s3fs_gid ) {
S3FS_PRN_EXIT ( " root user can only specify gid=0. " ) ;
return - 1 ;
}
is_s3fs_gid = true ;
return 1 ; // continue for fuse option
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " umask= " ) ) {
2020-08-22 12:40:53 +00:00
s3fs_umask = cvt_strtoofft ( strchr ( arg , ' = ' ) + sizeof ( char ) , /*base=*/ 8 ) ;
s3fs_umask & = ( S_IRWXU | S_IRWXG | S_IRWXO ) ;
is_s3fs_umask = true ;
return 1 ; // continue for fuse option
}
if ( 0 = = strcmp ( arg , " allow_other " ) ) {
allow_other = true ;
return 1 ; // continue for fuse option
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " mp_umask= " ) ) {
2020-08-22 12:40:53 +00:00
mp_umask = cvt_strtoofft ( strchr ( arg , ' = ' ) + sizeof ( char ) , /*base=*/ 8 ) ;
mp_umask & = ( S_IRWXU | S_IRWXG | S_IRWXO ) ;
is_mp_umask = true ;
return 0 ;
2015-10-06 14:46:14 +00:00
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " default_acl= " ) ) {
2020-08-22 12:40:53 +00:00
const char * acl_string = strchr ( arg , ' = ' ) + sizeof ( char ) ;
acl_t acl = acl_t : : from_str ( acl_string ) ;
if ( acl = = acl_t : : UNKNOWN ) {
S3FS_PRN_EXIT ( " unknown value for default_acl: %s " , acl_string ) ;
return - 1 ;
}
S3fsCurl : : SetDefaultAcl ( acl ) ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " retries= " ) ) {
2020-08-22 12:40:53 +00:00
off_t retries = static_cast < int > ( cvt_strtoofft ( strchr ( arg , ' = ' ) + sizeof ( char ) ) ) ;
if ( retries = = 0 ) {
S3FS_PRN_EXIT ( " retries must be greater than zero " ) ;
return - 1 ;
}
S3fsCurl : : SetRetries ( retries ) ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " use_cache= " ) ) {
2020-08-22 12:40:53 +00:00
FdManager : : SetCacheDir ( strchr ( arg , ' = ' ) + sizeof ( char ) ) ;
return 0 ;
}
2020-09-11 08:38:20 +00:00
if ( 0 = = strcmp ( arg , " check_cache_dir_exist " ) ) {
2020-08-22 12:40:53 +00:00
FdManager : : SetCheckCacheDirExist ( true ) ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " del_cache " ) ) {
is_remove_cache = true ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " multireq_max= " ) ) {
2020-08-22 12:40:53 +00:00
int maxreq = static_cast < int > ( cvt_strtoofft ( strchr ( arg , ' = ' ) + sizeof ( char ) ) ) ;
S3fsCurl : : SetMaxMultiRequest ( maxreq ) ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " nonempty " ) ) {
nonempty = true ;
return 1 ; // need to continue for fuse.
}
if ( 0 = = strcmp ( arg , " nomultipart " ) ) {
nomultipart = true ;
return 0 ;
}
// old format for storage_class
2020-09-17 10:45:36 +00:00
if ( 0 = = strcmp ( arg , " use_rrs " ) | | is_prefix ( arg , " use_rrs= " ) ) {
2020-08-22 12:40:53 +00:00
off_t rrs = 1 ;
// for an old format.
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " use_rrs= " ) ) {
2020-08-22 12:40:53 +00:00
rrs = cvt_strtoofft ( strchr ( arg , ' = ' ) + sizeof ( char ) ) ;
}
if ( 0 = = rrs ) {
S3fsCurl : : SetStorageClass ( storage_class_t : : STANDARD ) ;
} else if ( 1 = = rrs ) {
S3fsCurl : : SetStorageClass ( storage_class_t : : REDUCED_REDUNDANCY ) ;
} else {
S3FS_PRN_EXIT ( " poorly formed argument to option: use_rrs " ) ;
return - 1 ;
}
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " storage_class= " ) ) {
2020-08-22 12:40:53 +00:00
const char * storage_class_str = strchr ( arg , ' = ' ) + sizeof ( char ) ;
storage_class_t storage_class = storage_class_t : : from_str ( storage_class_str ) ;
if ( storage_class = = storage_class_t : : UNKNOWN ) {
S3FS_PRN_EXIT ( " unknown value for storage_class: %s " , storage_class_str ) ;
return - 1 ;
}
S3fsCurl : : SetStorageClass ( storage_class ) ;
return 0 ;
}
//
2015-10-06 14:46:14 +00:00
// [NOTE]
2020-08-22 12:40:53 +00:00
// use_sse Set Server Side Encryption type to SSE-S3
// use_sse=1
// use_sse=file Set Server Side Encryption type to Custom key(SSE-C) and load custom keys
// use_sse=custom(c):file
// use_sse=custom(c) Set Server Side Encryption type to Custom key(SSE-C)
// use_sse=kmsid(k):kms-key-id Set Server Side Encryption type to AWS Key Management key id(SSE-KMS) and load the KMS id
// use_sse=kmsid(k) Set Server Side Encryption type to AWS Key Management key id(SSE-KMS)
2015-10-06 14:46:14 +00:00
//
2020-08-22 12:40:53 +00:00
// load_sse_c=file Load Server Side Encryption custom keys
//
// AWSSSECKEYS Environment variable for loading Server Side Encryption custom keys
// AWSSSEKMSID Environment variable for loading the Server Side Encryption KMS key id
//
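// Illustrative mount examples for these forms(assumed typical usage;
// bucket, mountpoint, key id and key file paths are placeholders):
//   s3fs mybucket /mnt/s3 -o use_sse                         SSE-S3
//   s3fs mybucket /mnt/s3 -o use_sse=kmsid:my-kms-key-id     SSE-KMS
//   s3fs mybucket /mnt/s3 -o use_sse=custom:/path/sse.keys   SSE-C
//   s3fs mybucket /mnt/s3 -o load_sse_c=/path/sse.keys       load SSE-C keys only
//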
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " use_sse " ) ) {
2020-08-22 12:40:53 +00:00
if ( 0 = = strcmp ( arg , " use_sse " ) | | 0 = = strcmp ( arg , " use_sse=1 " ) ) { // use_sse=1 is an old-style parameter
// sse type is SSE_S3
if ( ! S3fsCurl : : IsSseDisable ( ) & & ! S3fsCurl : : IsSseS3Type ( ) ) {
S3FS_PRN_EXIT ( " already set SSE another type, so conflict use_sse option or environment. " ) ;
return - 1 ;
}
S3fsCurl : : SetSseType ( sse_type_t : : SSE_S3 ) ;
} else if ( 0 = = strcmp ( arg , " use_sse=kmsid " ) | | 0 = = strcmp ( arg , " use_sse=k " ) ) {
// sse type is SSE_KMS without kmsid(expecting the id to be loaded from the environment)
if ( ! S3fsCurl : : IsSseDisable ( ) & & ! S3fsCurl : : IsSseKmsType ( ) ) {
S3FS_PRN_EXIT ( " already set SSE another type, so conflict use_sse option or environment. " ) ;
return - 1 ;
}
if ( ! S3fsCurl : : IsSetSseKmsId ( ) ) {
S3FS_PRN_EXIT ( " use_sse=kms but not loaded kms id by environment. " ) ;
return - 1 ;
}
S3fsCurl : : SetSseType ( sse_type_t : : SSE_KMS ) ;
2020-09-17 10:45:36 +00:00
} else if ( is_prefix ( arg , " use_sse=kmsid: " ) | | is_prefix ( arg , " use_sse=k: " ) ) {
2020-08-22 12:40:53 +00:00
// sse type is SSE_KMS with kmsid
if ( ! S3fsCurl : : IsSseDisable ( ) & & ! S3fsCurl : : IsSseKmsType ( ) ) {
S3FS_PRN_EXIT ( " already set SSE another type, so conflict use_sse option or environment. " ) ;
return - 1 ;
}
const char * kmsid ;
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " use_sse=kmsid: " ) ) {
2020-08-22 12:40:53 +00:00
kmsid = & arg [ strlen ( " use_sse=kmsid: " ) ] ;
} else {
kmsid = & arg [ strlen ( " use_sse=k: " ) ] ;
}
if ( ! S3fsCurl : : SetSseKmsid ( kmsid ) ) {
S3FS_PRN_EXIT ( " failed to load use_sse kms id. " ) ;
return - 1 ;
}
S3fsCurl : : SetSseType ( sse_type_t : : SSE_KMS ) ;
} else if ( 0 = = strcmp ( arg , " use_sse=custom " ) | | 0 = = strcmp ( arg , " use_sse=c " ) ) {
// sse type is SSE_C without custom keys(expecting the keys to be loaded from the environment or by the load_sse_c option)
if ( ! S3fsCurl : : IsSseDisable ( ) & & ! S3fsCurl : : IsSseCType ( ) ) {
S3FS_PRN_EXIT ( " already set SSE another type, so conflict use_sse option or environment. " ) ;
return - 1 ;
}
// [NOTE]
// do not check here whether the custom keys exist.
//
S3fsCurl : : SetSseType ( sse_type_t : : SSE_C ) ;
2020-09-17 10:45:36 +00:00
} else if ( is_prefix ( arg , " use_sse=custom: " ) | | is_prefix ( arg , " use_sse=c: " ) ) {
2020-08-22 12:40:53 +00:00
// sse type is SSE_C with custom keys
if ( ! S3fsCurl : : IsSseDisable ( ) & & ! S3fsCurl : : IsSseCType ( ) ) {
S3FS_PRN_EXIT ( " already set SSE another type, so conflict use_sse option or environment. " ) ;
return - 1 ;
}
const char * ssecfile ;
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " use_sse=custom: " ) ) {
2020-08-22 12:40:53 +00:00
ssecfile = & arg [ strlen ( " use_sse=custom: " ) ] ;
} else {
ssecfile = & arg [ strlen ( " use_sse=c: " ) ] ;
}
if ( ! S3fsCurl : : SetSseCKeys ( ssecfile ) ) {
S3FS_PRN_EXIT ( " failed to load use_sse custom key file(%s). " , ssecfile ) ;
return - 1 ;
}
S3fsCurl : : SetSseType ( sse_type_t : : SSE_C ) ;
} else if ( 0 = = strcmp ( arg , " use_sse= " ) ) { // this type is old style(parameter is custom key file path)
// SSE_C with custom keys.
const char * ssecfile = & arg [ strlen ( " use_sse= " ) ] ;
if ( ! S3fsCurl : : SetSseCKeys ( ssecfile ) ) {
S3FS_PRN_EXIT ( " failed to load use_sse custom key file(%s). " , ssecfile ) ;
return - 1 ;
}
S3fsCurl : : SetSseType ( sse_type_t : : SSE_C ) ;
} else {
// never come here.
S3FS_PRN_EXIT ( " something wrong use_sse option. " ) ;
return - 1 ;
}
return 0 ;
2015-10-06 14:46:14 +00:00
}
2020-08-22 12:40:53 +00:00
// [NOTE]
// Only load the SSE custom keys here; note that they can be set without setting an sse type.
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " load_sse_c= " ) ) {
2020-08-22 12:40:53 +00:00
const char * ssecfile = & arg [ strlen ( " load_sse_c= " ) ] ;
if ( ! S3fsCurl : : SetSseCKeys ( ssecfile ) ) {
S3FS_PRN_EXIT ( " failed to load use_sse custom key file(%s). " , ssecfile ) ;
return - 1 ;
}
return 0 ;
2015-10-06 14:46:14 +00:00
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " ssl_verify_hostname= " ) ) {
2020-08-22 12:40:53 +00:00
long sslvh = static_cast < long > ( cvt_strtoofft ( strchr ( arg , ' = ' ) + sizeof ( char ) ) ) ;
if ( - 1 = = S3fsCurl : : SetSslVerifyHostname ( sslvh ) ) {
S3FS_PRN_EXIT ( " poorly formed argument to option: ssl_verify_hostname. " ) ;
return - 1 ;
}
return 0 ;
2014-07-19 19:02:55 +00:00
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " passwd_file= " ) ) {
2020-08-22 12:40:53 +00:00
passwd_file = strchr ( arg , ' = ' ) + sizeof ( char ) ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " ibm_iam_auth " ) ) {
S3fsCurl : : SetIsIBMIAMAuth ( true ) ;
S3fsCurl : : SetIAMCredentialsURL ( " https://iam.bluemix.net/oidc/token " ) ;
S3fsCurl : : SetIAMTokenField ( " access_token " ) ;
S3fsCurl : : SetIAMExpiryField ( " expiration " ) ;
S3fsCurl : : SetIAMFieldCount ( 2 ) ;
is_ibm_iam_auth = true ;
return 0 ;
}
2020-09-11 08:38:20 +00:00
if ( 0 = = strcmp ( arg , " use_session_token " ) ) {
2020-08-22 12:40:53 +00:00
is_use_session_token = true ;
2020-09-10 23:09:36 +00:00
return 0 ;
2020-08-22 12:40:53 +00:00
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " ibm_iam_endpoint= " ) ) {
2020-08-22 12:40:53 +00:00
std : : string endpoint_url ;
std : : string iam_endpoint = strchr ( arg , ' = ' ) + sizeof ( char ) ;
2020-09-11 09:37:24 +00:00
// Check url for http / https protocol
2020-08-22 12:40:53 +00:00
if ( ( iam_endpoint . compare ( 0 , 8 , " https:// " ) ! = 0 ) & & ( iam_endpoint . compare ( 0 , 7 , " http:// " ) ! = 0 ) ) {
S3FS_PRN_EXIT ( " option ibm_iam_endpoint has invalid format, missing http / https protocol " ) ;
return - 1 ;
}
endpoint_url = iam_endpoint + " /oidc/token " ;
S3fsCurl : : SetIAMCredentialsURL ( endpoint_url . c_str ( ) ) ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " ecs " ) ) {
if ( is_ibm_iam_auth ) {
S3FS_PRN_EXIT ( " option ecs cannot be used in conjunction with ibm " ) ;
return - 1 ;
}
S3fsCurl : : SetIsECS ( true ) ;
S3fsCurl : : SetIAMCredentialsURL ( " http://169.254.170.2 " ) ;
S3fsCurl : : SetIAMFieldCount ( 5 ) ;
is_ecs = true ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " iam_role " ) ) {
2020-08-22 12:40:53 +00:00
if ( is_ecs | | is_ibm_iam_auth ) {
S3FS_PRN_EXIT ( " option iam_role cannot be used in conjunction with ecs or ibm " ) ;
return - 1 ;
}
if ( 0 = = strcmp ( arg , " iam_role " ) | | 0 = = strcmp ( arg , " iam_role=auto " ) ) {
// loading IAM role name in s3fs_init(), because we need to wait initializing curl.
//
load_iamrole = true ;
return 0 ;
2020-09-17 10:45:36 +00:00
} else if ( is_prefix ( arg , " iam_role= " ) ) {
2020-08-22 12:40:53 +00:00
const char * role = strchr ( arg , ' = ' ) + sizeof ( char ) ;
S3fsCurl : : SetIAMRole ( role ) ;
load_iamrole = false ;
return 0 ;
}
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " profile= " ) ) {
2020-08-22 12:40:53 +00:00
aws_profile = strchr ( arg , ' = ' ) + sizeof ( char ) ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " public_bucket= " ) ) {
2020-08-22 12:40:53 +00:00
off_t pubbucket = cvt_strtoofft ( strchr ( arg , ' = ' ) + sizeof ( char ) ) ;
if ( 1 = = pubbucket ) {
S3fsCurl : : SetPublicBucket ( true ) ;
// [NOTE]
// if the bucket is public(without credentials), s3 does not allow the copy api,
// so s3fs sets nocopyapi mode.
//
nocopyapi = true ;
} else if ( 0 = = pubbucket ) {
S3fsCurl : : SetPublicBucket ( false ) ;
} else {
S3FS_PRN_EXIT ( " poorly formed argument to option: public_bucket. " ) ;
return - 1 ;
}
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " bucket= " ) ) {
2020-08-22 12:40:53 +00:00
std : : string bname = strchr ( arg , ' = ' ) + sizeof ( char ) ;
if ( ( ret = set_bucket ( bname . c_str ( ) ) ) ) {
return ret ;
}
return 0 ;
}
if ( 0 = = strcmp ( arg , " no_check_certificate " ) ) {
S3fsCurl : : SetCheckCertificate ( false ) ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " connect_timeout= " ) ) {
2020-08-22 12:40:53 +00:00
long contimeout = static_cast < long > ( cvt_strtoofft ( strchr ( arg , ' = ' ) + sizeof ( char ) ) ) ;
S3fsCurl : : SetConnectTimeout ( contimeout ) ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " readwrite_timeout= " ) ) {
2020-08-22 12:40:53 +00:00
time_t rwtimeout = static_cast < time_t > ( cvt_strtoofft ( strchr ( arg , ' = ' ) + sizeof ( char ) ) ) ;
S3fsCurl : : SetReadwriteTimeout ( rwtimeout ) ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " list_object_max_keys= " ) ) {
2020-08-22 12:40:53 +00:00
int max_keys = static_cast < int > ( cvt_strtoofft ( strchr ( arg , ' = ' ) + sizeof ( char ) ) ) ;
if ( max_keys < 1000 ) {
S3FS_PRN_EXIT ( " argument should be over 1000: list_object_max_keys " ) ;
return - 1 ;
}
max_keys_list_object = max_keys ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " max_stat_cache_size= " ) ) {
2020-08-22 12:40:53 +00:00
unsigned long cache_size = static_cast < unsigned long > ( cvt_strtoofft ( strchr ( arg , ' = ' ) + sizeof ( char ) ) ) ;
StatCache : : getStatCacheData ( ) - > SetCacheSize ( cache_size ) ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " stat_cache_expire= " ) ) {
2020-08-22 12:40:53 +00:00
time_t expr_time = static_cast < time_t > ( cvt_strtoofft ( strchr ( arg , ' = ' ) + sizeof ( char ) ) ) ;
StatCache : : getStatCacheData ( ) - > SetExpireTime ( expr_time ) ;
return 0 ;
}
// [NOTE]
// This option is for compatibility with old versions.
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " stat_cache_interval_expire= " ) ) {
2020-08-22 12:40:53 +00:00
time_t expr_time = static_cast < time_t > ( cvt_strtoofft ( strchr ( arg , ' = ' ) + sizeof ( char ) ) ) ;
StatCache : : getStatCacheData ( ) - > SetExpireTime ( expr_time , true ) ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " enable_noobj_cache " ) ) {
StatCache : : getStatCacheData ( ) - > EnableCacheNoObject ( ) ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " nodnscache " ) ) {
S3fsCurl : : SetDnsCache ( false ) ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " nosscache " ) ) {
S3fsCurl : : SetSslSessionCache ( false ) ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " parallel_count= " ) | | is_prefix ( arg , " parallel_upload= " ) ) {
2020-08-22 12:40:53 +00:00
int maxpara = static_cast < int > ( cvt_strtoofft ( strchr ( arg , ' = ' ) + sizeof ( char ) ) ) ;
if ( 0 > = maxpara ) {
S3FS_PRN_EXIT ( " argument should be over 1: parallel_count " ) ;
return - 1 ;
}
S3fsCurl : : SetMaxParallelCount ( maxpara ) ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " fd_page_size= " ) ) {
2020-08-22 12:40:53 +00:00
S3FS_PRN_ERR ( " option fd_page_size is no longer supported, so skip this option. " ) ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " multipart_size= " ) ) {
2020-08-22 12:40:53 +00:00
off_t size = static_cast < off_t > ( cvt_strtoofft ( strchr ( arg , ' = ' ) + sizeof ( char ) ) ) ;
if ( ! S3fsCurl : : SetMultipartSize ( size ) ) {
S3FS_PRN_EXIT ( " multipart_size option must be at least 5 MB. " ) ;
return - 1 ;
}
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " ensure_diskfree= " ) ) {
2020-08-22 12:40:53 +00:00
off_t dfsize = cvt_strtoofft ( strchr ( arg , ' = ' ) + sizeof ( char ) ) * 1024 * 1024 ;
if ( dfsize < S3fsCurl : : GetMultipartSize ( ) ) {
S3FS_PRN_WARN ( " specified size to ensure disk free space is smaller than multipart size, so set multipart size to it. " ) ;
dfsize = S3fsCurl : : GetMultipartSize ( ) ;
}
FdManager : : SetEnsureFreeDiskSpace ( dfsize ) ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " singlepart_copy_limit= " ) ) {
2020-08-22 12:40:53 +00:00
singlepart_copy_limit = static_cast < int64_t > ( cvt_strtoofft ( strchr ( arg , ' = ' ) + sizeof ( char ) ) ) * 1024 ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " ahbe_conf= " ) ) {
2020-09-11 09:37:24 +00:00
std : : string ahbe_conf = strchr ( arg , ' = ' ) + sizeof ( char ) ;
2020-08-22 12:40:53 +00:00
if ( ! AdditionalHeader : : get ( ) - > Load ( ahbe_conf . c_str ( ) ) ) {
S3FS_PRN_EXIT ( " failed to load ahbe_conf file(%s). " , ahbe_conf . c_str ( ) ) ;
return - 1 ;
}
AdditionalHeader : : get ( ) - > Dump ( ) ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " noxmlns " ) ) {
noxmlns = true ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " nomixupload " ) ) {
FdEntity : : SetNoMixMultipart ( ) ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " nocopyapi " ) ) {
nocopyapi = true ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " norenameapi " ) ) {
norenameapi = true ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " complement_stat " ) ) {
complement_stat = true ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " notsup_compat_dir " ) ) {
support_compat_dir = false ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " enable_content_md5 " ) ) {
S3fsCurl : : SetContentMd5 ( true ) ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " host= " ) ) {
2020-08-22 12:40:53 +00:00
s3host = strchr ( arg , ' = ' ) + sizeof ( char ) ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " servicepath= " ) ) {
2020-08-22 12:40:53 +00:00
service_path = strchr ( arg , ' = ' ) + sizeof ( char ) ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " url= " ) ) {
2020-08-22 12:40:53 +00:00
s3host = strchr ( arg , ' = ' ) + sizeof ( char ) ;
// strip the trailing '/', if any, off the end of the host
2020-09-11 09:37:24 +00:00
2020-08-22 12:40:53 +00:00
size_t found , length ;
found = s3host . find_last_of ( ' / ' ) ;
length = s3host . length ( ) ;
while ( found = = ( length - 1 ) & & length > 0 ) {
s3host . erase ( found ) ;
found = s3host . find_last_of ( ' / ' ) ;
length = s3host . length ( ) ;
}
2020-09-11 09:37:24 +00:00
// Check url for http / https protocol
2020-08-22 12:40:53 +00:00
if ( ( s3host . compare ( 0 , 8 , " https:// " ) ! = 0 ) & & ( s3host . compare ( 0 , 7 , " http:// " ) ! = 0 ) ) {
S3FS_PRN_EXIT ( " option url has invalid format, missing http / https protocol " ) ;
return - 1 ;
}
return 0 ;
}
if ( 0 = = strcmp ( arg , " sigv2 " ) ) {
S3fsCurl : : SetSignatureV4 ( false ) ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " createbucket " ) ) {
create_bucket = true ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " endpoint= " ) ) {
2020-08-22 12:40:53 +00:00
endpoint = strchr ( arg , ' = ' ) + sizeof ( char ) ;
is_specified_endpoint = true ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " use_path_request_style " ) ) {
pathrequeststyle = true ;
return 0 ;
}
2020-09-11 08:38:20 +00:00
if ( 0 = = strcmp ( arg , " noua " ) ) {
2020-08-22 12:40:53 +00:00
S3fsCurl : : SetUserAgentFlag ( false ) ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " use_xattr " ) ) {
is_use_xattr = true ;
return 0 ;
2020-09-17 10:45:36 +00:00
} else if ( is_prefix ( arg , " use_xattr= " ) ) {
2020-08-22 12:40:53 +00:00
const char * strflag = strchr ( arg , ' = ' ) + sizeof ( char ) ;
if ( 0 = = strcmp ( strflag , " 1 " ) ) {
is_use_xattr = true ;
} else if ( 0 = = strcmp ( strflag , " 0 " ) ) {
is_use_xattr = false ;
} else {
S3FS_PRN_EXIT ( " option use_xattr has unknown parameter(%s). " , strflag ) ;
return - 1 ;
}
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " cipher_suites= " ) ) {
2020-08-22 12:40:53 +00:00
cipher_suites = strchr ( arg , ' = ' ) + sizeof ( char ) ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " instance_name= " ) ) {
2020-08-22 12:40:53 +00:00
instance_name = strchr ( arg , ' = ' ) + sizeof ( char ) ;
instance_name = " [ " + instance_name + " ] " ;
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " mime= " ) ) {
2020-08-22 12:40:53 +00:00
mimetype_file = strchr ( arg , ' = ' ) + sizeof ( char ) ;
return 0 ;
2014-08-26 17:11:10 +00:00
}
2016-05-06 04:37:32 +00:00
//
2020-08-22 12:40:53 +00:00
// debug option for s3fs
//
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " dbglevel= " ) ) {
2020-08-22 12:40:53 +00:00
const char * strlevel = strchr ( arg , ' = ' ) + sizeof ( char ) ;
if ( 0 = = strcasecmp ( strlevel , " silent " ) | | 0 = = strcasecmp ( strlevel , " critical " ) | | 0 = = strcasecmp ( strlevel , " crit " ) ) {
S3fsSignals : : SetLogLevel ( S3FS_LOG_CRIT ) ;
} else if ( 0 = = strcasecmp ( strlevel , " error " ) | | 0 = = strcasecmp ( strlevel , " err " ) ) {
S3fsSignals : : SetLogLevel ( S3FS_LOG_ERR ) ;
} else if ( 0 = = strcasecmp ( strlevel , " wan " ) | | 0 = = strcasecmp ( strlevel , " warn " ) | | 0 = = strcasecmp ( strlevel , " warning " ) ) {
S3fsSignals : : SetLogLevel ( S3FS_LOG_WARN ) ;
} else if ( 0 = = strcasecmp ( strlevel , " inf " ) | | 0 = = strcasecmp ( strlevel , " info " ) | | 0 = = strcasecmp ( strlevel , " information " ) ) {
S3fsSignals : : SetLogLevel ( S3FS_LOG_INFO ) ;
} else if ( 0 = = strcasecmp ( strlevel , " dbg " ) | | 0 = = strcasecmp ( strlevel , " debug " ) ) {
S3fsSignals : : SetLogLevel ( S3FS_LOG_DBG ) ;
} else {
S3FS_PRN_EXIT ( " option dbglevel has unknown parameter(%s). " , strlevel ) ;
return - 1 ;
}
return 0 ;
}
//
// debug option
//
// The first -d sets debug_level to S3FS_LOG_INFO; a second -d is passed through to fuse.
//
if ( 0 = = strcmp ( arg , " -d " ) | | 0 = = strcmp ( arg , " --debug " ) ) {
if ( ! IS_S3FS_LOG_INFO ( ) & & ! IS_S3FS_LOG_DBG ( ) ) {
S3fsSignals : : SetLogLevel ( S3FS_LOG_INFO ) ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " --debug " ) ) {
// fuse doesn't understand "--debug", but it understands -d;
// we can't rewrite "--debug" into -d here, so consume it.
return 0 ;
}
}
// "f2" is not used no more.
// (set S3FS_LOG_DBG)
if ( 0 = = strcmp ( arg , " f2 " ) ) {
S3fsSignals : : SetLogLevel ( S3FS_LOG_DBG ) ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " curldbg " ) ) {
S3fsCurl : : SetVerbose ( true ) ;
return 0 ;
2020-09-17 10:45:36 +00:00
} else if ( is_prefix ( arg , " curldbg= " ) ) {
2020-08-22 12:40:53 +00:00
const char * strlevel = strchr ( arg , ' = ' ) + sizeof ( char ) ;
if ( 0 = = strcasecmp ( strlevel , " normal " ) ) {
S3fsCurl : : SetVerbose ( true ) ;
} else if ( 0 = = strcasecmp ( strlevel , " body " ) ) {
S3fsCurl : : SetVerbose ( true ) ;
S3fsCurl : : SetDumpBody ( true ) ;
} else {
S3FS_PRN_EXIT ( " option curldbg has unknown parameter(%s). " , strlevel ) ;
return - 1 ;
}
return 0 ;
}
//
// Check cache file, using SIGUSR1
//
if ( 0 = = strcmp ( arg , " set_check_cache_sigusr1 " ) ) {
if ( ! S3fsSignals : : SetUsr1Handler ( NULL ) ) {
S3FS_PRN_EXIT ( " could not set sigusr1 for checking cache. " ) ;
return - 1 ;
}
return 0 ;
2020-09-17 10:45:36 +00:00
} else if ( is_prefix ( arg , " set_check_cache_sigusr1= " ) ) {
2020-08-22 12:40:53 +00:00
const char * strfilepath = strchr ( arg , ' = ' ) + sizeof ( char ) ;
if ( ! S3fsSignals : : SetUsr1Handler ( strfilepath ) ) {
S3FS_PRN_EXIT ( " could not set sigusr1 for checking cache and output file(%s). " , strfilepath ) ;
return - 1 ;
}
return 0 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " accessKeyId= " ) ) {
2020-08-22 12:40:53 +00:00
S3FS_PRN_EXIT ( " option accessKeyId is no longer supported. " ) ;
return - 1 ;
}
2020-09-17 10:45:36 +00:00
if ( is_prefix ( arg , " secretAccessKey= " ) ) {
2020-08-22 12:40:53 +00:00
S3FS_PRN_EXIT ( " option secretAccessKey is no longer supported. " ) ;
return - 1 ;
}
if ( 0 = = strcmp ( arg , " use_wtf8 " ) ) {
use_wtf8 = true ;
return 0 ;
}
if ( 0 = = strcmp ( arg , " requester_pays " ) ) {
S3fsCurl : : SetRequesterPays ( true ) ;
return 0 ;
}
2017-05-13 07:35:55 +00:00
// [NOTE]
2020-08-22 12:40:53 +00:00
// The following options are discarded, because they are not for fuse.
// (Referenced from sshfs.c)
2017-05-13 07:35:55 +00:00
//
2020-08-22 12:40:53 +00:00
if ( 0 = = strcmp ( arg , " auto " ) | |
0 = = strcmp ( arg , " noauto " ) | |
0 = = strcmp ( arg , " user " ) | |
0 = = strcmp ( arg , " nouser " ) | |
0 = = strcmp ( arg , " users " ) | |
0 = = strcmp ( arg , " _netdev " ) )
{
return 0 ;
}
2010-11-13 23:59:23 +00:00
}
2020-08-22 12:40:53 +00:00
return 1 ;
}
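The return-value contract used throughout my_fuse_opt_proc is fuse_opt_parse's documented one: -1 aborts parsing, 0 consumes the argument, and 1 passes it through to fuse. A minimal sketch of a processor using that contract; tiny_opt_proc and "myflag" are hypothetical, not part of s3fs.

#include <fuse.h>
#include <string.h>

// Sketch: consume what is handled here, forward everything else to fuse.
static int tiny_opt_proc(void* data, const char* arg, int key, struct fuse_args* outargs)
{
    if(key == FUSE_OPT_KEY_NONOPT){
        return 1;    // keep non-options(e.g. the mountpoint) for fuse
    }
    if(0 == strcmp(arg, "myflag")){
        return 0;    // handled: discard it so fuse never sees it
    }
    return 1;        // unknown option: pass through to fuse
}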
int main ( int argc , char * argv [ ] )
{
int ch ;
int fuse_res ;
int option_index = 0 ;
struct fuse_operations s3fs_oper ;
time_t incomp_abort_time = ( 24 * 60 * 60 ) ;
static const struct option long_opts [ ] = {
{ " help " , no_argument , NULL , ' h ' } ,
{ " version " , no_argument , 0 , 0 } ,
{ " debug " , no_argument , NULL , ' d ' } ,
{ " incomplete-mpu-list " , no_argument , NULL , ' u ' } ,
{ " incomplete-mpu-abort " , optional_argument , NULL , ' a ' } , // 'a' is only identifier and is not option.
{ NULL , 0 , NULL , 0 }
} ;
// init syslog(default CRIT)
openlog ( " s3fs " , LOG_PID | LOG_ODELAY | LOG_NOWAIT , LOG_USER ) ;
S3fsSignals : : SetLogLevel ( debug_level ) ;
// init xml2
xmlInitParser ( ) ;
LIBXML_TEST_VERSION
init_sysconf_vars ( ) ;
// get program name - emulate basename
program_name . assign ( argv [ 0 ] ) ;
size_t found = program_name . find_last_of ( ' / ' ) ;
2020-09-11 09:37:24 +00:00
if ( found ! = std : : string : : npos ) {
2020-08-22 12:40:53 +00:00
program_name . replace ( 0 , found + 1 , " " ) ;
}
while ( ( ch = getopt_long ( argc , argv , " dho:fsu " , long_opts , & option_index ) ) ! = - 1 ) {
switch ( ch ) {
case 0 :
if ( strcmp ( long_opts [ option_index ] . name , " version " ) = = 0 ) {
show_version ( ) ;
exit ( EXIT_SUCCESS ) ;
}
break ;
case ' h ' :
show_help ( ) ;
exit ( EXIT_SUCCESS ) ;
case ' o ' :
break ;
case ' d ' :
break ;
case ' f ' :
foreground = true ;
break ;
case ' s ' :
break ;
case ' u ' : // --incomplete-mpu-list
if ( NO_UTILITY_MODE ! = utility_mode ) {
S3FS_PRN_EXIT ( " already utility mode option is specified. " ) ;
exit ( EXIT_FAILURE ) ;
}
utility_mode = INCOMP_TYPE_LIST ;
break ;
case ' a ' : // --incomplete-mpu-abort
if ( NO_UTILITY_MODE ! = utility_mode ) {
S3FS_PRN_EXIT ( " already utility mode option is specified. " ) ;
exit ( EXIT_FAILURE ) ;
}
utility_mode = INCOMP_TYPE_ABORT ;
// check expire argument
if ( NULL ! = optarg & & 0 = = strcasecmp ( optarg , " all " ) ) { // "all" means 0 seconds
incomp_abort_time = 0 ;
} else if ( NULL ! = optarg ) {
if ( ! convert_unixtime_from_option_arg ( optarg , incomp_abort_time ) ) {
S3FS_PRN_EXIT ( " --incomplete-mpu-abort option argument is wrong. " ) ;
exit ( EXIT_FAILURE ) ;
}
}
// if optarg is null, incomp_abort_time is 24H(default)
break ;
default :
exit ( EXIT_FAILURE ) ;
}
2018-07-08 03:49:10 +00:00
}
2020-08-22 12:40:53 +00:00
// Load SSE environment
if ( ! S3fsCurl : : LoadEnvSse ( ) ) {
S3FS_PRN_EXIT ( " something wrong about SSE environment. " ) ;
exit ( EXIT_FAILURE ) ;
2011-02-12 16:48:23 +00:00
}
2020-08-22 12:40:53 +00:00
// ssl init
if ( ! s3fs_init_global_ssl ( ) ) {
S3FS_PRN_EXIT ( " could not initialize for ssl libraries. " ) ;
exit ( EXIT_FAILURE ) ;
Summary of Changes(1.62 -> 1.63)
1) Lifetime for the stats cache
Added the new option "stat_cache_expire".
This option, specified in seconds, is the lifetime of each stats cache entry.
If this option is not specified, each entry is kept in the s3fs process until the stats cache grows to its maximum size. (default)
If this option is specified, a stats cache entry is removed from memory when it expires.
2) Enable file permissions
s3fs before 1.62 did not consider file access permissions.
s3fs after this version can consider them.
For access permissions, the s3fs_getattr() function was split so that a sub-function can check the file access permissions.
It works like the access() function. (A sketch of such a check follows item 7 below.)
Callers of s3fs_getattr() now call this new sub-function instead of s3fs_getattr().
Finally, the s3fs_opendir() function, which is called by FUSE, was added to check directory access permissions when listing the files in a directory.
3) UID/GID
When a file or a directory was created, s3fs could not set the UID/GID to the user who executed the command.
(The UID/GID were almost always root, because s3fs runs as root.)
After this version, s3fs sets the correct UID/GID of the user who executes the command.
4) About the mtime
If the object does not have an "x-amz-meta-mtime" meta header, s3fs uses the "Last-Modified" header instead.
But s3fs had a bug in this code, which this version fixes:
when the user modified the file, s3fs did not update the mtime of the file.
In the get_local_fd() function, the local file's mtime was changed only when s3fs ran with the "use_cache" option.
This version always updates the mtime, whether or not the local cache file is used.
Also, the s3fs_flush() function set the mtime of the local cache file from the S3 object's mtime, but that was wrong;
in this version, s3fs_flush() changes the mtime of the S3 object from the local cache file or the tmpfile.
s3fs saves some requests, because it can now always check the mtime whether or not it uses the local cache file.
5) A case of no "x-amz-meta-mode"
If the object did not have an "x-amz-meta-mode" meta header, s3fs recognized the file as not a regular file.
After this version, s3fs recognizes such a file as a regular file.
6) "." and ".." directories
s3fs_readdir() did not return the "." and ".." directory names.
After this version, s3fs is changed so that it returns "." and "..".
For example, the result of "ls" lists the "." and ".." directories.
7) Fixed a bug
The insert_object() function had a bug, and it is fixed.
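To make item 2 concrete, a minimal access()-style permission check against an object's stat, assuming the standard owner/group/other triads; check_access_sketch is a hypothetical name, not the real s3fs sub-function.

#include <errno.h>
#include <sys/stat.h>
#include <unistd.h>

// Sketch: test the requested mask(R_OK/W_OK/X_OK bits) against the
// permission triad that applies to the caller's uid/gid.
static int check_access_sketch(const struct stat* st, uid_t uid, gid_t gid, int mask)
{
    mode_t bits;
    if(uid == st->st_uid){
        bits = (st->st_mode >> 6) & 7;    // owner triad
    }else if(gid == st->st_gid){
        bits = (st->st_mode >> 3) & 7;    // group triad
    }else{
        bits = st->st_mode & 7;           // other triad
    }
    return (0 == (mask & ~bits)) ? 0 : -EACCES;
}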
git-svn-id: http://s3fs.googlecode.com/svn/trunk@390 df820570-a93a-0410-bd06-b72b767a4274
2013-02-24 08:58:54 +00:00
}
2020-08-22 12:40:53 +00:00
// init curl (without mime types)
//
2017-03-19 15:19:04 +00:00
// [NOTE]
2020-08-22 12:40:53 +00:00
// The curl initialization here does not load mime types.
// The mime types file parameter is a dynamic value depending on
// the user's environment, and is analyzed by the my_fuse_opt_proc
// function.
// The my_fuse_opt_proc function is executed after this curl
// initialization. Because the curl method is used in the
// my_fuse_opt_proc function, then it must be called here to
// initialize. Fortunately, the processing using mime types
// is only PUT/POST processing, and it is not used until the
// call of my_fuse_opt_proc function is completed. Therefore,
// the mime type is loaded just after calling the my_fuse_opt_proc
// function.
//
if ( ! S3fsCurl : : InitS3fsCurl ( ) ) {
S3FS_PRN_EXIT ( " Could not initiate curl library. " ) ;
s3fs_destroy_global_ssl ( ) ;
exit ( EXIT_FAILURE ) ;
Changes codes for performance(part 3)
* Summary
This revision includes a big change to the temporary file and local cache file handling.
With this big change, s3fs performs well when it opens/closes/syncs/reads objects.
I made a big change to the handling of temporary files and local cache
files to do this implementation.
* Detail
1) About temporary files(local files)
s3fs uses a temporary file on the local file system when it downloads/
uploads/opens/seeks an object on S3.
After this revision, s3fs calls the ftruncate() function when it makes the
temporary file.
In this way s3fs can set a file size of precisely the right length without downloading.
(Notice - the ftruncate function is for XSI-compliant systems, so you may
have a problem on non-XSI-compliant systems.)
With this change, s3fs can download a part of an object by requesting it with
the "Range" http header. It is like downloading by block unit.
(A standalone sketch of this ranged download appears after main() at the end of this file.)
The default block(part) size is 50MB, which is the default
parallel request count(5) times the default multipart upload size(10MB).
If you need to change this block size, you can do so with the new option
"fd_page_size". This option can take any value from 1MB(1024 * 1024) upward.
Note that fdcache.cpp(and fdcache.h) were changed a lot.
2) About the local cache
Local cache files, which are in the directory specified by the "use_cache" option,
do not always have all of the object data.
This is because s3fs uses the ftruncate function and reads(writes) each block
unit of a temporary file.
s3fs manages each block unit's status: "downloaded area" or "not".
For this status, s3fs makes a new temporary file in the cache directory which is
specified by the "use_cache" option. These status files are in a directory
named "<use_cache directory>/.<bucket_name>/".
When s3fs opens a status file, s3fs locks it for exclusive control by
calling the flock function. You need to take care: the status files cannot
be placed on a network drive(like NFS).
This revision also changes the file open mode: s3fs always opens a local cache
file and each status file in writable mode.
Last, this revision adds the new option "del_cache"; with this option, s3fs
deletes all local cache files when s3fs starts and exits.
3) Uploading
When s3fs writes data to a file descriptor through a FUSE request, the old s3fs
revision downloaded all of the object. The new revision does not download it all;
it downloads only a small partial area(some block units) including the written
data area.
And when s3fs closes or flushes the file descriptor, s3fs downloads the other
areas which have not been downloaded from the server. After that, s3fs uploads all of the data.
Revision r456 already added a parallel upload function, so this revision together
with r456 and r457 is a very big change for performance.
4) Downloading
By changing the temporary file and local cache file handling, when s3fs downloads
an object, it downloads only the required range(some block units).
And s3fs downloads units by parallel GET requests, the same as for
uploading. (The maximum parallel request count and each download size are
specified by the same parameters as for uploading.)
In the new revision, when s3fs opens a file, it returns the file descriptor immediately,
because s3fs only opens(makes) the file descriptor without downloading
data. And when s3fs reads data, it downloads only the block units
including the specified area.
This result is good for performance.
5) Changed option name
The option "parallel_upload", which was added in r456, is renamed to
"parallel_count". The reason is that this option value is not only used for
uploading objects, but also for downloading. (For a while,
you can use the old option name "parallel_upload" for compatibility.)
git-svn-id: http://s3fs.googlecode.com/svn/trunk@458 df820570-a93a-0410-bd06-b72b767a4274
2013-07-23 16:01:48 +00:00
}
2020-08-22 12:40:53 +00:00
// clear this structure
memset ( & s3fs_oper , 0 , sizeof ( s3fs_oper ) ) ;
// This is the fuse-style parser for the arguments
// after which the bucket name and mountpoint names
// should have been set
struct fuse_args custom_args = FUSE_ARGS_INIT ( argc , argv ) ;
if ( 0 ! = fuse_opt_parse ( & custom_args , NULL , NULL , my_fuse_opt_proc ) ) {
S3fsCurl : : DestroyS3fsCurl ( ) ;
s3fs_destroy_global_ssl ( ) ;
exit ( EXIT_FAILURE ) ;
2013-07-10 06:24:06 +00:00
}
2020-08-22 12:40:53 +00:00
// init mime types for curl
if ( ! S3fsCurl : : InitMimeType ( mimetype_file ) ) {
S3FS_PRN_WARN ( " Missing MIME types prevents setting Content-Type on uploaded objects. " ) ;
2015-10-18 17:03:41 +00:00
}
2020-08-22 12:40:53 +00:00
// [NOTE]
// exclusive option check here.
//
if ( storage_class_t : : REDUCED_REDUNDANCY = = S3fsCurl : : GetStorageClass ( ) & & ! S3fsCurl : : IsSseDisable ( ) ) {
S3FS_PRN_EXIT ( " use_sse option could not be specified with storage class reduced_redundancy. " ) ;
S3fsCurl : : DestroyS3fsCurl ( ) ;
s3fs_destroy_global_ssl ( ) ;
exit ( EXIT_FAILURE ) ;
2014-03-30 07:53:41 +00:00
}
2020-08-22 12:40:53 +00:00
if ( ! S3fsCurl : : FinalCheckSse ( ) ) {
S3FS_PRN_EXIT ( " something wrong about SSE options. " ) ;
S3fsCurl : : DestroyS3fsCurl ( ) ;
s3fs_destroy_global_ssl ( ) ;
exit ( EXIT_FAILURE ) ;
2015-07-27 22:47:08 +00:00
}
2020-08-22 12:40:53 +00:00
// The first plain argument is the bucket
if ( bucket . empty ( ) ) {
S3FS_PRN_EXIT ( " missing BUCKET argument. " ) ;
show_usage ( ) ;
S3fsCurl : : DestroyS3fsCurl ( ) ;
s3fs_destroy_global_ssl ( ) ;
exit ( EXIT_FAILURE ) ;
2016-09-19 04:28:01 +00:00
}
2020-08-22 12:40:53 +00:00
// bucket names cannot contain upper case characters in virtual-hosted style
if ( ( ! pathrequeststyle ) & & ( lower ( bucket ) ! = bucket ) ) {
S3FS_PRN_EXIT ( " BUCKET %s, name not compatible with virtual-hosted style. " , bucket . c_str ( ) ) ;
S3fsCurl : : DestroyS3fsCurl ( ) ;
s3fs_destroy_global_ssl ( ) ;
exit ( EXIT_FAILURE ) ;
2017-04-04 12:32:53 +00:00
}
2020-08-22 12:40:53 +00:00
// check bucket name for illegal characters
found = bucket . find_first_of ( " /: \\ ;!@#$%^&*?|+= " ) ;
2020-09-11 09:37:24 +00:00
if ( found ! = std : : string : : npos ) {
2020-08-22 12:40:53 +00:00
S3FS_PRN_EXIT ( " BUCKET %s -- bucket name contains an illegal character. " , bucket . c_str ( ) ) ;
S3fsCurl : : DestroyS3fsCurl ( ) ;
s3fs_destroy_global_ssl ( ) ;
exit ( EXIT_FAILURE ) ;
2018-02-28 07:51:35 +00:00
}
2020-08-22 12:40:53 +00:00
2020-09-17 10:45:36 +00:00
if ( ! pathrequeststyle & & is_prefix ( s3host . c_str ( ) , " https:// " ) & & bucket . find_first_of ( ' . ' ) ! = std : : string : : npos ) {
2020-08-22 12:40:53 +00:00
S3FS_PRN_EXIT ( " BUCKET %s -- cannot mount bucket with . while using HTTPS without use_path_request_style " , bucket . c_str ( ) ) ;
S3fsCurl : : DestroyS3fsCurl ( ) ;
s3fs_destroy_global_ssl ( ) ;
exit ( EXIT_FAILURE ) ;
2020-03-19 15:13:21 +00:00
}
2020-08-22 12:40:53 +00:00
// The second plain argument is the mountpoint.
// If the option was given, we already checked for a
// readable, non-empty directory; this check determines
// whether the mountpoint option was ever supplied.
if ( NO_UTILITY_MODE = = utility_mode ) {
if ( mountpoint . empty ( ) ) {
S3FS_PRN_EXIT ( " missing MOUNTPOINT argument. " ) ;
show_usage ( ) ;
S3fsCurl : : DestroyS3fsCurl ( ) ;
s3fs_destroy_global_ssl ( ) ;
exit ( EXIT_FAILURE ) ;
}
2015-09-30 19:41:27 +00:00
}
2020-08-22 12:40:53 +00:00
// error checking of command line arguments for compatibility
if ( S3fsCurl : : IsPublicBucket ( ) & & S3fsCurl : : IsSetAccessKeys ( ) ) {
S3FS_PRN_EXIT ( " specifying both public_bucket and the access keys options is invalid. " ) ;
S3fsCurl : : DestroyS3fsCurl ( ) ;
s3fs_destroy_global_ssl ( ) ;
exit ( EXIT_FAILURE ) ;
2010-11-13 23:59:23 +00:00
}
2020-08-22 12:40:53 +00:00
if ( ! passwd_file . empty ( ) & & S3fsCurl : : IsSetAccessKeys ( ) ) {
S3FS_PRN_EXIT ( " specifying both passwd_file and the access keys options is invalid. " ) ;
S3fsCurl : : DestroyS3fsCurl ( ) ;
s3fs_destroy_global_ssl ( ) ;
exit ( EXIT_FAILURE ) ;
2013-08-23 16:28:50 +00:00
}
2020-08-22 12:40:53 +00:00
if ( ! S3fsCurl : : IsPublicBucket ( ) & & ! load_iamrole & & ! is_ecs ) {
if ( EXIT_SUCCESS ! = get_access_keys ( ) ) {
S3fsCurl : : DestroyS3fsCurl ( ) ;
s3fs_destroy_global_ssl ( ) ;
exit ( EXIT_FAILURE ) ;
}
if ( ! S3fsCurl : : IsSetAccessKeys ( ) ) {
S3FS_PRN_EXIT ( " could not establish security credentials, check documentation. " ) ;
S3fsCurl : : DestroyS3fsCurl ( ) ;
s3fs_destroy_global_ssl ( ) ;
exit ( EXIT_FAILURE ) ;
}
// More error checking on the access key pair can be done
// like checking for appropriate lengths and characters
2020-06-28 08:00:41 +00:00
}
2011-06-26 00:37:52 +00:00
2020-08-22 12:40:53 +00:00
// check cache dir permission
if ( ! FdManager : : CheckCacheDirExist ( ) | | ! FdManager : : CheckCacheTopDir ( ) | | ! CacheFileStat : : CheckCacheFileStatTopDir ( ) ) {
S3FS_PRN_EXIT ( " could not allow cache directory permission, check permission of cache directories. " ) ;
S3fsCurl : : DestroyS3fsCurl ( ) ;
s3fs_destroy_global_ssl ( ) ;
exit ( EXIT_FAILURE ) ;
2011-06-26 00:37:52 +00:00
}
2020-08-22 12:40:53 +00:00
// check IBM IAM requirements
if ( is_ibm_iam_auth ) {
// check that default ACL is either public-read or private
acl_t defaultACL = S3fsCurl : : GetDefaultAcl ( ) ;
if ( defaultACL ! = acl_t : : PRIVATE & & defaultACL ! = acl_t : : PUBLIC_READ ) {
S3FS_PRN_EXIT ( " can only use 'public-read' or 'private' ACL while using ibm_iam_auth " ) ;
S3fsCurl : : DestroyS3fsCurl ( ) ;
s3fs_destroy_global_ssl ( ) ;
exit ( EXIT_FAILURE ) ;
}
if ( create_bucket & & ! S3fsCurl : : IsSetAccessKeyID ( ) ) {
S3FS_PRN_EXIT ( " missing service instance ID for bucket creation " ) ;
S3fsCurl : : DestroyS3fsCurl ( ) ;
s3fs_destroy_global_ssl ( ) ;
exit ( EXIT_FAILURE ) ;
}
2011-06-26 00:37:52 +00:00
}
2020-08-22 12:40:53 +00:00
// set user agent
S3fsCurl : : InitUserAgent ( ) ;
// There's room for more command line error checking.
// Check whether the bucket name contains periods while https (SSL) is
// being used. This is a known limitation:
// https://docs.amazonwebservices.com/AmazonS3/latest/dev/
// The Developer's Guide suggests that we either use HTTP or write
// our own certificate verification logic.
// For now, this will be unsupported unless we get a request for it to
// be supported. In that case, we have a couple of options:
// - implement a command line option that bypasses the verify host
// but doesn't bypass verifying the certificate
// - write our own host verification (this might be complex)
// See issue #128
/*
if ( 1 = = S3fsCurl : : GetSslVerifyHostname ( ) ) {
found = bucket . find_first_of ( " . " ) ;
2020-09-11 09:37:24 +00:00
if ( found ! = std : : string : : npos ) {
2020-08-22 12:40:53 +00:00
found = s3host . find ( " https: " ) ;
2020-09-11 09:37:24 +00:00
if ( found ! = std : : string : : npos ) {
2020-08-22 12:40:53 +00:00
S3FS_PRN_EXIT ( " Using https and a bucket name with periods is unsupported. " ) ;
exit ( 1 ) ;
}
}
2019-02-19 10:32:37 +00:00
}
2020-08-22 12:40:53 +00:00
*/
if ( NO_UTILITY_MODE ! = utility_mode ) {
int exitcode = s3fs_utility_processing ( incomp_abort_time ) ;
S3fsCurl : : DestroyS3fsCurl ( ) ;
s3fs_destroy_global_ssl ( ) ;
exit ( exitcode ) ;
2019-11-18 11:38:16 +00:00
}
2019-04-07 11:51:55 +00:00
2020-08-22 12:40:53 +00:00
// Check multipart / copy api for mix multipart uploading
if ( nomultipart | | nocopyapi | | norenameapi ) {
FdEntity : : SetNoMixMultipart ( ) ;
2019-04-07 11:51:55 +00:00
}
2010-11-13 23:59:23 +00:00
2020-08-22 12:40:53 +00:00
// check free disk space
if ( ! FdManager : : IsSafeDiskSpace ( NULL , S3fsCurl : : GetMultipartSize ( ) * S3fsCurl : : GetMaxParallelCount ( ) ) ) {
S3FS_PRN_EXIT ( " There is no enough disk space for used as cache(or temporary) directory by s3fs. " ) ;
S3fsCurl : : DestroyS3fsCurl ( ) ;
s3fs_destroy_global_ssl ( ) ;
2019-02-03 14:22:16 +00:00
exit ( EXIT_FAILURE ) ;
2020-08-22 12:40:53 +00:00
}
2011-02-26 14:48:02 +00:00
2020-08-22 12:40:53 +00:00
s3fs_oper . getattr = s3fs_getattr ;
s3fs_oper . readlink = s3fs_readlink ;
s3fs_oper . mknod = s3fs_mknod ;
s3fs_oper . mkdir = s3fs_mkdir ;
s3fs_oper . unlink = s3fs_unlink ;
s3fs_oper . rmdir = s3fs_rmdir ;
s3fs_oper . symlink = s3fs_symlink ;
s3fs_oper . rename = s3fs_rename ;
s3fs_oper . link = s3fs_link ;
if ( ! nocopyapi ) {
s3fs_oper . chmod = s3fs_chmod ;
s3fs_oper . chown = s3fs_chown ;
s3fs_oper . utimens = s3fs_utimens ;
} else {
s3fs_oper . chmod = s3fs_chmod_nocopy ;
s3fs_oper . chown = s3fs_chown_nocopy ;
s3fs_oper . utimens = s3fs_utimens_nocopy ;
}
s3fs_oper . truncate = s3fs_truncate ;
s3fs_oper . open = s3fs_open ;
s3fs_oper . read = s3fs_read ;
s3fs_oper . write = s3fs_write ;
s3fs_oper . statfs = s3fs_statfs ;
s3fs_oper . flush = s3fs_flush ;
s3fs_oper . fsync = s3fs_fsync ;
s3fs_oper . release = s3fs_release ;
s3fs_oper . opendir = s3fs_opendir ;
s3fs_oper . readdir = s3fs_readdir ;
s3fs_oper . init = s3fs_init ;
s3fs_oper . destroy = s3fs_destroy ;
s3fs_oper . access = s3fs_access ;
s3fs_oper . create = s3fs_create ;
// extended attributes
if ( is_use_xattr ) {
s3fs_oper . setxattr = s3fs_setxattr ;
s3fs_oper . getxattr = s3fs_getxattr ;
s3fs_oper . listxattr = s3fs_listxattr ;
s3fs_oper . removexattr = s3fs_removexattr ;
}
// now passing things off to fuse, fuse will finish evaluating the command line args
fuse_res = fuse_main ( custom_args . argc , custom_args . argv , & s3fs_oper , NULL ) ;
fuse_opt_free_args ( & custom_args ) ;
// Destroy curl
if ( ! S3fsCurl : : DestroyS3fsCurl ( ) ) {
S3FS_PRN_WARN ( " Could not release curl library. " ) ;
2010-11-26 22:11:48 +00:00
}
2020-08-22 12:40:53 +00:00
s3fs_destroy_global_ssl ( ) ;
2010-11-20 17:55:15 +00:00
2020-08-22 12:40:53 +00:00
// cleanup xml2
xmlCleanupParser ( ) ;
S3FS_MALLOCTRIM ( 0 ) ;
2018-05-27 10:48:03 +00:00
2020-08-22 12:40:53 +00:00
exit ( fuse_res ) ;
2010-11-13 23:59:23 +00:00
}
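The block-unit download described in "Changes codes for performance(part 3)" above combines ftruncate() pre-allocation with ranged GETs. A minimal standalone sketch of that idea follows, assuming libcurl and a plain HTTP(S) endpoint; the URL, sizes, and function name are illustrative, and this is not the FdEntity/FdManager implementation.

#include <curl/curl.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

// Sketch: create a local file of the full object size without downloading
// anything, then fetch one block with an HTTP Range request and write it
// at its offset. (curl_global_init() is assumed to have been called once.)
static bool fetch_block(const char* url, const char* localpath, off_t object_size, off_t start, off_t len)
{
    int fd = open(localpath, O_CREAT | O_RDWR, 0600);
    if(-1 == fd){
        return false;
    }
    if(0 != ftruncate(fd, object_size)){    // size it precisely, no download
        close(fd);
        return false;
    }
    FILE* fp = fdopen(fd, "r+b");
    if(!fp){
        close(fd);
        return false;
    }
    fseeko(fp, start, SEEK_SET);            // the block lands at its offset

    CURL* curl = curl_easy_init();
    if(!curl){
        fclose(fp);
        return false;
    }
    char range[64];
    snprintf(range, sizeof(range), "%lld-%lld", (long long)start, (long long)(start + len - 1));
    curl_easy_setopt(curl, CURLOPT_URL, url);
    curl_easy_setopt(curl, CURLOPT_RANGE, range);     // sends "Range: bytes=..."
    curl_easy_setopt(curl, CURLOPT_WRITEDATA, fp);    // default callback fwrite()s
    CURLcode res = curl_easy_perform(curl);
    curl_easy_cleanup(curl);
    fclose(fp);
    return CURLE_OK == res;
}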
2013-01-19 16:05:07 +00:00
2014-09-07 15:08:27 +00:00
/*
* Local variables :
2020-08-22 12:40:53 +00:00
* tab - width : 4
* c - basic - offset : 4
2014-09-07 15:08:27 +00:00
* End :
2020-08-22 12:40:53 +00:00
* vim600 : expandtab sw = 4 ts = 4 fdm = marker
* vim < 600 : expandtab sw = 4 ts = 4
2014-09-07 15:08:27 +00:00
*/