Mirror of https://github.com/s3fs-fuse/s3fs-fuse.git (synced 2024-11-05 04:17:52 +00:00)
Supported an object which is larger than free disk space
This commit is contained in: parent 4252fab685, commit d102eb752d
@@ -158,19 +158,16 @@ number of parallel request for uploading big objects.
 s3fs uploads large object(default:over 20MB) by multipart post request, and sends parallel requests.
 This option limits parallel request count which s3fs requests at once.
 It is necessary to set this value depending on a CPU and a network band.
-This option is lated to fd_page_size option and affects it.
-.TP
-\fB\-o\fR fd_page_size(default="52428800"(50MB))
-number of internal management page size for each file descriptor.
-For delayed reading and writing by s3fs, s3fs manages pages which is separated from object. Each pages has a status that data is already loaded(or not loaded yet).
-This option should not be changed when you don't have a trouble with performance.
-This value is changed automatically by parallel_count and multipart_size values(fd_page_size value = parallel_count * multipart_size).
 .TP
 \fB\-o\fR multipart_size(default="10"(10MB))
 number of one part size in multipart uploading request.
 The default size is 10MB(10485760byte), this value is minimum size.
 Specify number of MB and over 10(MB).
-This option is lated to fd_page_size option and affects it.
+.TP
+\fB\-o\fR ensure_diskfree(default the same as multipart_size value)
+sets MB to ensure disk free space. This option means the threshold of free space size on disk which is used for the cache file by s3fs.
+s3fs makes file for downloading, and uploading and caching files.
+If the disk free space is smaller than this value, s3fs do not use diskspace as possible in exchange for the performance.
 .TP
 \fB\-o\fR url (default="http://s3.amazonaws.com")
 sets the url to use to access Amazon S3. If you want to use HTTPS, then you can set url=https://s3.amazonaws.com
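The new ensure_diskfree option works together with multipart_size: when it is not given it defaults to the multipart size, and, as the option parsing later in this commit shows, a smaller value is raised to the multipart size. A minimal sketch of that derivation, with illustrative names rather than the real s3fs identifiers:

#include <algorithm>
#include <cstddef>

// Sketch only: compute the effective free-space threshold in bytes from the
// two options, both given in MB on the command line. The clamping mirrors
// the ensure_diskfree= handling added to my_fuse_opt_proc() further down.
static size_t effective_ensure_diskfree(size_t ensure_diskfree_mb, size_t multipart_size_mb)
{
  size_t dfsize = ensure_diskfree_mb * 1024 * 1024;
  size_t mpsize = multipart_size_mb  * 1024 * 1024;
  return std::max(dfsize, mpsize);   // never reserve less than one multipart part
}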
src/curl.cpp (37 lines changed)
@@ -3373,6 +3373,41 @@ int S3fsCurl::MultipartUploadRequest(const char* tpath, headers_t& meta, int fd,
   return 0;
 }
 
+int S3fsCurl::MultipartUploadRequest(string upload_id, const char* tpath, int fd, off_t offset, size_t size, etaglist_t& list)
+{
+  S3FS_PRN_INFO3("[upload_id=%s][tpath=%s][fd=%d][offset=%jd][size=%jd]", upload_id.c_str(), SAFESTRPTR(tpath), fd, (intmax_t)offset, (intmax_t)size);
+
+  // duplicate fd
+  int fd2;
+  if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){
+    S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno);
+    if(-1 != fd2){
+      close(fd2);
+    }
+    return -errno;
+  }
+
+  // set
+  partdata.fd         = fd2;
+  partdata.startpos   = offset;
+  partdata.size       = size;
+  b_partdata_startpos = partdata.startpos;
+  b_partdata_size     = partdata.size;
+
+  // upload part
+  int result;
+  if(0 != (result = UploadMultipartPostRequest(tpath, (list.size() + 1), upload_id))){
+    S3FS_PRN_ERR("failed uploading part(%d)", result);
+    close(fd2);
+    return result;
+  }
+  list.push_back(partdata.etag);
+  DestroyCurlHandle();
+  close(fd2);
+
+  return 0;
+}
+
 int S3fsCurl::MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size)
 {
   int result;
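The new MultipartUploadRequest overload uploads exactly one part of an already started multipart upload, reading the data through a duplicated file descriptor, so a caller can push an object to S3 part by part without keeping the whole file staged on disk. A hedged sketch of how the three requests that this commit exposes on S3fsCurl could be driven together (the real caller is FdEntity's no-cache path in the suppressed fdcache.cpp diff; this helper and its argument names are illustrative only):

#include <algorithm>
#include <string>
#include "curl.h"   // S3fsCurl, headers_t, etaglist_t

// Sketch, not the actual s3fs caller: start a multipart upload, post each
// part from the open file descriptor, then complete it with the ETag list.
static int upload_in_parts(const char* tpath, headers_t& meta, int fd, off_t total, size_t partsize)
{
  S3fsCurl    s3fscurl;
  std::string upload_id;
  etaglist_t  etags;
  int         result;

  if(0 != (result = s3fscurl.PreMultipartPostRequest(tpath, meta, upload_id, false))){
    return result;                                  // could not start the upload
  }
  s3fscurl.DestroyCurlHandle();

  for(off_t offset = 0; offset < total; offset += static_cast<off_t>(partsize)){
    size_t bytes = static_cast<size_t>(std::min(static_cast<off_t>(partsize), total - offset));
    // one part per call; the overload dups fd, posts the range and records the ETag
    if(0 != (result = s3fscurl.MultipartUploadRequest(upload_id, tpath, fd, offset, bytes, etags))){
      return result;
    }
  }
  return s3fscurl.CompleteMultipartPostRequest(tpath, upload_id, etags);
}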
@@ -3604,7 +3639,7 @@ int S3fsMultiCurl::MultiRead(void)
       isRetry = true;
     }else if(404 == responseCode){
       // not found
-      S3FS_PRN_ERR("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
+      S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
     }else if(500 == responseCode){
       // case of all other result, do retry.(11/13/2013)
       // because it was found that s3fs got 500 error from S3, but could success
src/curl.h

@@ -273,10 +273,7 @@ class S3fsCurl
     bool GetUploadId(std::string& upload_id);
     int GetIAMCredentials(void);
 
-    int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy);
-    int CompleteMultipartPostRequest(const char* tpath, std::string& upload_id, etaglist_t& parts);
     int UploadMultipartPostSetup(const char* tpath, int part_num, std::string& upload_id);
-    int UploadMultipartPostRequest(const char* tpath, int part_num, std::string& upload_id);
     int CopyMultipartPostRequest(const char* from, const char* to, int part_num, std::string& upload_id, headers_t& meta);
 
   public:
@@ -353,10 +350,14 @@ class S3fsCurl
     int GetObjectRequest(const char* tpath, int fd, off_t start = -1, ssize_t size = -1);
     int CheckBucket(void);
     int ListBucketRequest(const char* tpath, const char* query);
+    int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy);
+    int CompleteMultipartPostRequest(const char* tpath, std::string& upload_id, etaglist_t& parts);
+    int UploadMultipartPostRequest(const char* tpath, int part_num, std::string& upload_id);
     int MultipartListRequest(std::string& body);
     int AbortMultipartUpload(const char* tpath, std::string& upload_id);
     int MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool is_copy);
     int MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool is_copy);
+    int MultipartUploadRequest(std::string upload_id, const char* tpath, int fd, off_t offset, size_t size, etaglist_t& list);
     int MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size);
 
     // methods(valiables)
src/fdcache.cpp (1329 lines changed; file diff suppressed because it is too large)
src/fdcache.h (111 lines changed)
@@ -20,6 +20,9 @@
 #ifndef FD_CACHE_H_
 #define FD_CACHE_H_
 
+#include <sys/statvfs.h>
+#include "curl.h"
+
 //------------------------------------------------
 // CacheFileStat
 //------------------------------------------------
@@ -53,40 +56,49 @@ struct fdpage
 {
   off_t  offset;
   size_t bytes;
-  bool   init;
+  bool   loaded;
 
-  fdpage(off_t start = 0, size_t size = 0, bool is_init = false)
-         : offset(start), bytes(size), init(is_init) {}
+  fdpage(off_t start = 0, size_t size = 0, bool is_loaded = false)
+         : offset(start), bytes(size), loaded(is_loaded) {}
 
   off_t next(void) const { return (offset + bytes); }
   off_t end(void) const { return (0 < bytes ? offset + bytes - 1 : 0); }
 };
 typedef std::list<struct fdpage*> fdpage_list_t;
 
+class FdEntity;
+
 //
 // Management of loading area/modifying
 //
 class PageList
 {
+  friend class FdEntity;    // only one method access directly pages.
+
   private:
     fdpage_list_t pages;
 
   private:
     void Clear(void);
+    bool Compress(void);
+    bool Parse(off_t new_pos);
 
   public:
    static void FreeList(fdpage_list_t& list);
 
-    explicit PageList(off_t size = 0, bool is_init = false);
+    explicit PageList(size_t size = 0, bool is_loaded = false);
     ~PageList();
 
-    off_t Size(void) const;
-    int Resize(off_t size, bool is_init);
-    int Init(off_t size, bool is_init);
-    bool IsInit(off_t start, off_t size);
-    bool SetInit(off_t start, off_t size, bool is_init = true);
-    bool FindUninitPage(off_t start, off_t& resstart, size_t& ressize);
-    int GetUninitPages(fdpage_list_t& uninit_list, off_t start = 0, off_t size = -1);
+    bool Init(size_t size, bool is_loaded);
+    size_t Size(void) const;
+    bool Resize(size_t size, bool is_loaded);
+
+    bool IsPageLoaded(off_t start = 0, size_t size = 0) const;                                   // size=0 is checking to end of list
+    bool SetPageLoadedStatus(off_t start, size_t size, bool is_loaded = true, bool is_compress = true);
+    bool FindUnloadedPage(off_t start, off_t& resstart, size_t& ressize) const;
+    size_t GetTotalUnloadedPageSize(off_t start = 0, size_t size = 0) const;                     // size=0 is checking to end of list
+    int GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, size_t size = 0) const;  // size=0 is checking to end of list
 
     bool Serialize(CacheFileStat& file, bool is_output);
     void Dump(void);
 };
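PageList now records, for each byte range of the local file, whether that range has already been loaded from the object; the renamed methods (IsPageLoaded, SetPageLoadedStatus, FindUnloadedPage, GetTotalUnloadedPageSize, GetUnloadedPages) all operate on that flag. A toy illustration of the bookkeeping idea, not the s3fs implementation (which lives in the suppressed fdcache.cpp diff):

#include <cstddef>
#include <list>
#include <sys/types.h>

// Toy model of the page bookkeeping: contiguous ranges with a loaded flag.
struct Page { off_t offset; size_t bytes; bool loaded; };

// Sum the bytes in [start, start+size) that are not loaded yet; this is the
// question FdEntity has to answer before deciding how much disk space a
// download would need (cf. GetTotalUnloadedPageSize above).
static size_t unloaded_bytes(const std::list<Page>& pages, off_t start, size_t size)
{
  size_t total = 0;
  off_t  end   = start + static_cast<off_t>(size);
  for(std::list<Page>::const_iterator it = pages.begin(); it != pages.end(); ++it){
    off_t pstart = it->offset;
    off_t pend   = it->offset + static_cast<off_t>(it->bytes);
    if(it->loaded || pend <= start || end <= pstart){
      continue;                                   // loaded or outside the range
    }
    off_t s = (pstart < start) ? start : pstart;  // clip to the requested range
    off_t e = (pend   > end)   ? end   : pend;
    total  += static_cast<size_t>(e - s);
  }
  return total;
}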
@@ -100,17 +112,28 @@ class FdEntity
     pthread_mutex_t fdent_lock;
     bool            is_lock_init;
     PageList        pagelist;
     int             refcnt;        // reference count
     std::string     path;          // object path
     std::string     cachepath;     // local cache file path
-    int             fd;            // file descriptor(tmp file or cache file)
-    FILE*           file;          // file pointer(tmp file or cache file)
-    bool            is_modify;     // if file is changed, this flag is true
+                                   // (if this is empty, does not load/save pagelist.)
+    int             fd;            // file descriptor(tmp file or cache file)
+    FILE*           pfile;         // file pointer(tmp file or cache file)
+    bool            is_modify;     // if file is changed, this flag is true
+    headers_t       orgmeta;       // original headers at opening
+    size_t          size_orgmeta;  // original file size in original headers
+
+    std::string     upload_id;     // for no cached multipart uploading when no disk space
+    etaglist_t      etaglist;      // for no cached multipart uploading when no disk space
+    off_t           mp_start;      // start position for no cached multipart(write method only)
+    size_t          mp_size;       // size for no cached multipart(write method only)
 
   private:
+    static int FillFile(int fd, unsigned char byte, size_t size, off_t start);
+
     void Clear(void);
-    int Dup(void);
-    bool SetAllStatus(bool is_enable);
+    bool SetAllStatus(bool is_loaded);                            // [NOTE] not locking
+    bool SetAllStatusLoaded(void) { return SetAllStatus(true); }
+    bool SetAllStatusUnloaded(void) { return SetAllStatus(false); }
 
   public:
     explicit FdEntity(const char* tpath = NULL, const char* cpath = NULL);
@@ -118,21 +141,31 @@ class FdEntity
 
     void Close(void);
     bool IsOpen(void) const { return (-1 != fd); }
-    int Open(off_t size = -1, time_t time = -1);
+    int Open(headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1);
+    bool OpenAndLoadAll(headers_t* pmeta = NULL, size_t* size = NULL, bool force_load = false);
+    int Dup(void);
+
     const char* GetPath(void) const { return path.c_str(); }
     void SetPath(const std::string &newpath) { path = newpath; }
     int GetFd(void) const { return fd; }
-    int SetMtime(time_t time);
-    bool GetSize(off_t& size);
-    bool GetMtime(time_t& time);
-    bool GetStats(struct stat& st);
 
-    bool SetAllEnable(void) { return SetAllStatus(true); }
-    bool SetAllDisable(void) { return SetAllStatus(false); }
-    bool LoadFull(off_t* size = NULL, bool force_load = false);
-    int Load(off_t start, off_t size);
-    int RowFlush(const char* tpath, headers_t& meta, bool force_sync = false);
-    int Flush(headers_t& meta, bool force_sync = false) { return RowFlush(NULL, meta, force_sync); }
+    bool GetStats(struct stat& st);
+    int SetMtime(time_t time);
+    bool UpdateMtime(void);
+    bool GetSize(size_t& size);
+    bool SetMode(mode_t mode);
+    bool SetUId(uid_t uid);
+    bool SetGId(gid_t gid);
+
+    int Load(off_t start = 0, size_t size = 0);                // size=0 means loading to end
+    int NoCacheLoadAndPost(off_t start = 0, size_t size = 0);  // size=0 means loading to end
+    int NoCachePreMultipartPost(void);
+    int NoCacheMultipartPost(int tgfd, off_t start, size_t size);
+    int NoCacheCompleteMultipartPost(void);
+
+    int RowFlush(const char* tpath, bool force_sync = false);
+    int Flush(bool force_sync = false) { return RowFlush(NULL, force_sync); }
+
     ssize_t Read(char* bytes, off_t start, size_t size, bool force_load = false);
     ssize_t Write(const char* bytes, off_t start, size_t size);
 };
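The NoCache* methods are the core of this commit: when there is not enough disk space to stage a whole object, FdEntity starts a multipart upload up front (keeping upload_id and etaglist in the members above), posts data part by part, and completes the upload at flush time. A rough, hedged sketch of how a flush could drive them; the method names come from the header above, but how they are combined here is an assumption, not the actual RowFlush body in the suppressed fdcache.cpp diff:

#include "fdcache.h"

// Control-flow sketch only, not the real implementation.
static int nocache_flush_sketch(FdEntity& ent)
{
  int result;
  if(0 != (result = ent.NoCachePreMultipartPost())){        // start upload, keep upload_id
    return result;
  }
  // Walk the file, loading still-missing ranges only in pieces small enough
  // for the free-space budget and posting each piece as one part
  // (start=0, size=0 means "up to the end of the file").
  if(0 != (result = ent.NoCacheLoadAndPost(0, 0))){
    return result;
  }
  return ent.NoCacheCompleteMultipartPost();                 // send the collected ETag list
}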
@@ -148,9 +181,12 @@ class FdManager
     static pthread_mutex_t fd_manager_lock;
     static bool            is_lock_init;
     static std::string     cache_dir;
-    static size_t          page_size;
+    static size_t          free_disk_space;   // limit free disk space
 
     fdent_map_t fent;
 
+  private:
+    static fsblkcnt_t GetFreeDiskSpace(const char* path);
+
   public:
     FdManager();
@@ -164,17 +200,22 @@ class FdManager
     static bool SetCacheDir(const char* dir);
     static bool IsCacheDir(void) { return (0 < FdManager::cache_dir.size()); }
     static const char* GetCacheDir(void) { return FdManager::cache_dir.c_str(); }
-    static size_t SetPageSize(size_t size);
-    static size_t GetPageSize(void) { return FdManager::page_size; }
     static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true);
     static bool CheckCacheTopDir(void);
     static bool MakeRandomTempPath(const char* path, std::string& tmppath);
+
+    static size_t GetEnsureFreeDiskSpace(void) { return FdManager::free_disk_space; }
+    static size_t SetEnsureFreeDiskSpace(size_t size);
+    static size_t SetEnsureFreeDiskSpace(void) { return FdManager::SetEnsureFreeDiskSpace(FdManager::free_disk_space); }
+    static size_t InitEnsureFreeDiskSpace(void) { return FdManager::SetEnsureFreeDiskSpace(0); }
+    static bool IsSafeDiskSpace(const char* path, size_t size);
 
     FdEntity* GetFdEntity(const char* path, int existfd = -1);
-    FdEntity* Open(const char* path, off_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true);
+    FdEntity* Open(const char* path, headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true);
     FdEntity* ExistOpen(const char* path, int existfd = -1);
     void Rename(const std::string &from, const std::string &to);
     bool Close(FdEntity* ent);
+    bool ChangeEntityToTempPath(FdEntity* ent, const char* path);
 };
 
 #endif // FD_CACHE_H_
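FdManager now owns the free-space policy: free_disk_space holds the configured reserve, SetEnsureFreeDiskSpace/InitEnsureFreeDiskSpace manage it, and IsSafeDiskSpace answers whether a planned download or temporary file still leaves that reserve. A hedged sketch of the check, built on statvfs(3) as the new <sys/statvfs.h> include and the fsblkcnt_t return type of GetFreeDiskSpace suggest; the actual body is in the suppressed fdcache.cpp diff and may differ in detail:

#include <sys/statvfs.h>
#include "fdcache.h"

// Assumed shape of the safety check: the filesystem holding the cache (or
// temporary) files must keep at least "reserve + size" bytes available.
static bool is_safe_disk_space_sketch(const char* path, size_t size)
{
  const char* target = (path && '\0' != path[0]) ? path : FdManager::GetCacheDir();
  struct statvfs vfs;
  if(-1 == statvfs(target, &vfs)){
    return false;                                   // unknown -> treat as unsafe
  }
  unsigned long long avail = static_cast<unsigned long long>(vfs.f_bavail) * vfs.f_frsize;
  return avail >= static_cast<unsigned long long>(size) + FdManager::GetEnsureFreeDiskSpace();
}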
src/s3fs.cpp (195 lines changed)
@@ -719,10 +719,11 @@ static FdEntity* get_local_fent(const char* path, bool is_load)
 {
   struct stat stobj;
   FdEntity*   ent;
+  headers_t   meta;
 
   S3FS_PRN_INFO2("[path=%s]", path);
 
-  if(0 != get_object_attribute(path, &stobj)){
+  if(0 != get_object_attribute(path, &stobj, &meta)){
     return NULL;
   }
 
@@ -730,17 +731,16 @@ static FdEntity* get_local_fent(const char* path, bool is_load)
   time_t mtime = (!S_ISREG(stobj.st_mode) || S_ISLNK(stobj.st_mode)) ? -1 : stobj.st_mtime;
   bool force_tmpfile = S_ISREG(stobj.st_mode) ? false : true;
 
-  if(NULL == (ent = FdManager::get()->Open(path, stobj.st_size, mtime, force_tmpfile, true))){
+  if(NULL == (ent = FdManager::get()->Open(path, &meta, static_cast<ssize_t>(stobj.st_size), mtime, force_tmpfile, true))){
     S3FS_PRN_ERR("Coult not open file. errno(%d)", errno);
     return NULL;
   }
   // load
-  if(is_load && !ent->LoadFull()){
+  if(is_load && !ent->OpenAndLoadAll(&meta)){
     S3FS_PRN_ERR("Coult not load file. errno(%d)", errno);
     FdManager::get()->Close(ent);
     return NULL;
   }
 
   return ent;
 }
 
@@ -778,7 +778,7 @@ static int put_headers(const char* path, headers_t& meta, bool is_copy)
     // no opened fd
     if(FdManager::get()->IsCacheDir()){
       // create cache file if be needed
-      ent = FdManager::get()->Open(path, buf.st_size, -1, false, true);
+      ent = FdManager::get()->Open(path, &meta, static_cast<ssize_t>(buf.st_size), -1, false, true);
     }
   }
   if(ent){
@@ -807,6 +807,7 @@ static int s3fs_getattr(const char* path, struct stat* stbuf)
   // (See: Issue 241)
   if(stbuf){
     FdEntity* ent;
+
     if(NULL != (ent = FdManager::get()->ExistOpen(path))){
       struct stat tmpstbuf;
       if(ent->GetStats(tmpstbuf)){
@@ -835,18 +836,18 @@ static int s3fs_readlink(const char* path, char* buf, size_t size)
     return -EIO;
   }
   // Get size
-  off_t readsize;
+  size_t readsize;
   if(!ent->GetSize(readsize)){
     S3FS_PRN_ERR("could not get file size(file=%s)", path);
     FdManager::get()->Close(ent);
     return -EIO;
   }
-  if(static_cast<off_t>(size) <= readsize){
+  if(size <= readsize){
     readsize = size - 1;
   }
   // Read
   ssize_t ressize;
-  if(0 > (ressize = ent->Read(buf, 0, static_cast<size_t>(readsize)))){
+  if(0 > (ressize = ent->Read(buf, 0, readsize))){
     S3FS_PRN_ERR("could not read file(file=%s, errno=%zd)", path, ressize);
     FdManager::get()->Close(ent);
     return static_cast<int>(ressize);
@@ -947,8 +948,11 @@ static int s3fs_create(const char* path, mode_t mode, struct fuse_file_info* fi)
     return result;
   }
 
-  FdEntity* ent;
-  if(NULL == (ent = FdManager::get()->Open(path, 0, -1, false, true))){
+
+  FdEntity* ent;
+  headers_t meta;
+  get_object_attribute(path, NULL, &meta);
+  if(NULL == (ent = FdManager::get()->Open(path, &meta, 0, -1, false, true))){
     return -EIO;
   }
   fi->fh = ent->GetFd();
@@ -1127,7 +1131,7 @@ static int s3fs_symlink(const char* from, const char* to)
 
   // open tmpfile
   FdEntity* ent;
-  if(NULL == (ent = FdManager::get()->Open(to, 0, -1, true, true))){
+  if(NULL == (ent = FdManager::get()->Open(to, &headers, 0, -1, true, true))){
     S3FS_PRN_ERR("could not open tmpfile(errno=%d)", errno);
     return -errno;
   }
@@ -1139,7 +1143,7 @@ static int s3fs_symlink(const char* from, const char* to)
     return -errno;
   }
   // upload
-  if(0 != (result = ent->Flush(headers, true))){
+  if(0 != (result = ent->Flush(true))){
     S3FS_PRN_WARN("could not upload tmpfile(result=%d)", result);
   }
   FdManager::get()->Close(ent);
@@ -1219,7 +1223,7 @@ static int rename_object_nocopy(const char* from, const char* to)
   }
 
   // upload
-  if(0 != (result = ent->RowFlush(to, meta, true))){
+  if(0 != (result = ent->RowFlush(to, true))){
     S3FS_PRN_ERR("could not upload file(%s): result=%d", to, result);
     FdManager::get()->Close(ent);
     return result;
@@ -1531,13 +1535,12 @@ static int s3fs_chmod(const char* path, mode_t mode)
 
 static int s3fs_chmod_nocopy(const char* path, mode_t mode)
 {
   int result;
   string strpath;
   string newpath;
   string nowcache;
-  headers_t meta;
   struct stat stbuf;
   int nDirType = DIRTYPE_UNKNOWN;
 
   S3FS_PRN_INFO1("[path=%s][mode=%04o]", path, mode);
 
@@ -1554,11 +1557,11 @@ static int s3fs_chmod_nocopy(const char* path, mode_t mode)
 
   // Get attributes
   if(S_ISDIR(stbuf.st_mode)){
-    result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType);
+    result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType);
   }else{
     strpath = path;
     nowcache = strpath;
-    result = get_object_attribute(strpath.c_str(), NULL, &meta);
+    result = get_object_attribute(strpath.c_str(), NULL, NULL);
   }
   if(0 != result){
     return result;
@@ -1584,9 +1587,6 @@ static int s3fs_chmod_nocopy(const char* path, mode_t mode)
   }else{
     // normal object or directory object of newer version
 
-    // Change file mode
-    meta["x-amz-meta-mode"] = str(mode);
-
     // open & load
     FdEntity* ent;
     if(NULL == (ent = get_local_fent(strpath.c_str(), true))){
@@ -1594,8 +1594,11 @@ static int s3fs_chmod_nocopy(const char* path, mode_t mode)
       return -EIO;
     }
 
+    // Change file mode
+    ent->SetMode(mode);
+
     // upload
-    if(0 != (result = ent->Flush(meta, true))){
+    if(0 != (result = ent->Flush(true))){
      S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result);
      FdManager::get()->Close(ent);
      return result;
@@ -1693,13 +1696,12 @@ static int s3fs_chown(const char* path, uid_t uid, gid_t gid)
 
 static int s3fs_chown_nocopy(const char* path, uid_t uid, gid_t gid)
 {
   int result;
   string strpath;
   string newpath;
   string nowcache;
-  headers_t meta;
   struct stat stbuf;
   int nDirType = DIRTYPE_UNKNOWN;
 
   S3FS_PRN_INFO1("[path=%s][uid=%u][gid=%u]", path, (unsigned int)uid, (unsigned int)gid);
 
@@ -1716,11 +1718,11 @@ static int s3fs_chown_nocopy(const char* path, uid_t uid, gid_t gid)
 
   // Get attributes
   if(S_ISDIR(stbuf.st_mode)){
-    result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType);
+    result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType);
   }else{
     strpath = path;
     nowcache = strpath;
-    result = get_object_attribute(strpath.c_str(), NULL, &meta);
+    result = get_object_attribute(strpath.c_str(), NULL, NULL);
   }
   if(0 != result){
     return result;
@@ -1755,10 +1757,6 @@ static int s3fs_chown_nocopy(const char* path, uid_t uid, gid_t gid)
   }else{
     // normal object or directory object of newer version
 
-    // Change owner
-    meta["x-amz-meta-uid"] = str(uid);
-    meta["x-amz-meta-gid"] = str(gid);
-
     // open & load
     FdEntity* ent;
     if(NULL == (ent = get_local_fent(strpath.c_str(), true))){
@@ -1766,8 +1764,12 @@ static int s3fs_chown_nocopy(const char* path, uid_t uid, gid_t gid)
      return -EIO;
     }
 
+    // Change owner
+    ent->SetUId(uid);
+    ent->SetGId(gid);
+
     // upload
-    if(0 != (result = ent->Flush(meta, true))){
+    if(0 != (result = ent->Flush(true))){
      S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result);
      FdManager::get()->Close(ent);
      return result;
@@ -1851,13 +1853,12 @@ static int s3fs_utimens(const char* path, const struct timespec ts[2])
 
 static int s3fs_utimens_nocopy(const char* path, const struct timespec ts[2])
 {
   int result;
   string strpath;
   string newpath;
   string nowcache;
-  headers_t meta;
   struct stat stbuf;
   int nDirType = DIRTYPE_UNKNOWN;
 
   S3FS_PRN_INFO1("[path=%s][mtime=%s]", path, str(ts[1].tv_sec).c_str());
 
@@ -1876,11 +1877,11 @@ static int s3fs_utimens_nocopy(const char* path, const struct timespec ts[2])
 
   // Get attributes
   if(S_ISDIR(stbuf.st_mode)){
-    result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType);
+    result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType);
  }else{
     strpath = path;
     nowcache = strpath;
-    result = get_object_attribute(strpath.c_str(), NULL, &meta);
+    result = get_object_attribute(strpath.c_str(), NULL, NULL);
   }
   if(0 != result){
     return result;
@@ -1906,9 +1907,6 @@ static int s3fs_utimens_nocopy(const char* path, const struct timespec ts[2])
   }else{
     // normal object or directory object of newer version
 
-    // Change date
-    meta["x-amz-meta-mtime"] = str(ts[1].tv_sec);
-
     // open & load
     FdEntity* ent;
     if(NULL == (ent = get_local_fent(strpath.c_str(), true))){
@@ -1924,7 +1922,7 @@ static int s3fs_utimens_nocopy(const char* path, const struct timespec ts[2])
     }
 
     // upload
-    if(0 != (result = ent->Flush(meta, true))){
+    if(0 != (result = ent->Flush(true))){
      S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result);
      FdManager::get()->Close(ent);
      return result;
@@ -1946,6 +1944,10 @@ static int s3fs_truncate(const char* path, off_t size)
 
   S3FS_PRN_INFO("[path=%s][size=%jd]", path, (intmax_t)size);
 
+  if(size < 0){
+    size = 0;
+  }
+
   if(0 != (result = check_parent_object_access(path, X_OK))){
     return result;
   }
@@ -1956,11 +1958,11 @@
   // Get file information
   if(0 == (result = get_object_attribute(path, NULL, &meta))){
     // Exists -> Get file(with size)
-    if(NULL == (ent = FdManager::get()->Open(path, size, -1, false, true))){
+    if(NULL == (ent = FdManager::get()->Open(path, &meta, static_cast<ssize_t>(size), -1, false, true))){
       S3FS_PRN_ERR("could not open file(%s): errno=%d", path, errno);
       return -EIO;
     }
-    if(0 != (result = ent->Load(0, size))){
+    if(0 != (result = ent->Load(0, static_cast<size_t>(size)))){
      S3FS_PRN_ERR("could not download file(%s): result=%d", path, result);
      FdManager::get()->Close(ent);
      return result;
@@ -1968,14 +1970,25 @@ static int s3fs_truncate(const char* path, off_t size)
 
   }else{
     // Not found -> Make tmpfile(with size)
-    if(NULL == (ent = FdManager::get()->Open(path, size, -1, true, true))){
+
+    struct fuse_context* pcxt;
+    if(NULL == (pcxt = fuse_get_context())){
+      return -EIO;
+    }
+    meta["Content-Type"]     = string("application/octet-stream"); // Static
+    meta["x-amz-meta-mode"]  = str(S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO);
+    meta["x-amz-meta-mtime"] = str(time(NULL));
+    meta["x-amz-meta-uid"]   = str(pcxt->uid);
+    meta["x-amz-meta-gid"]   = str(pcxt->gid);
+
+    if(NULL == (ent = FdManager::get()->Open(path, &meta, static_cast<ssize_t>(size), -1, true, true))){
       S3FS_PRN_ERR("could not open file(%s): errno=%d", path, errno);
       return -EIO;
     }
   }
 
   // upload
-  if(0 != (result = ent->Flush(meta, true))){
+  if(0 != (result = ent->Flush(true))){
     S3FS_PRN_ERR("could not upload file(%s): result=%d", path, result);
     FdManager::get()->Close(ent);
     return result;
@@ -2021,8 +2034,10 @@ static int s3fs_open(const char* path, struct fuse_file_info* fi)
     st.st_mtime = -1;
   }
 
   FdEntity* ent;
-  if(NULL == (ent = FdManager::get()->Open(path, st.st_size, st.st_mtime, false, true))){
+  headers_t meta;
+  get_object_attribute(path, NULL, &meta);
+  if(NULL == (ent = FdManager::get()->Open(path, &meta, static_cast<ssize_t>(st.st_size), st.st_mtime, false, true))){
     return -EIO;
   }
   fi->fh = ent->GetFd();
@@ -2047,8 +2062,8 @@ static int s3fs_read(const char* path, char* buf, size_t size, off_t offset, str
   }
 
   // check real file size
-  off_t realsize = 0;
-  if(!ent->GetSize(realsize) || 0 >= realsize){
+  size_t realsize = 0;
+  if(!ent->GetSize(realsize) || realsize <= 0){
     S3FS_PRN_ERR("file size is 0, so break to read.");
     FdManager::get()->Close(ent);
     return 0;
@@ -2116,20 +2131,8 @@ static int s3fs_flush(const char* path, struct fuse_file_info* fi)
 
   FdEntity* ent;
   if(NULL != (ent = FdManager::get()->ExistOpen(path, static_cast<int>(fi->fh)))){
-    headers_t meta;
-    if(0 != (result = get_object_attribute(path, NULL, &meta))){
-      FdManager::get()->Close(ent);
-      return result;
-    }
-
-    // If both mtime are not same, force to change mtime based on fd.
-    time_t ent_mtime;
-    if(ent->GetMtime(ent_mtime)){
-      if(str(ent_mtime) != meta["x-amz-meta-mtime"]){
-        meta["x-amz-meta-mtime"] = str(ent_mtime);
-      }
-    }
-    result = ent->Flush(meta, false);
+    ent->UpdateMtime();
+    result = ent->Flush(false);
     FdManager::get()->Close(ent);
   }
   S3FS_MALLOCTRIM(0);
@@ -2148,20 +2151,10 @@ static int s3fs_fsync(const char* path, int datasync, struct fuse_file_info* fi)
 
   FdEntity* ent;
   if(NULL != (ent = FdManager::get()->ExistOpen(path, static_cast<int>(fi->fh)))){
-    headers_t meta;
-    if(0 != (result = get_object_attribute(path, NULL, &meta))){
-      FdManager::get()->Close(ent);
-      return result;
-    }
-
-    // If datasync is not zero, only flush data without meta updating.
-    time_t ent_mtime;
-    if(ent->GetMtime(ent_mtime)){
-      if(0 == datasync && str(ent_mtime) != meta["x-amz-meta-mtime"]){
-        meta["x-amz-meta-mtime"] = str(ent_mtime);
-      }
-    }
-    result = ent->Flush(meta, false);
+    if(0 == datasync){
+      ent->UpdateMtime();
+    }
+    result = ent->Flush(false);
     FdManager::get()->Close(ent);
   }
   S3FS_MALLOCTRIM(0);
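s3fs_flush and s3fs_fsync no longer rebuild the header map from a fresh attribute lookup; FdEntity keeps the headers from open time (orgmeta) and the new setters (UpdateMtime, SetMode, SetUId, SetGId) patch them before Flush uploads. A hedged sketch of what UpdateMtime presumably does with the file descriptor and the kept headers; the actual body is in the suppressed fdcache.cpp diff:

#include <stdio.h>
#include <sys/stat.h>
#include <string>
#include "fdcache.h"   // headers_t comes in via curl.h

// Assumed shape of FdEntity::UpdateMtime: read the local file's mtime and
// store it into the headers kept since open, so that the next Flush uploads
// it as x-amz-meta-mtime. Illustration only, not the real implementation.
static bool update_mtime_sketch(int fd, headers_t& orgmeta)
{
  struct stat st;
  if(-1 == fstat(fd, &st)){
    return false;
  }
  char buf[32];
  snprintf(buf, sizeof(buf), "%lld", static_cast<long long>(st.st_mtime));
  orgmeta["x-amz-meta-mtime"] = std::string(buf);
  return true;
}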
@@ -2501,7 +2494,7 @@ static int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathC
     return -1;
   }
   if(xmlXPathNodeSetIsEmpty(contents_xp->nodesetval)){
-    S3FS_PRN_ERR("contents_xp->nodesetval is empty.");
+    S3FS_PRN_WARN("contents_xp->nodesetval is empty.");
     S3FS_XMLXPATHFREEOBJECT(contents_xp);
     return 0;
   }
@@ -4467,19 +4460,10 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
         return -1;
       }
       S3fsCurl::SetMaxParallelCount(maxpara);
-
-      if(FdManager::GetPageSize() < static_cast<size_t>(S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount())){
-        FdManager::SetPageSize(static_cast<size_t>(S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount()));
-      }
       return 0;
     }
     if(0 == STR2NCMP(arg, "fd_page_size=")){
-      size_t pagesize = static_cast<size_t>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
-      if(pagesize < static_cast<size_t>(S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount())){
-        S3FS_PRN_EXIT("argument should be over 1MB: fd_page_size");
-        return -1;
-      }
-      FdManager::SetPageSize(pagesize);
+      S3FS_PRN_ERR("option fd_page_size is no longer supported, so skip this option.");
       return 0;
     }
     if(0 == STR2NCMP(arg, "multipart_size=")){
@@ -4488,9 +4472,17 @@
        S3FS_PRN_EXIT("multipart_size option must be at least 10 MB.");
        return -1;
      }
-      if(FdManager::GetPageSize() < static_cast<size_t>(S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount())){
-        FdManager::SetPageSize(static_cast<size_t>(S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount()));
+      // update ensure free disk space
+      FdManager::SetEnsureFreeDiskSpace();
+      return 0;
+    }
+    if(0 == STR2NCMP(arg, "ensure_diskfree=")){
+      size_t dfsize = static_cast<size_t>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char))) * 1024 * 1024;
+      if(dfsize < static_cast<size_t>(S3fsCurl::GetMultipartSize())){
+        S3FS_PRN_WARN("specified size to ensure disk free space is smaller than multipart size, so set multipart size to it.");
+        dfsize = static_cast<size_t>(S3fsCurl::GetMultipartSize());
       }
+      FdManager::SetEnsureFreeDiskSpace(dfsize);
       return 0;
     }
     if(0 == STR2NCMP(arg, "singlepart_copy_limit=")){
@@ -4789,6 +4781,13 @@ int main(argc, char* argv[])
     exit(s3fs_utility_mode());
   }
 
+  // check free disk space
+  FdManager::InitEnsureFreeDiskSpace();
+  if(!FdManager::IsSafeDiskSpace(NULL, S3fsCurl::GetMultipartSize())){
+    S3FS_PRN_EXIT("There is no enough disk space for used as cache(or temporary) directory by s3fs.");
+    exit(EXIT_FAILURE);
+  }
+
   s3fs_oper.getattr  = s3fs_getattr;
   s3fs_oper.readlink = s3fs_readlink;
   s3fs_oper.mknod    = s3fs_mknod;
@@ -1017,18 +1017,16 @@ void show_help (void)
     "   multipart_size (default=\"10\")\n"
     "      - part size, in MB, for each multipart request.\n"
     "\n"
+    "   ensure_diskfree (default same multipart_size value)\n"
+    "      - sets MB to ensure disk free space. s3fs makes file for\n"
+    "        downloading, uploading and caching files. If the disk free\n"
+    "        space is smaller than this value, s3fs do not use diskspace\n"
+    "        as possible in exchange for the performance.\n"
+    "\n"
     "   singlepart_copy_limit (default=\"5120\")\n"
     "      - maximum size, in MB, of a single-part copy before trying \n"
     "        multipart copy.\n"
     "\n"
-    "   fd_page_size (default=\"52428800\"(50MB))\n"
-    "      - number of internal management page size for each file descriptor.\n"
-    "        For delayed reading and writing by s3fs, s3fs manages pages which \n"
-    "        is separated from object. Each pages has a status that data is \n"
-    "        already loaded(or not loaded yet).\n"
-    "        This option should not be changed when you don't have a trouble \n"
-    "        with performance.\n"
-    "\n"
     "   url (default=\"http://s3.amazonaws.com\")\n"
     "      - sets the url to use to access amazon s3\n"
     "\n"