mirror of https://github.com/s3fs-fuse/s3fs-fuse.git (synced 2024-12-22 16:58:55 +00:00)

Merge pull request #280 from ggtakec/master
Supported an object which is larger than free disk space

This commit is contained in: 70db77af38
@@ -158,19 +158,16 @@ number of parallel request for uploading big objects.
s3fs uploads large object(default:over 20MB) by multipart post request, and sends parallel requests.
This option limits parallel request count which s3fs requests at once.
It is necessary to set this value depending on a CPU and a network band.
This option is lated to fd_page_size option and affects it.
.TP
\fB\-o\fR fd_page_size(default="52428800"(50MB))
number of internal management page size for each file descriptor.
For delayed reading and writing by s3fs, s3fs manages pages which is separated from object. Each pages has a status that data is already loaded(or not loaded yet).
This option should not be changed when you don't have a trouble with performance.
This value is changed automatically by parallel_count and multipart_size values(fd_page_size value = parallel_count * multipart_size).
.TP
\fB\-o\fR multipart_size(default="10"(10MB))
number of one part size in multipart uploading request.
The default size is 10MB(10485760byte), this value is minimum size.
Specify number of MB and over 10(MB).
This option is lated to fd_page_size option and affects it.
The default size is 10MB(10485760byte), minimum value is 5MB(5242880byte).
Specify number of MB and over 5(MB).
.TP
\fB\-o\fR ensure_diskfree(default the same as multipart_size value)
sets MB to ensure disk free space. This option means the threshold of free space size on disk which is used for the cache file by s3fs.
s3fs makes file for downloading, and uploading and caching files.
If the disk free space is smaller than this value, s3fs do not use diskspace as possible in exchange for the performance.
.TP
\fB\-o\fR url (default="http://s3.amazonaws.com")
sets the url to use to access Amazon S3. If you want to use HTTPS, then you can set url=https://s3.amazonaws.com
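To make the documented relationship between these options concrete, here is a minimal standalone sketch (the function and constant names are illustrative only, not code from this commit): the multipart_size value is given in MB, converted to bytes, and rejected below the 5 MB minimum, and ensure_diskfree defaults to the same value as the part size.

#include <cstdint>
#include <cstdio>

// Illustrative constant mirroring the 5MB lower bound documented above.
static const int64_t MIN_PART_SIZE_BYTES = 5 * 1024 * 1024;

// Convert a multipart_size option value (in MB) to bytes and reject
// anything under the minimum, as the updated option text describes.
static bool validate_multipart_size(int64_t size_mb, int64_t& out_bytes)
{
  int64_t bytes = size_mb * 1024 * 1024;
  if(bytes < MIN_PART_SIZE_BYTES){
    return false;
  }
  out_bytes = bytes;
  return true;
}

int main()
{
  int64_t part_bytes = 0;
  if(!validate_multipart_size(10, part_bytes)){   // default "10" (10MB)
    return 1;
  }
  // ensure_diskfree defaults to the same value as multipart_size.
  int64_t ensure_diskfree_bytes = part_bytes;
  printf("part=%lld ensure_diskfree=%lld\n",
         (long long)part_bytes, (long long)ensure_diskfree_bytes);
  return 0;
}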
src/curl.cpp (39 lines changed)

@@ -1025,7 +1025,7 @@ string S3fsCurl::SetIAMRole(const char* role)
bool S3fsCurl::SetMultipartSize(off_t size)
{
  size = size * 1024 * 1024;
  if(size < MULTIPART_SIZE){
  if(size < MIN_MULTIPART_SIZE){
    return false;
  }
  S3fsCurl::multipart_size = size;

@@ -3373,6 +3373,41 @@ int S3fsCurl::MultipartUploadRequest(const char* tpath, headers_t& meta, int fd,
  return 0;
}

int S3fsCurl::MultipartUploadRequest(string upload_id, const char* tpath, int fd, off_t offset, size_t size, etaglist_t& list)
{
  S3FS_PRN_INFO3("[upload_id=%s][tpath=%s][fd=%d][offset=%jd][size=%jd]", upload_id.c_str(), SAFESTRPTR(tpath), fd, (intmax_t)offset, (intmax_t)size);

  // duplicate fd
  int fd2;
  if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){
    S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno);
    if(-1 != fd2){
      close(fd2);
    }
    return -errno;
  }

  // set
  partdata.fd         = fd2;
  partdata.startpos   = offset;
  partdata.size       = size;
  b_partdata_startpos = partdata.startpos;
  b_partdata_size     = partdata.size;

  // upload part
  int result;
  if(0 != (result = UploadMultipartPostRequest(tpath, (list.size() + 1), upload_id))){
    S3FS_PRN_ERR("failed uploading part(%d)", result);
    close(fd2);
    return result;
  }
  list.push_back(partdata.etag);
  DestroyCurlHandle();
  close(fd2);

  return 0;
}

int S3fsCurl::MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size)
{
  int result;

@@ -3604,7 +3639,7 @@ int S3fsMultiCurl::MultiRead(void)
      isRetry = true;
    }else if(404 == responseCode){
      // not found
      S3FS_PRN_ERR("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
      S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
    }else if(500 == responseCode){
      // case of all other result, do retry.(11/13/2013)
      // because it was found that s3fs got 500 error from S3, but could success
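The new MultipartUploadRequest overload uploads a single part of an already-started multipart upload, collecting the part's ETag into the caller's list. A rough, illustrative driver for that call pattern is sketched below; the FakeUploader class and its method names are stand-ins, not the S3fsCurl API, and only the pre/part/complete sequencing mirrors the commit.

#include <cstdio>
#include <string>
#include <vector>

// Stand-in for the real S3fsCurl interface; only the call pattern matters here.
struct FakeUploader {
  bool PreMultipartPost(std::string& upload_id) {
    upload_id = "example-upload-id";                 // ~ PreMultipartPostRequest()
    return true;
  }
  bool UploadPart(const std::string&, int part_num, std::string& etag) {
    etag = "etag-" + std::to_string(part_num);        // ~ MultipartUploadRequest(upload_id, ...)
    return true;
  }
  bool CompleteMultipartPost(const std::string& upload_id, const std::vector<std::string>& etags) {
    printf("complete %s with %zu parts\n", upload_id.c_str(), etags.size());
    return true;                                      // ~ CompleteMultipartPostRequest()
  }
};

int main()
{
  FakeUploader uploader;
  std::string upload_id;
  std::vector<std::string> etags;

  // 1) start the multipart upload once
  if(!uploader.PreMultipartPost(upload_id)) return 1;

  // 2) upload each part as its data becomes available, accumulating ETags
  for(int part = 1; part <= 3; ++part){
    std::string etag;
    if(!uploader.UploadPart(upload_id, part, etag)) return 1;
    etags.push_back(etag);
  }

  // 3) complete the upload with the accumulated part list
  if(!uploader.CompleteMultipartPost(upload_id, etags)) return 1;
  return 0;
}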
src/curl.h (12 lines changed)

@@ -20,6 +20,11 @@
#ifndef S3FS_CURL_H_
#define S3FS_CURL_H_

//----------------------------------------------
// Symbols
//----------------------------------------------
#define MIN_MULTIPART_SIZE 5242880 // 5MB

//----------------------------------------------
// class BodyData
//----------------------------------------------

@@ -273,10 +278,7 @@ class S3fsCurl
    bool GetUploadId(std::string& upload_id);
    int GetIAMCredentials(void);

    int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy);
    int CompleteMultipartPostRequest(const char* tpath, std::string& upload_id, etaglist_t& parts);
    int UploadMultipartPostSetup(const char* tpath, int part_num, std::string& upload_id);
    int UploadMultipartPostRequest(const char* tpath, int part_num, std::string& upload_id);
    int CopyMultipartPostRequest(const char* from, const char* to, int part_num, std::string& upload_id, headers_t& meta);

  public:

@@ -353,10 +355,14 @@ class S3fsCurl
    int GetObjectRequest(const char* tpath, int fd, off_t start = -1, ssize_t size = -1);
    int CheckBucket(void);
    int ListBucketRequest(const char* tpath, const char* query);
    int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy);
    int CompleteMultipartPostRequest(const char* tpath, std::string& upload_id, etaglist_t& parts);
    int UploadMultipartPostRequest(const char* tpath, int part_num, std::string& upload_id);
    int MultipartListRequest(std::string& body);
    int AbortMultipartUpload(const char* tpath, std::string& upload_id);
    int MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool is_copy);
    int MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool is_copy);
    int MultipartUploadRequest(std::string upload_id, const char* tpath, int fd, off_t offset, size_t size, etaglist_t& list);
    int MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size);

    // methods(valiables)
src/fdcache.cpp (1395 lines changed)
File diff suppressed because it is too large.

src/fdcache.h (111 lines changed)
@@ -20,6 +20,9 @@
#ifndef FD_CACHE_H_
#define FD_CACHE_H_

#include <sys/statvfs.h>
#include "curl.h"

//------------------------------------------------
// CacheFileStat
//------------------------------------------------
@@ -53,40 +56,49 @@ struct fdpage
{
  off_t offset;
  size_t bytes;
  bool init;
  bool loaded;

  fdpage(off_t start = 0, size_t size = 0, bool is_init = false)
    : offset(start), bytes(size), init(is_init) {}
  fdpage(off_t start = 0, size_t size = 0, bool is_loaded = false)
    : offset(start), bytes(size), loaded(is_loaded) {}

  off_t next(void) const { return (offset + bytes); }
  off_t end(void) const { return (0 < bytes ? offset + bytes - 1 : 0); }
};
typedef std::list<struct fdpage*> fdpage_list_t;

class FdEntity;

//
// Management of loading area/modifying
//
class PageList
{
  friend class FdEntity; // only one method access directly pages.

  private:
    fdpage_list_t pages;

  private:
    void Clear(void);
    bool Compress(void);
    bool Parse(off_t new_pos);

  public:
    static void FreeList(fdpage_list_t& list);

    explicit PageList(off_t size = 0, bool is_init = false);
    explicit PageList(size_t size = 0, bool is_loaded = false);
    ~PageList();

    off_t Size(void) const;
    int Resize(off_t size, bool is_init);
    int Init(off_t size, bool is_init);
    bool IsInit(off_t start, off_t size);
    bool SetInit(off_t start, off_t size, bool is_init = true);
    bool FindUninitPage(off_t start, off_t& resstart, size_t& ressize);
    int GetUninitPages(fdpage_list_t& uninit_list, off_t start = 0, off_t size = -1);
    bool Init(size_t size, bool is_loaded);
    size_t Size(void) const;
    bool Resize(size_t size, bool is_loaded);

    bool IsPageLoaded(off_t start = 0, size_t size = 0) const; // size=0 is checking to end of list
    bool SetPageLoadedStatus(off_t start, size_t size, bool is_loaded = true, bool is_compress = true);
    bool FindUnloadedPage(off_t start, off_t& resstart, size_t& ressize) const;
    size_t GetTotalUnloadedPageSize(off_t start = 0, size_t size = 0) const; // size=0 is checking to end of list
    int GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, size_t size = 0) const; // size=0 is checking to end of list

    bool Serialize(CacheFileStat& file, bool is_output);
    void Dump(void);
};
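The renamed PageList interface tracks which byte ranges of the local file are already loaded, rather than merely "initialized". The following is a simplified, self-contained sketch of that idea; it is not the PageList implementation, and the split/merge logic here is only illustrative of what SetPageLoadedStatus and GetTotalUnloadedPageSize imply.

#include <algorithm>
#include <cstdio>
#include <list>

// One page: a byte range plus a flag saying whether its data is present locally.
struct Page {
  long offset;
  long bytes;
  bool loaded;
  long next() const { return offset + bytes; }
};

// Toy page list: mark [start, start+size) as loaded by splitting overlapping pages.
class ToyPageList {
  std::list<Page> pages;
public:
  explicit ToyPageList(long total) { pages.push_back(Page{0, total, false}); }

  void SetLoaded(long start, long size) {
    std::list<Page> out;
    const long s = start, e = start + size;
    for(const Page& p : pages){
      const long ps = p.offset, pe = p.next();
      if(pe <= s || e <= ps){ out.push_back(p); continue; }        // no overlap
      if(ps < s) out.push_back(Page{ps, s - ps, p.loaded});         // left remainder
      out.push_back(Page{std::max(ps, s), std::min(pe, e) - std::max(ps, s), true});
      if(e < pe) out.push_back(Page{e, pe - e, p.loaded});          // right remainder
    }
    pages.swap(out);
  }

  long TotalUnloaded() const {
    long sum = 0;
    for(const Page& p : pages) if(!p.loaded) sum += p.bytes;
    return sum;
  }
};

int main() {
  ToyPageList list(100);
  list.SetLoaded(10, 30);                            // bytes 10..39 downloaded
  printf("unloaded=%ld\n", list.TotalUnloaded());    // prints 70
  return 0;
}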
@@ -100,17 +112,28 @@ class FdEntity
    pthread_mutex_t fdent_lock;
    bool is_lock_init;
    PageList pagelist;
    int refcnt;                // reference count
    std::string path;          // object path
    std::string cachepath;     // local cache file path
    int fd;                    // file descriptor(tmp file or cache file)
    FILE* file;                // file pointer(tmp file or cache file)
    bool is_modify;            // if file is changed, this flag is true
    int refcnt;                // reference count
    std::string path;          // object path
    std::string cachepath;     // local cache file path
                               // (if this is empty, does not load/save pagelist.)
    int fd;                    // file descriptor(tmp file or cache file)
    FILE* pfile;               // file pointer(tmp file or cache file)
    bool is_modify;            // if file is changed, this flag is true
    headers_t orgmeta;         // original headers at opening
    size_t size_orgmeta;       // original file size in original headers

    std::string upload_id;     // for no cached multipart uploading when no disk space
    etaglist_t etaglist;       // for no cached multipart uploading when no disk space
    off_t mp_start;            // start position for no cached multipart(write method only)
    size_t mp_size;            // size for no cached multipart(write method only)

  private:
    static int FillFile(int fd, unsigned char byte, size_t size, off_t start);

    void Clear(void);
    int Dup(void);
    bool SetAllStatus(bool is_enable);
    bool SetAllStatus(bool is_loaded);   // [NOTE] not locking
    //bool SetAllStatusLoaded(void) { return SetAllStatus(true); }
    bool SetAllStatusUnloaded(void) { return SetAllStatus(false); }

  public:
    explicit FdEntity(const char* tpath = NULL, const char* cpath = NULL);

@@ -118,21 +141,32 @@ class FdEntity

    void Close(void);
    bool IsOpen(void) const { return (-1 != fd); }
    int Open(off_t size = -1, time_t time = -1);
    int Open(headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1);
    bool OpenAndLoadAll(headers_t* pmeta = NULL, size_t* size = NULL, bool force_load = false);
    int Dup(void);

    const char* GetPath(void) const { return path.c_str(); }
    void SetPath(const std::string &newpath) { path = newpath; }
    int GetFd(void) const { return fd; }
    int SetMtime(time_t time);
    bool GetSize(off_t& size);
    bool GetMtime(time_t& time);
    bool GetStats(struct stat& st);

    bool SetAllEnable(void) { return SetAllStatus(true); }
    bool SetAllDisable(void) { return SetAllStatus(false); }
    bool LoadFull(off_t* size = NULL, bool force_load = false);
    int Load(off_t start, off_t size);
    int RowFlush(const char* tpath, headers_t& meta, bool force_sync = false);
    int Flush(headers_t& meta, bool force_sync = false) { return RowFlush(NULL, meta, force_sync); }
    bool GetStats(struct stat& st);
    int SetMtime(time_t time);
    bool UpdateMtime(void);
    bool GetSize(size_t& size);
    bool SetMode(mode_t mode);
    bool SetUId(uid_t uid);
    bool SetGId(gid_t gid);
    bool SetContentType(const char* path);

    int Load(off_t start = 0, size_t size = 0);                 // size=0 means loading to end
    int NoCacheLoadAndPost(off_t start = 0, size_t size = 0);   // size=0 means loading to end
    int NoCachePreMultipartPost(void);
    int NoCacheMultipartPost(int tgfd, off_t start, size_t size);
    int NoCacheCompleteMultipartPost(void);

    int RowFlush(const char* tpath, bool force_sync = false);
    int Flush(bool force_sync = false) { return RowFlush(NULL, force_sync); }

    ssize_t Read(char* bytes, off_t start, size_t size, bool force_load = false);
    ssize_t Write(const char* bytes, off_t start, size_t size);
};
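The new NoCachePreMultipartPost / NoCacheMultipartPost / NoCacheCompleteMultipartPost methods declared above let FdEntity stream a large file to S3 in parts instead of staging the whole object on disk. Below is a simplified, hypothetical outline of how a write path could drive that state (the buffering thresholds and class are assumptions for illustration, not the commit's code):

#include <cstdio>
#include <string>
#include <vector>

// Toy model of FdEntity's no-cache upload state: once buffered data reaches the
// part size, flush it as one multipart part and reuse the local buffer space.
class ToyNoCacheWriter {
  std::string              upload_id;
  std::vector<std::string> etags;
  size_t                   part_size;
  size_t                   buffered;
public:
  explicit ToyNoCacheWriter(size_t psize) : part_size(psize), buffered(0) {}

  bool Write(const char* /*bytes*/, size_t size) {
    if(upload_id.empty()){
      upload_id = "example-upload-id";          // ~ NoCachePreMultipartPost()
    }
    buffered += size;
    while(buffered >= part_size){               // ~ NoCacheMultipartPost(fd, start, size)
      etags.push_back("etag-" + std::to_string(etags.size() + 1));
      buffered -= part_size;                    // local buffer space is reused
    }
    return true;
  }

  bool Flush() {                                // ~ final part + NoCacheCompleteMultipartPost()
    if(buffered > 0){
      etags.push_back("etag-" + std::to_string(etags.size() + 1));
      buffered = 0;
    }
    printf("completed %s with %zu parts\n", upload_id.c_str(), etags.size());
    return true;
  }
};

int main() {
  ToyNoCacheWriter writer(10 * 1024 * 1024);    // 10MB parts, as in the multipart_size default
  std::vector<char> chunk(4 * 1024 * 1024);     // pretend the application writes 4MB at a time
  for(int i = 0; i < 8; ++i){
    writer.Write(chunk.data(), chunk.size());   // 32MB total -> 3 full parts + a remainder
  }
  writer.Flush();
  return 0;
}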
@@ -148,9 +182,12 @@ class FdManager
    static pthread_mutex_t fd_manager_lock;
    static bool is_lock_init;
    static std::string cache_dir;
    static size_t page_size;
    static size_t free_disk_space;   // limit free disk space

    fdent_map_t fent;
    fdent_map_t fent;

  private:
    static fsblkcnt_t GetFreeDiskSpace(const char* path);

  public:
    FdManager();

@@ -164,17 +201,21 @@ class FdManager
    static bool SetCacheDir(const char* dir);
    static bool IsCacheDir(void) { return (0 < FdManager::cache_dir.size()); }
    static const char* GetCacheDir(void) { return FdManager::cache_dir.c_str(); }
    static size_t SetPageSize(size_t size);
    static size_t GetPageSize(void) { return FdManager::page_size; }
    static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true);
    static bool CheckCacheTopDir(void);
    static bool MakeRandomTempPath(const char* path, std::string& tmppath);

    static size_t GetEnsureFreeDiskSpace(void) { return FdManager::free_disk_space; }
    static size_t SetEnsureFreeDiskSpace(size_t size);
    static size_t InitEnsureFreeDiskSpace(void) { return SetEnsureFreeDiskSpace(0); }
    static bool IsSafeDiskSpace(const char* path, size_t size);

    FdEntity* GetFdEntity(const char* path, int existfd = -1);
    FdEntity* Open(const char* path, off_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true);
    FdEntity* Open(const char* path, headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true);
    FdEntity* ExistOpen(const char* path, int existfd = -1);
    void Rename(const std::string &from, const std::string &to);
    bool Close(FdEntity* ent);
    bool ChangeEntityToTempPath(FdEntity* ent, const char* path);
};

#endif // FD_CACHE_H_
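GetFreeDiskSpace and IsSafeDiskSpace are the hook for the "larger than free disk space" case: before reserving local cache space, the free bytes on the cache filesystem are compared against the configured ensure_diskfree threshold plus the size about to be written. A minimal standalone sketch of that check follows (the path and threshold values are illustrative, not s3fs defaults, and the helper names are not the s3fs functions):

#include <sys/statvfs.h>
#include <cstdio>

// Free bytes available to unprivileged users on the filesystem containing
// 'path' (0 on error), the kind of figure a statvfs(3)-based helper reports.
static unsigned long long free_bytes(const char* path)
{
  struct statvfs vfs;
  if(0 != statvfs(path, &vfs)){
    return 0;
  }
  return static_cast<unsigned long long>(vfs.f_bavail) * vfs.f_frsize;
}

// True if writing 'needed' more bytes would still leave at least 'reserve'
// bytes free, i.e. an IsSafeDiskSpace-style test.
static bool is_safe_disk_space(const char* path, unsigned long long needed, unsigned long long reserve)
{
  return free_bytes(path) >= (needed + reserve);
}

int main()
{
  const unsigned long long reserve = 10ULL * 1024 * 1024;   // e.g. ensure_diskfree=10
  const unsigned long long part    = 10ULL * 1024 * 1024;   // one multipart part
  printf("safe=%d\n", is_safe_disk_space("/tmp", part, reserve) ? 1 : 0);
  return 0;
}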
src/s3fs.cpp (214 lines changed)

@@ -719,10 +719,11 @@ static FdEntity* get_local_fent(const char* path, bool is_load)
{
  struct stat stobj;
  FdEntity* ent;
  headers_t meta;

  S3FS_PRN_INFO2("[path=%s]", path);

  if(0 != get_object_attribute(path, &stobj)){
  if(0 != get_object_attribute(path, &stobj, &meta)){
    return NULL;
  }

@@ -730,17 +731,16 @@ static FdEntity* get_local_fent(const char* path, bool is_load)
  time_t mtime = (!S_ISREG(stobj.st_mode) || S_ISLNK(stobj.st_mode)) ? -1 : stobj.st_mtime;
  bool force_tmpfile = S_ISREG(stobj.st_mode) ? false : true;

  if(NULL == (ent = FdManager::get()->Open(path, stobj.st_size, mtime, force_tmpfile, true))){
  if(NULL == (ent = FdManager::get()->Open(path, &meta, static_cast<ssize_t>(stobj.st_size), mtime, force_tmpfile, true))){
    S3FS_PRN_ERR("Coult not open file. errno(%d)", errno);
    return NULL;
  }
  // load
  if(is_load && !ent->LoadFull()){
  if(is_load && !ent->OpenAndLoadAll(&meta)){
    S3FS_PRN_ERR("Coult not load file. errno(%d)", errno);
    FdManager::get()->Close(ent);
    return NULL;
  }

  return ent;
}
@@ -778,7 +778,7 @@ static int put_headers(const char* path, headers_t& meta, bool is_copy)
    // no opened fd
    if(FdManager::get()->IsCacheDir()){
      // create cache file if be needed
      ent = FdManager::get()->Open(path, buf.st_size, -1, false, true);
      ent = FdManager::get()->Open(path, &meta, static_cast<ssize_t>(buf.st_size), -1, false, true);
    }
  }
  if(ent){

@@ -807,6 +807,7 @@ static int s3fs_getattr(const char* path, struct stat* stbuf)
  // (See: Issue 241)
  if(stbuf){
    FdEntity* ent;

    if(NULL != (ent = FdManager::get()->ExistOpen(path))){
      struct stat tmpstbuf;
      if(ent->GetStats(tmpstbuf)){

@@ -835,18 +836,18 @@ static int s3fs_readlink(const char* path, char* buf, size_t size)
    return -EIO;
  }
  // Get size
  off_t readsize;
  size_t readsize;
  if(!ent->GetSize(readsize)){
    S3FS_PRN_ERR("could not get file size(file=%s)", path);
    FdManager::get()->Close(ent);
    return -EIO;
  }
  if(static_cast<off_t>(size) <= readsize){
  if(size <= readsize){
    readsize = size - 1;
  }
  // Read
  ssize_t ressize;
  if(0 > (ressize = ent->Read(buf, 0, static_cast<size_t>(readsize)))){
  if(0 > (ressize = ent->Read(buf, 0, readsize))){
    S3FS_PRN_ERR("could not read file(file=%s, errno=%zd)", path, ressize);
    FdManager::get()->Close(ent);
    return static_cast<int>(ressize);
@@ -947,8 +948,11 @@ static int s3fs_create(const char* path, mode_t mode, struct fuse_file_info* fi)
    return result;
  }

  FdEntity* ent;
  if(NULL == (ent = FdManager::get()->Open(path, 0, -1, false, true))){

  FdEntity* ent;
  headers_t meta;
  get_object_attribute(path, NULL, &meta);
  if(NULL == (ent = FdManager::get()->Open(path, &meta, 0, -1, false, true))){
    return -EIO;
  }
  fi->fh = ent->GetFd();

@@ -1127,7 +1131,7 @@ static int s3fs_symlink(const char* from, const char* to)

  // open tmpfile
  FdEntity* ent;
  if(NULL == (ent = FdManager::get()->Open(to, 0, -1, true, true))){
  if(NULL == (ent = FdManager::get()->Open(to, &headers, 0, -1, true, true))){
    S3FS_PRN_ERR("could not open tmpfile(errno=%d)", errno);
    return -errno;
  }

@@ -1139,7 +1143,7 @@ static int s3fs_symlink(const char* from, const char* to)
    return -errno;
  }
  // upload
  if(0 != (result = ent->Flush(headers, true))){
  if(0 != (result = ent->Flush(true))){
    S3FS_PRN_WARN("could not upload tmpfile(result=%d)", result);
  }
  FdManager::get()->Close(ent);

@@ -1189,8 +1193,7 @@ static int rename_object(const char* from, const char* to)

static int rename_object_nocopy(const char* from, const char* to)
{
  int result;
  headers_t meta;
  int result;

  S3FS_PRN_INFO1("[from=%s][to=%s]", from , to);

@@ -1203,14 +1206,6 @@ static int rename_object_nocopy(const char* from, const char* to)
    return result;
  }

  // Get attributes
  if(0 != (result = get_object_attribute(from, NULL, &meta))){
    return result;
  }

  // Set header
  meta["Content-Type"] = S3fsCurl::LookupMimeType(string(to));

  // open & load
  FdEntity* ent;
  if(NULL == (ent = get_local_fent(from, true))){

@@ -1218,8 +1213,14 @@ static int rename_object_nocopy(const char* from, const char* to)
    return -EIO;
  }

  // Set header
  if(!ent->SetContentType(to)){
    S3FS_PRN_ERR("could not set content-type for %s", to);
    return -EIO;
  }

  // upload
  if(0 != (result = ent->RowFlush(to, meta, true))){
  if(0 != (result = ent->RowFlush(to, true))){
    S3FS_PRN_ERR("could not upload file(%s): result=%d", to, result);
    FdManager::get()->Close(ent);
    return result;
@@ -1531,13 +1532,12 @@ static int s3fs_chmod(const char* path, mode_t mode)

static int s3fs_chmod_nocopy(const char* path, mode_t mode)
{
  int result;
  string strpath;
  string newpath;
  string nowcache;
  headers_t meta;
  int result;
  string strpath;
  string newpath;
  string nowcache;
  struct stat stbuf;
  int nDirType = DIRTYPE_UNKNOWN;
  int nDirType = DIRTYPE_UNKNOWN;

  S3FS_PRN_INFO1("[path=%s][mode=%04o]", path, mode);

@@ -1554,11 +1554,11 @@ static int s3fs_chmod_nocopy(const char* path, mode_t mode)

  // Get attributes
  if(S_ISDIR(stbuf.st_mode)){
    result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType);
    result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType);
  }else{
    strpath = path;
    nowcache = strpath;
    result = get_object_attribute(strpath.c_str(), NULL, &meta);
    result = get_object_attribute(strpath.c_str(), NULL, NULL);
  }
  if(0 != result){
    return result;

@@ -1584,9 +1584,6 @@ static int s3fs_chmod_nocopy(const char* path, mode_t mode)
  }else{
    // normal object or directory object of newer version

    // Change file mode
    meta["x-amz-meta-mode"] = str(mode);

    // open & load
    FdEntity* ent;
    if(NULL == (ent = get_local_fent(strpath.c_str(), true))){

@@ -1594,8 +1591,11 @@ static int s3fs_chmod_nocopy(const char* path, mode_t mode)
      return -EIO;
    }

    // Change file mode
    ent->SetMode(mode);

    // upload
    if(0 != (result = ent->Flush(meta, true))){
    if(0 != (result = ent->Flush(true))){
      S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result);
      FdManager::get()->Close(ent);
      return result;
@@ -1693,13 +1693,12 @@ static int s3fs_chown(const char* path, uid_t uid, gid_t gid)

static int s3fs_chown_nocopy(const char* path, uid_t uid, gid_t gid)
{
  int result;
  string strpath;
  string newpath;
  string nowcache;
  headers_t meta;
  int result;
  string strpath;
  string newpath;
  string nowcache;
  struct stat stbuf;
  int nDirType = DIRTYPE_UNKNOWN;
  int nDirType = DIRTYPE_UNKNOWN;

  S3FS_PRN_INFO1("[path=%s][uid=%u][gid=%u]", path, (unsigned int)uid, (unsigned int)gid);

@@ -1716,11 +1715,11 @@ static int s3fs_chown_nocopy(const char* path, uid_t uid, gid_t gid)

  // Get attributes
  if(S_ISDIR(stbuf.st_mode)){
    result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType);
    result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType);
  }else{
    strpath = path;
    nowcache = strpath;
    result = get_object_attribute(strpath.c_str(), NULL, &meta);
    result = get_object_attribute(strpath.c_str(), NULL, NULL);
  }
  if(0 != result){
    return result;

@@ -1755,10 +1754,6 @@ static int s3fs_chown_nocopy(const char* path, uid_t uid, gid_t gid)
  }else{
    // normal object or directory object of newer version

    // Change owner
    meta["x-amz-meta-uid"] = str(uid);
    meta["x-amz-meta-gid"] = str(gid);

    // open & load
    FdEntity* ent;
    if(NULL == (ent = get_local_fent(strpath.c_str(), true))){

@@ -1766,8 +1761,12 @@ static int s3fs_chown_nocopy(const char* path, uid_t uid, gid_t gid)
      return -EIO;
    }

    // Change owner
    ent->SetUId(uid);
    ent->SetGId(gid);

    // upload
    if(0 != (result = ent->Flush(meta, true))){
    if(0 != (result = ent->Flush(true))){
      S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result);
      FdManager::get()->Close(ent);
      return result;
@@ -1851,13 +1850,12 @@ static int s3fs_utimens(const char* path, const struct timespec ts[2])

static int s3fs_utimens_nocopy(const char* path, const struct timespec ts[2])
{
  int result;
  string strpath;
  string newpath;
  string nowcache;
  headers_t meta;
  int result;
  string strpath;
  string newpath;
  string nowcache;
  struct stat stbuf;
  int nDirType = DIRTYPE_UNKNOWN;
  int nDirType = DIRTYPE_UNKNOWN;

  S3FS_PRN_INFO1("[path=%s][mtime=%s]", path, str(ts[1].tv_sec).c_str());

@@ -1876,11 +1874,11 @@ static int s3fs_utimens_nocopy(const char* path, const struct timespec ts[2])

  // Get attributes
  if(S_ISDIR(stbuf.st_mode)){
    result = chk_dir_object_type(path, newpath, strpath, nowcache, &meta, &nDirType);
    result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType);
  }else{
    strpath = path;
    nowcache = strpath;
    result = get_object_attribute(strpath.c_str(), NULL, &meta);
    result = get_object_attribute(strpath.c_str(), NULL, NULL);
  }
  if(0 != result){
    return result;

@@ -1906,9 +1904,6 @@ static int s3fs_utimens_nocopy(const char* path, const struct timespec ts[2])
  }else{
    // normal object or directory object of newer version

    // Change date
    meta["x-amz-meta-mtime"] = str(ts[1].tv_sec);

    // open & load
    FdEntity* ent;
    if(NULL == (ent = get_local_fent(strpath.c_str(), true))){

@@ -1924,7 +1919,7 @@ static int s3fs_utimens_nocopy(const char* path, const struct timespec ts[2])
    }

    // upload
    if(0 != (result = ent->Flush(meta, true))){
    if(0 != (result = ent->Flush(true))){
      S3FS_PRN_ERR("could not upload file(%s): result=%d", strpath.c_str(), result);
      FdManager::get()->Close(ent);
      return result;
@@ -1946,6 +1941,10 @@ static int s3fs_truncate(const char* path, off_t size)

  S3FS_PRN_INFO("[path=%s][size=%jd]", path, (intmax_t)size);

  if(size < 0){
    size = 0;
  }

  if(0 != (result = check_parent_object_access(path, X_OK))){
    return result;
  }

@@ -1956,11 +1955,11 @@ static int s3fs_truncate(const char* path, off_t size)
  // Get file information
  if(0 == (result = get_object_attribute(path, NULL, &meta))){
    // Exists -> Get file(with size)
    if(NULL == (ent = FdManager::get()->Open(path, size, -1, false, true))){
    if(NULL == (ent = FdManager::get()->Open(path, &meta, static_cast<ssize_t>(size), -1, false, true))){
      S3FS_PRN_ERR("could not open file(%s): errno=%d", path, errno);
      return -EIO;
    }
    if(0 != (result = ent->Load(0, size))){
    if(0 != (result = ent->Load(0, static_cast<size_t>(size)))){
      S3FS_PRN_ERR("could not download file(%s): result=%d", path, result);
      FdManager::get()->Close(ent);
      return result;

@@ -1968,14 +1967,25 @@ static int s3fs_truncate(const char* path, off_t size)

  }else{
    // Not found -> Make tmpfile(with size)
    if(NULL == (ent = FdManager::get()->Open(path, size, -1, true, true))){

    struct fuse_context* pcxt;
    if(NULL == (pcxt = fuse_get_context())){
      return -EIO;
    }
    meta["Content-Type"]     = string("application/octet-stream"); // Static
    meta["x-amz-meta-mode"]  = str(S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO);
    meta["x-amz-meta-mtime"] = str(time(NULL));
    meta["x-amz-meta-uid"]   = str(pcxt->uid);
    meta["x-amz-meta-gid"]   = str(pcxt->gid);

    if(NULL == (ent = FdManager::get()->Open(path, &meta, static_cast<ssize_t>(size), -1, true, true))){
      S3FS_PRN_ERR("could not open file(%s): errno=%d", path, errno);
      return -EIO;
    }
  }

  // upload
  if(0 != (result = ent->Flush(meta, true))){
  if(0 != (result = ent->Flush(true))){
    S3FS_PRN_ERR("could not upload file(%s): result=%d", path, result);
    FdManager::get()->Close(ent);
    return result;
@@ -2021,8 +2031,10 @@ static int s3fs_open(const char* path, struct fuse_file_info* fi)
    st.st_mtime = -1;
  }

  FdEntity* ent;
  if(NULL == (ent = FdManager::get()->Open(path, st.st_size, st.st_mtime, false, true))){
  FdEntity* ent;
  headers_t meta;
  get_object_attribute(path, NULL, &meta);
  if(NULL == (ent = FdManager::get()->Open(path, &meta, static_cast<ssize_t>(st.st_size), st.st_mtime, false, true))){
    return -EIO;
  }
  fi->fh = ent->GetFd();

@@ -2047,8 +2059,8 @@ static int s3fs_read(const char* path, char* buf, size_t size, off_t offset, str
  }

  // check real file size
  off_t realsize = 0;
  if(!ent->GetSize(realsize) || 0 >= realsize){
  size_t realsize = 0;
  if(!ent->GetSize(realsize) || realsize <= 0){
    S3FS_PRN_ERR("file size is 0, so break to read.");
    FdManager::get()->Close(ent);
    return 0;
@@ -2116,20 +2128,8 @@ static int s3fs_flush(const char* path, struct fuse_file_info* fi)

  FdEntity* ent;
  if(NULL != (ent = FdManager::get()->ExistOpen(path, static_cast<int>(fi->fh)))){
    headers_t meta;
    if(0 != (result = get_object_attribute(path, NULL, &meta))){
      FdManager::get()->Close(ent);
      return result;
    }

    // If both mtime are not same, force to change mtime based on fd.
    time_t ent_mtime;
    if(ent->GetMtime(ent_mtime)){
      if(str(ent_mtime) != meta["x-amz-meta-mtime"]){
        meta["x-amz-meta-mtime"] = str(ent_mtime);
      }
    }
    result = ent->Flush(meta, false);
    ent->UpdateMtime();
    result = ent->Flush(false);
    FdManager::get()->Close(ent);
  }
  S3FS_MALLOCTRIM(0);

@@ -2148,20 +2148,10 @@ static int s3fs_fsync(const char* path, int datasync, struct fuse_file_info* fi)

  FdEntity* ent;
  if(NULL != (ent = FdManager::get()->ExistOpen(path, static_cast<int>(fi->fh)))){
    headers_t meta;
    if(0 != (result = get_object_attribute(path, NULL, &meta))){
      FdManager::get()->Close(ent);
      return result;
    if(0 == datasync){
      ent->UpdateMtime();
    }

    // If datasync is not zero, only flush data without meta updating.
    time_t ent_mtime;
    if(ent->GetMtime(ent_mtime)){
      if(0 == datasync && str(ent_mtime) != meta["x-amz-meta-mtime"]){
        meta["x-amz-meta-mtime"] = str(ent_mtime);
      }
    }
    result = ent->Flush(meta, false);
    result = ent->Flush(false);
    FdManager::get()->Close(ent);
  }
  S3FS_MALLOCTRIM(0);
@@ -2501,7 +2491,7 @@ static int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathC
    return -1;
  }
  if(xmlXPathNodeSetIsEmpty(contents_xp->nodesetval)){
    S3FS_PRN_ERR("contents_xp->nodesetval is empty.");
    S3FS_PRN_WARN("contents_xp->nodesetval is empty.");
    S3FS_XMLXPATHFREEOBJECT(contents_xp);
    return 0;
  }
@@ -4467,30 +4457,29 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
        return -1;
      }
      S3fsCurl::SetMaxParallelCount(maxpara);

      if(FdManager::GetPageSize() < static_cast<size_t>(S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount())){
        FdManager::SetPageSize(static_cast<size_t>(S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount()));
      }
      return 0;
    }
    if(0 == STR2NCMP(arg, "fd_page_size=")){
      size_t pagesize = static_cast<size_t>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
      if(pagesize < static_cast<size_t>(S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount())){
        S3FS_PRN_EXIT("argument should be over 1MB: fd_page_size");
        return -1;
      }
      FdManager::SetPageSize(pagesize);
      S3FS_PRN_ERR("option fd_page_size is no longer supported, so skip this option.");
      return 0;
    }
    if(0 == STR2NCMP(arg, "multipart_size=")){
      off_t size = static_cast<off_t>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
      if(!S3fsCurl::SetMultipartSize(size)){
        S3FS_PRN_EXIT("multipart_size option must be at least 10 MB.");
        S3FS_PRN_EXIT("multipart_size option must be at least 5 MB.");
        return -1;
      }
      if(FdManager::GetPageSize() < static_cast<size_t>(S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount())){
        FdManager::SetPageSize(static_cast<size_t>(S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount()));
      // update ensure free disk space if it is not set.
      FdManager::InitEnsureFreeDiskSpace();
      return 0;
    }
    if(0 == STR2NCMP(arg, "ensure_diskfree=")){
      size_t dfsize = static_cast<size_t>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char))) * 1024 * 1024;
      if(dfsize < static_cast<size_t>(S3fsCurl::GetMultipartSize())){
        S3FS_PRN_WARN("specified size to ensure disk free space is smaller than multipart size, so set multipart size to it.");
        dfsize = static_cast<size_t>(S3fsCurl::GetMultipartSize());
      }
      FdManager::SetEnsureFreeDiskSpace(dfsize);
      return 0;
    }
    if(0 == STR2NCMP(arg, "singlepart_copy_limit=")){
@@ -4789,6 +4778,13 @@ int main(int argc, char* argv[])
    exit(s3fs_utility_mode());
  }

  // check free disk space
  FdManager::InitEnsureFreeDiskSpace();
  if(!FdManager::IsSafeDiskSpace(NULL, S3fsCurl::GetMultipartSize())){
    S3FS_PRN_EXIT("There is no enough disk space for used as cache(or temporary) directory by s3fs.");
    exit(EXIT_FAILURE);
  }

  s3fs_oper.getattr  = s3fs_getattr;
  s3fs_oper.readlink = s3fs_readlink;
  s3fs_oper.mknod    = s3fs_mknod;
@@ -1017,18 +1017,16 @@ void show_help (void)
    " multipart_size (default=\"10\")\n"
    " - part size, in MB, for each multipart request.\n"
    "\n"
    " ensure_diskfree (default same multipart_size value)\n"
    " - sets MB to ensure disk free space. s3fs makes file for\n"
    "   downloading, uploading and caching files. If the disk free\n"
    "   space is smaller than this value, s3fs do not use diskspace\n"
    "   as possible in exchange for the performance.\n"
    "\n"
    " singlepart_copy_limit (default=\"5120\")\n"
    " - maximum size, in MB, of a single-part copy before trying \n"
    "   multipart copy.\n"
    "\n"
    " fd_page_size (default=\"52428800\"(50MB))\n"
    " - number of internal management page size for each file descriptor.\n"
    "   For delayed reading and writing by s3fs, s3fs manages pages which \n"
    "   is separated from object. Each pages has a status that data is \n"
    "   already loaded(or not loaded yet).\n"
    "   This option should not be changed when you don't have a trouble \n"
    "   with performance.\n"
    "\n"
    " url (default=\"http://s3.amazonaws.com\")\n"
    " - sets the url to use to access amazon s3\n"
    "\n"
@@ -84,7 +84,8 @@ stdbuf -oL -eL $S3FS $TEST_BUCKET_1 $TEST_BUCKET_MOUNT_POINT_1 \
    -o sigv2 \
    -o singlepart_copy_limit=$((10 * 1024)) \
    -o url=${S3_URL} \
    -o use_path_request_style -f -o f2 -d -d |& stdbuf -oL -eL sed -u "s/^/s3fs: /" &
    -o use_path_request_style \
    -o dbglevel=info -f |& stdbuf -oL -eL sed -u "s/^/s3fs: /" &

retry 30 grep $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts || exit 1