Mirror of https://github.com/s3fs-fuse/s3fs-fuse.git (synced 2024-12-31 11:51:49 +00:00)
Fixed a bug about move file over limit of ensure space
This commit is contained in:
parent a23d02923c
commit 2bb745cdd7

src/fdcache.cpp (111 changed lines)
@@ -1803,13 +1803,15 @@ bool FdEntity::GetStats(struct stat& st, bool lock_already_held)
    return true;
}

int FdEntity::SetCtime(time_t time)
int FdEntity::SetCtime(time_t time, bool lock_already_held)
{
    AutoLock auto_lock(&fdent_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE);

    S3FS_PRN_INFO3("[path=%s][fd=%d][time=%lld]", path.c_str(), fd, static_cast<long long>(time));

    if(-1 == time){
        return 0;
    }

    AutoLock auto_lock(&fdent_lock);
    orgmeta["x-amz-meta-ctime"] = str(time);
    return 0;
}
@@ -1884,6 +1886,25 @@ bool FdEntity::GetSize(off_t& size)
    return true;
}

bool FdEntity::GetXattr(string& xattr)
{
    AutoLock auto_lock(&fdent_lock);

    headers_t::const_iterator iter = orgmeta.find("x-amz-meta-xattr");
    if(iter == orgmeta.end()){
        return false;
    }
    xattr = iter->second;
    return true;
}

bool FdEntity::SetXattr(const std::string& xattr)
{
    AutoLock auto_lock(&fdent_lock);
    orgmeta["x-amz-meta-xattr"] = xattr;
    return true;
}

bool FdEntity::SetMode(mode_t mode)
{
    AutoLock auto_lock(&fdent_lock);
@@ -2171,6 +2192,7 @@ int FdEntity::NoCachePreMultipartPost()
    // initialize multipart upload values
    upload_id.erase();
    etaglist.clear();
    pending_headers.clear();

    S3fsCurl s3fscurl(true);
    int result;
@@ -2377,6 +2399,11 @@ int FdEntity::RowFlush(const char* tpath, bool force_sync)
            // So the file has already been removed, skip error.
            S3FS_PRN_ERR("failed to truncate file(%d) to zero, but continue...", fd);
        }

        // put pading headers
        if(0 != (result = UploadPendingMeta())){
            return result;
        }
    }

    if(0 == result){
@@ -2577,6 +2604,84 @@ ssize_t FdEntity::Write(const char* bytes, off_t start, size_t size)
    return wsize;
}

// [NOTE]
// Returns true if merged to orgmeta.
// If true is returned, the caller can update the header.
// If it is false, do not update the header because multipart upload is in progress.
// In this case, the header is pending internally and is updated after the upload
// is complete(flush file).
//
bool FdEntity::MergeOrgMeta(headers_t& updatemeta)
{
    AutoLock auto_lock(&fdent_lock);

    bool is_pending;
    if(upload_id.empty()){
        // merge update meta
        headers_t mergedmeta = orgmeta;

        merge_headers(orgmeta, updatemeta, false); // overwrite existing keys only
        merge_headers(mergedmeta, updatemeta, true); // overwrite all keys
        updatemeta = mergedmeta; // swap

        is_pending = false;
    }else{
        // could not update meta because uploading now, then put pending.
        pending_headers.push_back(updatemeta);
        is_pending = true;
    }
    return is_pending;
}

// global function in s3fs.cpp
int put_headers(const char* path, headers_t& meta, bool is_copy);

int FdEntity::UploadPendingMeta()
{
    AutoLock auto_lock(&fdent_lock);

    int result = 0;
    for(headers_list_t::const_iterator iter = pending_headers.begin(); iter != pending_headers.end(); ++iter){
        // [NOTE]
        // orgmeta will be updated sequentially.
        headers_t putmeta = orgmeta;
        merge_headers(putmeta, *iter, true); // overwrite all keys
        merge_headers(orgmeta, *iter, false); // overwrite existing keys only

        // [NOTE]
        // this is special cases, we remove the key which has empty values.
        for(headers_t::iterator hiter = putmeta.begin(); hiter != putmeta.end(); ){
            if(hiter->second.empty()){
                if(orgmeta.end() != orgmeta.find(hiter->first)){
                    orgmeta.erase(hiter->first);
                }
                putmeta.erase(hiter++);
            }else{
                ++hiter;
            }
        }

        // update ctime/mtime
        time_t updatetime = get_mtime((*iter), false); // not overcheck
        if(0 != updatetime){
            SetMtime(updatetime, true);
        }
        updatetime = get_ctime((*iter), false); // not overcheck
        if(0 != updatetime){
            SetCtime(updatetime, true);
        }

        // put headers
        int one_result = put_headers(path.c_str(), putmeta, true);
        if(0 != one_result){
            S3FS_PRN_ERR("failed to put header after flushing file(%s) by(%d).", path.c_str(), one_result);
            result = one_result; // keep lastest result code
        }
    }
    pending_headers.clear();
    return result;
}

//------------------------------------------------
// FdManager symbol
//------------------------------------------------
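FdEntity::MergeOrgMeta and FdEntity::UploadPendingMeta above are the heart of this fix: while upload_id is non-empty (a multipart upload forced by the ensure-free-space limit is still running), header updates are queued in pending_headers, and RowFlush replays them once the upload finishes. The following standalone sketch models that flow with plain standard containers; PendingEntity and the typedefs below are simplified stand-ins for FdEntity, headers_t and put_headers, not the actual s3fs-fuse code.

// Standalone sketch of the pending-header mechanism added to FdEntity above.
// PendingEntity, the typedefs and the console output are illustrative
// stand-ins, not the real s3fs-fuse classes or API.
#include <iostream>
#include <list>
#include <map>
#include <string>

typedef std::map<std::string, std::string> headers_t;
typedef std::list<headers_t> headers_list_t;

// Same merging rule as the merge_headers() introduced by this commit.
static void merge_headers(headers_t& base, const headers_t& additional, bool add_noexist)
{
    for(headers_t::const_iterator iter = additional.begin(); iter != additional.end(); ++iter){
        if(add_noexist || base.end() != base.find(iter->first)){
            base[iter->first] = iter->second;
        }
    }
}

struct PendingEntity
{
    std::string    upload_id;        // non-empty while a multipart upload is in flight
    headers_t      orgmeta;          // current object meta
    headers_list_t pending_headers;  // updates queued during the upload

    // Returns true when the update had to be pended (this is how the callers
    // in s3fs.cpp interpret FdEntity::MergeOrgMeta's return value).
    bool MergeOrgMeta(headers_t& updatemeta)
    {
        if(upload_id.empty()){
            headers_t mergedmeta = orgmeta;
            merge_headers(orgmeta, updatemeta, false);   // refresh keys the object already has
            merge_headers(mergedmeta, updatemeta, true); // full view handed back for the PUT
            updatemeta = mergedmeta;
            return false;
        }
        pending_headers.push_back(updatemeta);
        return true;
    }

    // Called after the upload completes: replay every pended update as a header PUT.
    void UploadPendingMeta()
    {
        for(headers_list_t::const_iterator iter = pending_headers.begin(); iter != pending_headers.end(); ++iter){
            headers_t putmeta = orgmeta;
            merge_headers(putmeta, *iter, true);
            merge_headers(orgmeta, *iter, false);
            std::cout << "PUT headers: x-amz-meta-mode=" << putmeta["x-amz-meta-mode"] << std::endl;
        }
        pending_headers.clear();
    }
};

int main()
{
    PendingEntity ent;
    ent.orgmeta["x-amz-meta-mode"] = "33188";

    ent.upload_id = "dummy-upload-id";     // simulate an in-flight multipart upload
    headers_t update;
    update["x-amz-meta-mode"] = "33152";   // e.g. a chmod arriving while uploading
    std::cout << "pended: " << std::boolalpha << ent.MergeOrgMeta(update) << std::endl; // pended: true

    ent.upload_id.clear();                 // the upload finished (flush completed)
    ent.UploadPendingMeta();               // replays the chmod: PUT headers: x-amz-meta-mode=33152
    return 0;
}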
src/fdcache.h

@@ -131,6 +131,8 @@ class PageList
//------------------------------------------------
// class FdEntity
//------------------------------------------------
typedef std::list<headers_t> headers_list_t;

class FdEntity
{
    private:
@@ -155,6 +157,7 @@ class FdEntity
        std::string     cachepath;       // local cache file path
                                         // (if this is empty, does not load/save pagelist.)
        std::string     mirrorpath;      // mirror file path to local cache file path
        headers_list_t  pending_headers;// pending update headers

    private:
        static int FillFile(int fd, unsigned char byte, off_t size, off_t start);
@@ -164,8 +167,8 @@ class FdEntity
        ino_t GetInode(void);
        int OpenMirrorFile(void);
        bool SetAllStatus(bool is_loaded);   // [NOTE] not locking
        //bool SetAllStatusLoaded(void) { return SetAllStatus(true); }
        bool SetAllStatusUnloaded(void) { return SetAllStatus(false); }
        int UploadPendingMeta(void);

    public:
        static bool SetNoMixMultipart(void);
@@ -183,13 +186,16 @@ class FdEntity
        bool RenamePath(const std::string& newpath, std::string& fentmapkey);
        int GetFd(void) const { return fd; }
        bool IsModified(void) const { return pagelist.IsModified(); }
        bool MergeOrgMeta(headers_t& updatemeta);

        bool GetStats(struct stat& st, bool lock_already_held = false);
        int SetCtime(time_t time);
        int SetCtime(time_t time, bool lock_already_held = false);
        int SetMtime(time_t time, bool lock_already_held = false);
        bool UpdateCtime(void);
        bool UpdateMtime(void);
        bool GetSize(off_t& size);
        bool GetXattr(std::string& xattr);
        bool SetXattr(const std::string& xattr);
        bool SetMode(mode_t mode);
        bool SetUId(uid_t uid);
        bool SetGId(gid_t gid);
src/s3fs.cpp (255 changed lines)
@@ -180,7 +180,7 @@ static xmlChar* get_base_exp(xmlDocPtr doc, const char* exp);
static xmlChar* get_prefix(xmlDocPtr doc);
static xmlChar* get_next_marker(xmlDocPtr doc);
static char* get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path);
static int put_headers(const char* path, headers_t& meta, bool is_copy);
int put_headers(const char* path, headers_t& meta, bool is_copy); // [NOTE] global function because this is called from FdEntity class
static int rename_large_object(const char* from, const char* to);
static int create_file_object(const char* path, mode_t mode, uid_t uid, gid_t gid);
static int create_directory_object(const char* path, mode_t mode, time_t time, uid_t uid, gid_t gid);
@@ -771,7 +771,7 @@ static FdEntity* get_local_fent(const char* path, bool is_load)
 * ow_sse_flg is for over writing sse header by use_sse option.
 * @return fuse return code
 */
static int put_headers(const char* path, headers_t& meta, bool is_copy)
int put_headers(const char* path, headers_t& meta, bool is_copy)
{
    int result;
    S3fsCurl s3fscurl(true);
@@ -1640,26 +1640,41 @@ static int s3fs_chmod(const char* _path, mode_t mode)
        }
    }else{
        // normal object or directory object of newer version
        meta["x-amz-meta-ctime"] = str(time(NULL));
        meta["x-amz-meta-mode"] = str(mode);
        meta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str()));
        meta["x-amz-metadata-directive"] = "REPLACE";

        if(put_headers(strpath.c_str(), meta, true) != 0){
            return -EIO;
        }
        StatCache::getStatCacheData()->DelStat(nowcache);
        headers_t updatemeta;
        updatemeta["x-amz-meta-ctime"] = str(time(NULL));
        updatemeta["x-amz-meta-mode"] = str(mode);
        updatemeta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str()));
        updatemeta["x-amz-metadata-directive"] = "REPLACE";

        // check opened file handle.
        //
        // If we have already opened file handle, should set mode to it.
        // And new mode is set when the file handle is closed.
        // If the file starts uploading by multipart when the disk capacity is insufficient,
        // we need to put these header after finishing upload.
        // Or if the file is only open, we must update to FdEntity's internal meta.
        //
        FdEntity* ent;
        if(NULL != (ent = FdManager::get()->ExistOpen(path))){
            ent->UpdateCtime();
            ent->SetMode(mode); // Set new mode to opened fd.
            FdManager::get()->Close(ent);
        if(NULL != (ent = FdManager::get()->ExistOpen(path, -1, true))){
            // the file is opened now.
            if(ent->MergeOrgMeta(updatemeta)){
                // now uploading
                // the meta is pending and accumulated to be put after the upload is complete.
                S3FS_PRN_INFO("meta pending until upload is complete");
            }else{
                // allow to put header
                // updatemeta already merged the orgmeta of the opened files.
                if(0 != put_headers(strpath.c_str(), updatemeta, true)){
                    return -EIO;
                }
                StatCache::getStatCacheData()->DelStat(nowcache);
            }

        }else{
            // not opened file, then put headers
            merge_headers(meta, updatemeta, true);
            if(0 != put_headers(strpath.c_str(), meta, true)){
                return -EIO;
            }
            StatCache::getStatCacheData()->DelStat(nowcache);
        }
    }
    S3FS_MALLOCTRIM(0);
@@ -1802,16 +1817,43 @@ static int s3fs_chown(const char* _path, uid_t uid, gid_t gid)
            return result;
        }
    }else{
        meta["x-amz-meta-ctime"] = str(time(NULL));
        meta["x-amz-meta-uid"] = str(uid);
        meta["x-amz-meta-gid"] = str(gid);
        meta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str()));
        meta["x-amz-metadata-directive"] = "REPLACE";
        headers_t updatemeta;
        updatemeta["x-amz-meta-ctime"] = str(time(NULL));
        updatemeta["x-amz-meta-uid"] = str(uid);
        updatemeta["x-amz-meta-gid"] = str(gid);
        updatemeta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str()));
        updatemeta["x-amz-metadata-directive"] = "REPLACE";

        if(put_headers(strpath.c_str(), meta, true) != 0){
            return -EIO;
        // check opened file handle.
        //
        // If the file starts uploading by multipart when the disk capacity is insufficient,
        // we need to put these header after finishing upload.
        // Or if the file is only open, we must update to FdEntity's internal meta.
        //
        FdEntity* ent;
        if(NULL != (ent = FdManager::get()->ExistOpen(path, -1, true))){
            // the file is opened now.
            if(ent->MergeOrgMeta(updatemeta)){
                // now uploading
                // the meta is pending and accumulated to be put after the upload is complete.
                S3FS_PRN_INFO("meta pending until upload is complete");
            }else{
                // allow to put header
                // updatemeta already merged the orgmeta of the opened files.
                if(0 != put_headers(strpath.c_str(), updatemeta, true)){
                    return -EIO;
                }
                StatCache::getStatCacheData()->DelStat(nowcache);
            }

        }else{
            // not opened file, then put headers
            merge_headers(meta, updatemeta, true);
            if(0 != put_headers(strpath.c_str(), meta, true)){
                return -EIO;
            }
            StatCache::getStatCacheData()->DelStat(nowcache);
        }
        StatCache::getStatCacheData()->DelStat(nowcache);
    }
    S3FS_MALLOCTRIM(0);

@@ -1957,14 +1999,41 @@ static int s3fs_utimens(const char* _path, const struct timespec ts[2])
            return result;
        }
    }else{
        meta["x-amz-meta-mtime"] = str(ts[1].tv_sec);
        meta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str()));
        meta["x-amz-metadata-directive"] = "REPLACE";
        headers_t updatemeta;
        updatemeta["x-amz-meta-mtime"] = str(ts[1].tv_sec);
        updatemeta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str()));
        updatemeta["x-amz-metadata-directive"] = "REPLACE";

        if(put_headers(strpath.c_str(), meta, true) != 0){
            return -EIO;
        // check opened file handle.
        //
        // If the file starts uploading by multipart when the disk capacity is insufficient,
        // we need to put these header after finishing upload.
        // Or if the file is only open, we must update to FdEntity's internal meta.
        //
        FdEntity* ent;
        if(NULL != (ent = FdManager::get()->ExistOpen(path, -1, true))){
            // the file is opened now.
            if(ent->MergeOrgMeta(updatemeta)){
                // now uploading
                // the meta is pending and accumulated to be put after the upload is complete.
                S3FS_PRN_INFO("meta pending until upload is complete");
            }else{
                // allow to put header
                // updatemeta already merged the orgmeta of the opened files.
                if(0 != put_headers(strpath.c_str(), updatemeta, true)){
                    return -EIO;
                }
                StatCache::getStatCacheData()->DelStat(nowcache);
            }

        }else{
            // not opened file, then put headers
            merge_headers(meta, updatemeta, true);
            if(0 != put_headers(strpath.c_str(), meta, true)){
                return -EIO;
            }
            StatCache::getStatCacheData()->DelStat(nowcache);
        }
        StatCache::getStatCacheData()->DelStat(nowcache);
    }
    S3FS_MALLOCTRIM(0);

@@ -3152,11 +3221,6 @@ static int s3fs_setxattr(const char* path, const char* name, const char* value,
        return result;
    }

    // make new header_t
    if(0 != (result = set_xattrs_to_header(meta, name, value, size, flags))){
        return result;
    }

    if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){
        // Should rebuild directory object(except new type)
        // Need to remove old dir("dir" etc) and make new dir("dir/")
@@ -3178,14 +3242,62 @@
    }

    // set xattr all object
    meta["x-amz-meta-ctime"] = str(time(NULL));
    meta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str()));
    meta["x-amz-metadata-directive"] = "REPLACE";
    headers_t updatemeta;
    updatemeta["x-amz-meta-ctime"] = str(time(NULL));
    updatemeta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str()));
    updatemeta["x-amz-metadata-directive"] = "REPLACE";

    if(0 != put_headers(strpath.c_str(), meta, true)){
        return -EIO;
    // check opened file handle.
    //
    // If the file starts uploading by multipart when the disk capacity is insufficient,
    // we need to put these header after finishing upload.
    // Or if the file is only open, we must update to FdEntity's internal meta.
    //
    FdEntity* ent;
    if(NULL != (ent = FdManager::get()->ExistOpen(path, -1, true))){
        // the file is opened now.

        // get xattr and make new xattr
        string strxattr;
        if(ent->GetXattr(strxattr)){
            updatemeta["x-amz-meta-xattr"] = strxattr;
        }else{
            // [NOTE]
            // Set an empty xattr.
            // This requires the key to be present in order to add xattr.
            ent->SetXattr(strxattr);
        }
        if(0 != (result = set_xattrs_to_header(updatemeta, name, value, size, flags))){
            return result;
        }

        if(ent->MergeOrgMeta(updatemeta)){
            // now uploading
            // the meta is pending and accumulated to be put after the upload is complete.
            S3FS_PRN_INFO("meta pending until upload is complete");
        }else{
            // allow to put header
            // updatemeta already merged the orgmeta of the opened files.
            if(0 != put_headers(strpath.c_str(), updatemeta, true)){
                return -EIO;
            }
            StatCache::getStatCacheData()->DelStat(nowcache);
        }

    }else{
        // not opened file, then put headers
        merge_headers(meta, updatemeta, true);

        // NOTICE: modify xattr from base meta
        if(0 != (result = set_xattrs_to_header(meta, name, value, size, flags))){
            return result;
        }

        if(0 != put_headers(strpath.c_str(), meta, true)){
            return -EIO;
        }
        StatCache::getStatCacheData()->DelStat(nowcache);
    }
    StatCache::getStatCacheData()->DelStat(nowcache);

    return 0;
}
@@ -3394,13 +3506,6 @@ static int s3fs_removexattr(const char* path, const char* name)
    delete xiter->second;
    xattrs.erase(xiter);

    // build new xattr
    if(!xattrs.empty()){
        meta["x-amz-meta-xattr"] = build_xattrs(xattrs);
    }else{
        meta.erase("x-amz-meta-xattr");
    }

    if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){
        // Should rebuild directory object(except new type)
        // Need to remove old dir("dir" etc) and make new dir("dir/")
@@ -3423,17 +3528,53 @@
    }

    // set xattr all object
    meta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str()));
    meta["x-amz-metadata-directive"] = "REPLACE";

    if(0 != put_headers(strpath.c_str(), meta, true)){
        free_xattrs(xattrs);
        return -EIO;
    headers_t updatemeta;
    updatemeta["x-amz-copy-source"] = urlEncode(service_path + bucket + get_realpath(strpath.c_str()));
    updatemeta["x-amz-metadata-directive"] = "REPLACE";
    if(!xattrs.empty()){
        updatemeta["x-amz-meta-xattr"] = build_xattrs(xattrs);
    }else{
        updatemeta["x-amz-meta-xattr"] = string(""); // This is a special case. If empty, this header will eventually be removed.
    }
    StatCache::getStatCacheData()->DelStat(nowcache);

    free_xattrs(xattrs);

    // check opened file handle.
    //
    // If the file starts uploading by multipart when the disk capacity is insufficient,
    // we need to put these header after finishing upload.
    // Or if the file is only open, we must update to FdEntity's internal meta.
    //
    FdEntity* ent;
    if(NULL != (ent = FdManager::get()->ExistOpen(path, -1, true))){
        // the file is opened now.
        if(ent->MergeOrgMeta(updatemeta)){
            // now uploading
            // the meta is pending and accumulated to be put after the upload is complete.
            S3FS_PRN_INFO("meta pending until upload is complete");
        }else{
            // allow to put header
            // updatemeta already merged the orgmeta of the opened files.
            if(updatemeta["x-amz-meta-xattr"].empty()){
                updatemeta.erase("x-amz-meta-xattr");
            }
            if(0 != put_headers(strpath.c_str(), updatemeta, true)){
                return -EIO;
            }
            StatCache::getStatCacheData()->DelStat(nowcache);
        }

    }else{
        // not opened file, then put headers
        if(updatemeta["x-amz-meta-xattr"].empty()){
            updatemeta.erase("x-amz-meta-xattr");
        }
        merge_headers(meta, updatemeta, true);
        if(0 != put_headers(strpath.c_str(), meta, true)){
            return -EIO;
        }
        StatCache::getStatCacheData()->DelStat(nowcache);
    }

    return 0;
}
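Every handler changed above (s3fs_chmod, s3fs_chown, s3fs_utimens, s3fs_setxattr, s3fs_removexattr) now builds an updatemeta set and either hands it to the opened FdEntity, which may pend it until the upload completes, or merges it into meta and sends the header PUT directly. s3fs_removexattr additionally stores x-amz-meta-xattr with an empty value as a removal marker, and the empty-valued key is stripped right before the PUT (both here and in FdEntity::UploadPendingMeta). The small standalone example below only illustrates that empty-value convention; headers_t here is a plain std::map stand-in, not the real s3fs type.

// Illustration of the "empty value means: drop this header before the PUT"
// convention used by s3fs_removexattr and FdEntity::UploadPendingMeta above.
// Plain standard containers are used instead of the real s3fs types.
#include <iostream>
#include <map>
#include <string>

typedef std::map<std::string, std::string> headers_t;

// Erase every key whose value is empty, as UploadPendingMeta does for each
// pending header set before calling put_headers().
static void drop_empty_headers(headers_t& putmeta)
{
    for(headers_t::iterator iter = putmeta.begin(); iter != putmeta.end(); ){
        if(iter->second.empty()){
            putmeta.erase(iter++);   // erase-while-iterating, valid for std::map
        }else{
            ++iter;
        }
    }
}

int main()
{
    headers_t updatemeta;
    updatemeta["x-amz-metadata-directive"] = "REPLACE";
    updatemeta["x-amz-meta-xattr"] = "";   // marker: the last xattr was removed

    drop_empty_headers(updatemeta);

    // Only the non-empty header survives, so the PUT no longer carries the xattr key.
    for(headers_t::const_iterator iter = updatemeta.begin(); iter != updatemeta.end(); ++iter){
        std::cout << iter->first << ": " << iter->second << std::endl;
    }
    return 0;
}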
src/s3fs_util.cpp

@@ -801,7 +801,7 @@ time_t get_mtime(const char *str)
    return static_cast<time_t>(cvt_strtoofft(strmtime.c_str()));
}

static time_t get_time(headers_t& meta, const char *header)
static time_t get_time(const headers_t& meta, const char *header)
{
    headers_t::const_iterator iter;
    if(meta.end() == (iter = meta.find(header))){
@@ -810,7 +810,7 @@ static time_t get_time(headers_t& meta, const char *header)
    return get_mtime((*iter).second.c_str());
}

time_t get_mtime(headers_t& meta, bool overcheck)
time_t get_mtime(const headers_t& meta, bool overcheck)
{
    time_t t = get_time(meta, "x-amz-meta-mtime");
    if(t != 0){
@@ -826,7 +826,7 @@ time_t get_mtime(headers_t& meta, bool overcheck)
    return 0;
}

time_t get_ctime(headers_t& meta, bool overcheck)
time_t get_ctime(const headers_t& meta, bool overcheck)
{
    time_t t = get_time(meta, "x-amz-meta-ctime");
    if(t != 0){
@@ -843,7 +843,7 @@ off_t get_size(const char *s)
    return cvt_strtoofft(s);
}

off_t get_size(headers_t& meta)
off_t get_size(const headers_t& meta)
{
    headers_t::const_iterator iter = meta.find("Content-Length");
    if(meta.end() == iter){
@@ -857,7 +857,7 @@ mode_t get_mode(const char *s, int base)
    return static_cast<mode_t>(cvt_strtoofft(s, base));
}

mode_t get_mode(headers_t& meta, const char* path, bool checkdir, bool forcedir)
mode_t get_mode(const headers_t& meta, const char* path, bool checkdir, bool forcedir)
{
    mode_t mode = 0;
    bool isS3sync = false;
@@ -941,7 +941,7 @@ uid_t get_uid(const char *s)
    return static_cast<uid_t>(cvt_strtoofft(s));
}

uid_t get_uid(headers_t& meta)
uid_t get_uid(const headers_t& meta)
{
    headers_t::const_iterator iter;
    if(meta.end() != (iter = meta.find("x-amz-meta-uid"))){
@@ -960,7 +960,7 @@ gid_t get_gid(const char *s)
    return static_cast<gid_t>(cvt_strtoofft(s));
}

gid_t get_gid(headers_t& meta)
gid_t get_gid(const headers_t& meta)
{
    headers_t::const_iterator iter;
    if(meta.end() != (iter = meta.find("x-amz-meta-gid"))){
@@ -1001,7 +1001,7 @@ time_t get_lastmodified(const char* s)
    return timegm(&tm); // GMT
}

time_t get_lastmodified(headers_t& meta)
time_t get_lastmodified(const headers_t& meta)
{
    headers_t::const_iterator iter = meta.find("Last-Modified");
    if(meta.end() == iter){
@@ -1015,7 +1015,7 @@ time_t get_lastmodified(headers_t& meta)
// If this function returns true, the object is possible to be directory
// and is needed checking detail(searching sub object).
//
bool is_need_check_obj_detail(headers_t& meta)
bool is_need_check_obj_detail(const headers_t& meta)
{
    headers_t::const_iterator iter;

@@ -1045,6 +1045,21 @@ bool is_need_check_obj_detail(headers_t& meta)
    return true;
}

// [NOTE]
// If add_noexist is false and the key does not exist, it will not be added.
//
bool merge_headers(headers_t& base, const headers_t& additional, bool add_noexist)
{
    bool added = false;
    for(headers_t::const_iterator iter = additional.begin(); iter != additional.end(); ++iter){
        if(add_noexist || base.find(iter->first) != base.end()){
            base[iter->first] = iter->second;
            added = true;
        }
    }
    return added;
}

bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value)
{
    bool result = false;
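The new merge_headers() above is the primitive both update paths rely on: with add_noexist=false it only refreshes keys the target map already contains, with add_noexist=true it also introduces new keys. The self-contained example below copies that logic to show the difference; the header values are invented purely for illustration.

// Demonstration of the two modes of the merge_headers() added above.
#include <iostream>
#include <map>
#include <string>

typedef std::map<std::string, std::string> headers_t;

// Same logic as the merge_headers() introduced by this commit.
static bool merge_headers(headers_t& base, const headers_t& additional, bool add_noexist)
{
    bool added = false;
    for(headers_t::const_iterator iter = additional.begin(); iter != additional.end(); ++iter){
        if(add_noexist || base.find(iter->first) != base.end()){
            base[iter->first] = iter->second;
            added = true;
        }
    }
    return added;
}

int main()
{
    headers_t orgmeta;
    orgmeta["x-amz-meta-mode"] = "33188";          // invented value

    headers_t update;
    update["x-amz-meta-mode"] = "33152";           // existing key: updated in both modes
    update["x-amz-meta-ctime"] = "1570000000";     // new key: only added when add_noexist is true

    headers_t existing_only = orgmeta;
    headers_t full = orgmeta;
    merge_headers(existing_only, update, false);   // keeps orgmeta's key set (FdEntity::orgmeta case)
    merge_headers(full, update, true);             // full merged view (what is handed to put_headers)

    std::cout << "existing_only has ctime: " << existing_only.count("x-amz-meta-ctime") << std::endl;  // 0
    std::cout << "full has ctime:          " << full.count("x-amz-meta-ctime") << std::endl;           // 1
    return 0;
}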
src/s3fs_util.h

@@ -126,21 +126,22 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own);
bool compare_sysname(const char* target);

time_t get_mtime(const char *s);
time_t get_mtime(headers_t& meta, bool overcheck = true);
time_t get_ctime(headers_t& meta, bool overcheck = true);
time_t get_mtime(const headers_t& meta, bool overcheck = true);
time_t get_ctime(const headers_t& meta, bool overcheck = true);
off_t get_size(const char *s);
off_t get_size(headers_t& meta);
off_t get_size(const headers_t& meta);
mode_t get_mode(const char *s, int base = 0);
mode_t get_mode(headers_t& meta, const char* path = NULL, bool checkdir = false, bool forcedir = false);
mode_t get_mode(const headers_t& meta, const char* path = NULL, bool checkdir = false, bool forcedir = false);
uid_t get_uid(const char *s);
uid_t get_uid(headers_t& meta);
uid_t get_uid(const headers_t& meta);
gid_t get_gid(const char *s);
gid_t get_gid(headers_t& meta);
gid_t get_gid(const headers_t& meta);
blkcnt_t get_blocks(off_t size);
time_t cvtIAMExpireStringToTime(const char* s);
time_t get_lastmodified(const char* s);
time_t get_lastmodified(headers_t& meta);
bool is_need_check_obj_detail(headers_t& meta);
time_t get_lastmodified(const headers_t& meta);
bool is_need_check_obj_detail(const headers_t& meta);
bool merge_headers(headers_t& base, const headers_t& additional, bool add_noexist);
bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value);

void show_usage(void);
test/integration-test-main.sh

@@ -933,6 +933,9 @@ function test_ut_ossfs
}

function add_all_tests {
    if `ps -ef | grep -v grep | grep s3fs | grep -q use_cache`; then
        add_tests test_cache_file_stat
    fi
    if ! ps u $S3FS_PID | grep -q ensure_diskfree && ! uname | grep -q Darwin; then
        add_tests test_clean_up_cache
    fi
@@ -978,9 +981,6 @@ function add_all_tests
    add_tests test_upload_sparsefile
    add_tests test_mix_upload_entities
    add_tests test_ut_ossfs
    if `ps -ef | grep -v grep | grep s3fs | grep -q use_cache`; then
        add_tests test_cache_file_stat
    fi
}

init_suite