Mirror of https://github.com/s3fs-fuse/s3fs-fuse.git, synced 2025-02-02 10:38:25 +00:00
Fixed bugs and Changed utility mode
1) Fixed bugs
* Rename objects: s3fs specified the wrong part number during a multipart rename, and it also added the x-amz-acl and x-amz-server-side-encryption headers when renaming objects. Both issues are fixed.
2) Changed the retry logic for multipart uploading (and renaming)
Sometimes s3fs receives a 400 HTTP response from S3 for one part while uploading a large object with multipart upload. The new logic retries the failed part up to the count given by the "retries" option.
3) Added an action to utility mode
s3fs already had a utility mode that displayed the raw result of the REST request listing in-progress multipart uploads. That raw XML result is now converted into a list, and s3fs then starts an interactive prompt for removing the entries. This lets you remove objects left behind by failed multipart uploads, so you no longer have to pay for them.

git-svn-id: http://s3fs.googlecode.com/svn/trunk@493 df820570-a93a-0410-bd06-b72b767a4274
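For reference, the removal step that utility mode now performs maps to the S3 "Abort Multipart Upload" REST call: a plain DELETE on the object key with an uploadId query parameter (see the new S3fsCurl::AbortMultipartUpload in the diff below). A minimal standalone libcurl sketch of that request follows; the bucket, key, and upload ID are placeholders, and the Date/Authorization signing headers that s3fs itself builds are left out for brevity.

// Illustrative sketch only: sends DELETE /<key>?uploadId=<id> to abort one
// incomplete multipart upload. Bucket, key, and upload ID are placeholders,
// and the request is unsigned (s3fs adds Date/Authorization itself).
#include <curl/curl.h>
#include <cstdio>
#include <string>

int main(void)
{
  curl_global_init(CURL_GLOBAL_ALL);
  CURL* hCurl = curl_easy_init();
  if(!hCurl){
    return 1;
  }
  std::string url = "http://mybucket.s3.amazonaws.com/path/to/object?uploadId=EXAMPLEUPLOADID";
  curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
  curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE");   // abort = DELETE with uploadId
  CURLcode res = curl_easy_perform(hCurl);
  if(CURLE_OK != res){
    fprintf(stderr, "abort request failed: %s\n", curl_easy_strerror(res));
  }
  curl_easy_cleanup(hCurl);
  curl_global_cleanup();
  return (CURLE_OK == res) ? 0 : 1;
}

Once every listed uploadId has been aborted this way, S3 discards the already-uploaded parts and stops charging storage for them.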
This commit is contained in:
parent 1bae39e21f
commit 09fc2593e3
141 src/curl.cpp
@@ -849,7 +849,6 @@ S3fsCurl* S3fsCurl::UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl)
  if(!s3fscurl){
    return NULL;
  }

  // parse and get part_num, upload_id.
  string upload_id;
  string part_num_str;
@@ -862,6 +861,11 @@ S3fsCurl* S3fsCurl::UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl)
  }
  part_num = atoi(part_num_str.c_str());

  if(s3fscurl->retry_count >= S3fsCurl::retries){
    DPRN("Over retry count(%d) limit(%s:%d).", s3fscurl->retry_count, s3fscurl->path.c_str(), part_num);
    return NULL;
  }

  // duplicate request
  S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe());
  newcurl->partdata.etaglist = s3fscurl->partdata.etaglist;
@@ -871,6 +875,7 @@ S3fsCurl* S3fsCurl::UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl)
  newcurl->partdata.size = s3fscurl->b_partdata_size;
  newcurl->b_partdata_startpos = s3fscurl->b_partdata_startpos;
  newcurl->b_partdata_size = s3fscurl->b_partdata_size;
  newcurl->retry_count = s3fscurl->retry_count + 1;

  // setup new curl object
  if(0 != newcurl->UploadMultipartPostSetup(s3fscurl->path.c_str(), part_num, upload_id)){
@@ -978,6 +983,11 @@ S3fsCurl* S3fsCurl::ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl)
  if(!s3fscurl){
    return NULL;
  }
  if(s3fscurl->retry_count >= S3fsCurl::retries){
    DPRN("Over retry count(%d) limit(%s).", s3fscurl->retry_count, s3fscurl->path.c_str());
    return NULL;
  }

  // duplicate request(setup new curl object)
  S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe());
  if(0 != (result = newcurl->PreGetObjectRequest(
@@ -986,6 +996,8 @@ S3fsCurl* S3fsCurl::ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl)
    delete newcurl;
    return NULL;;
  }
  newcurl->retry_count = s3fscurl->retry_count + 1;

  return newcurl;
}

@@ -1121,7 +1133,7 @@ bool S3fsCurl::CheckIAMCredentialUpdate(void)
S3fsCurl::S3fsCurl(bool ahbe) :
    hCurl(NULL), path(""), base_path(""), saved_path(""), url(""), requestHeaders(NULL),
    bodydata(NULL), headdata(NULL), LastResponseCode(-1), postdata(NULL), postdata_remaining(0), is_use_ahbe(ahbe),
    b_infile(NULL), b_postdata(NULL), b_postdata_remaining(0), b_partdata_startpos(0), b_partdata_size(0)
    retry_count(0), b_infile(NULL), b_postdata(NULL), b_postdata_remaining(0), b_partdata_startpos(0), b_partdata_size(0)
{
  type = REQTYPE_UNSET;
}
@@ -1235,6 +1247,9 @@ bool S3fsCurl::ClearInternalData(void)
  LastResponseCode = -1;
  postdata = NULL;
  postdata_remaining = 0;
#ifndef OR_CORE_RETRY
  retry_count = 0;
#endif
  b_infile = NULL;
  b_postdata = NULL;
  b_postdata_remaining = 0;
@@ -1298,6 +1313,9 @@ bool S3fsCurl::RemakeHandle(void)
  }
  LastResponseCode = -1;

  // count up(only use for multipart)
  retry_count++;

  // set from backup
  postdata = b_postdata;
  postdata_remaining = b_postdata_remaining;
@@ -1427,6 +1445,12 @@ bool S3fsCurl::RemakeHandle(void)
      curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
      break;

    case REQTYPE_ABORTMULTIUPLOAD:
      curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
      curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE");
      curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
      break;

    default:
      DPRNNN("request type is unknown(%d)", type);
      return false;
@@ -2516,6 +2540,45 @@ int S3fsCurl::MultipartListRequest(string& body)
  return result;
}

int S3fsCurl::AbortMultipartUpload(const char* tpath, string& upload_id)
{
  FPRNNN("[tpath=%s]", SAFESTRPTR(tpath));

  if(!tpath){
    return -1;
  }
  if(!CreateCurlHandle(true)){
    return -1;
  }
  string resource;
  string turl;
  MakeUrlResource(get_realpath(tpath).c_str(), resource, turl);

  turl += "?uploadId=" + upload_id;
  resource += "?uploadId=" + upload_id;
  url = prepare_url(turl.c_str());
  path = tpath;
  requestHeaders = NULL;
  responseHeaders.clear();

  string date = get_date();
  requestHeaders = curl_slist_sort_insert(requestHeaders, string("Date: " + date).c_str());
  if(!S3fsCurl::IsPublicBucket()){
    requestHeaders = curl_slist_sort_insert(
          requestHeaders,
          string("Authorization: AWS " + AWSAccessKeyId + ":" +
            CalcSignature("DELETE", "", "", date, resource)).c_str());
  }

  curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
  curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE");
  curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);

  type = REQTYPE_ABORTMULTIUPLOAD;

  return RequestPerform();
}

//
// PUT /ObjectName?partNumber=PartNumber&uploadId=UploadId HTTP/1.1
// Host: BucketName.s3.amazonaws.com
@@ -2614,6 +2677,7 @@ int S3fsCurl::UploadMultipartPostRequest(const char* tpath, int part_num, string
      result = -1;
    }
  }

  // closing
  delete bodydata;
  bodydata = NULL;
@@ -2623,7 +2687,7 @@ int S3fsCurl::UploadMultipartPostRequest(const char* tpath, int part_num, string
  return result;
}

int S3fsCurl::CopyMultipartPostRequest(const char* from, const char* to, int part_num, string& upload_id, headers_t& meta, bool ow_sse_flg)
int S3fsCurl::CopyMultipartPostRequest(const char* from, const char* to, int part_num, string& upload_id, headers_t& meta)
{
  FPRNNN("[from=%s][to=%s][part=%d]", SAFESTRPTR(from), SAFESTRPTR(to), part_num);

@@ -2662,24 +2726,8 @@ int S3fsCurl::CopyMultipartPostRequest(const char* from, const char* to, int par
      requestHeaders = curl_slist_sort_insert(requestHeaders, string(key + ":" + value).c_str());
    }else if(key == "x-amz-copy-source-range"){
      requestHeaders = curl_slist_sort_insert(requestHeaders, string(key + ":" + value).c_str());
    }else if(key.substr(0,9) == "x-amz-acl"){
      // not set value, but after set it.
    }else if(!ow_sse_flg && key == "x-amz-server-side-encryption"){
      // If ow_sse_flg is false, SSE inherit from meta.
      requestHeaders = curl_slist_sort_insert(requestHeaders, string(key + ":" + value).c_str());
    }
  }
  // "x-amz-acl", rrs, sse
  requestHeaders = curl_slist_sort_insert(requestHeaders, string("x-amz-acl:" + S3fsCurl::default_acl).c_str());
  if(S3fsCurl::is_use_rrs){
    requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class:REDUCED_REDUNDANCY");
  }
  if(ow_sse_flg && S3fsCurl::is_use_sse){
    requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption:AES256");
  }
  if(is_use_ahbe){
    // set additional header by ahbe conf
    requestHeaders = AdditionalHeader::get()->AddHeader(requestHeaders, to);
    // NOTICE: x-amz-acl, x-amz-server-side-encryption is not set!
  }
  if(!S3fsCurl::IsPublicBucket()){
    requestHeaders = curl_slist_sort_insert(
@@ -2719,7 +2767,7 @@ int S3fsCurl::CopyMultipartPostRequest(const char* from, const char* to, int par
  return result;
}

int S3fsCurl::MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool ow_sse_flg)
int S3fsCurl::MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta)
{
  int result;
  string upload_id;
@@ -2730,7 +2778,7 @@ int S3fsCurl::MultipartHeadRequest(const char* tpath, off_t size, headers_t& met

  FPRNNN("[tpath=%s]", SAFESTRPTR(tpath));

  if(0 != (result = PreMultipartPostRequest(tpath, meta, upload_id, ow_sse_flg))){
  if(0 != (result = PreMultipartPostRequest(tpath, meta, upload_id, false))){
    return result;
  }
  DestroyCurlHandle();
@@ -2740,9 +2788,10 @@ int S3fsCurl::MultipartHeadRequest(const char* tpath, off_t size, headers_t& met

    strrange << "bytes=" << (size - bytes_remaining) << "-" << (size - bytes_remaining + chunk - 1);
    meta["x-amz-copy-source-range"] = strrange.str();
    strrange.str("");
    strrange.clear(stringstream::goodbit);

    if(0 != (result = CopyMultipartPostRequest(tpath, tpath, (list.size() + 1), upload_id, meta, ow_sse_flg))){
    if(0 != (result = CopyMultipartPostRequest(tpath, tpath, (list.size() + 1), upload_id, meta))){
      return result;
    }
    list.push_back(partdata.etag);
@@ -2844,9 +2893,10 @@ int S3fsCurl::MultipartRenameRequest(const char* from, const char* to, headers_t

    strrange << "bytes=" << (size - bytes_remaining) << "-" << (size - bytes_remaining + chunk - 1);
    meta["x-amz-copy-source-range"] = strrange.str();
    strrange.str("");
    strrange.clear(stringstream::goodbit);

    if(0 != (result = CopyMultipartPostRequest(from, to, list.size(), upload_id, meta, false))){
    if(0 != (result = CopyMultipartPostRequest(from, to, (list.size() + 1), upload_id, meta))){
      return result;
    }
    list.push_back(partdata.etag);
@@ -3030,17 +3080,32 @@ int S3fsMultiCurl::MultiRead(void)
    retrycurl= NULL;

    if(s3fscurl){
      bool isRetry = false;
      if(CURLE_OK == msg->data.result){
        long responseCode = -1;
        if(s3fscurl->GetResponseCode(responseCode) && 400 > responseCode){
          // add into stat cache
          if(SuccessCallback && !SuccessCallback(s3fscurl)){
            DPRNNN("error from callback function(%s).", s3fscurl->base_path.c_str());
        if(s3fscurl->GetResponseCode(responseCode)){
          if(400 > responseCode){
            // add into stat cache
            if(SuccessCallback && !SuccessCallback(s3fscurl)){
              DPRNNN("error from callback function(%s).", s3fscurl->base_path.c_str());
            }
          }else if(400 == responseCode){
            // as possibly in multipart
            DPRNNN("failed a request(%ld: %s)", responseCode, s3fscurl->base_path.c_str());
            isRetry = true;
          }else{
            DPRNNN("failed a request(%ld: %s)", responseCode, s3fscurl->base_path.c_str());
          }
        }else{
          // This case is directory object("dir", "non dir object", "_$folder$", etc)
          DPRNNN("failed a request(%ld: %s)", responseCode, s3fscurl->base_path.c_str());
          DPRNNN("failed a request(Unknown respons code: %s)", s3fscurl->base_path.c_str());
        }
      }else{
        DPRNNN("failed to read(remaining: %d code: %d msg: %s), so retry this.",
          remaining_messages, msg->data.result, curl_easy_strerror(msg->data.result));
        isRetry = true;
      }

      if(!isRetry){
        cMap_req.erase(hCurl);
        curl_multi_remove_handle(hMulti, hCurl);

@@ -3048,9 +3113,6 @@ int S3fsMultiCurl::MultiRead(void)
        delete s3fscurl;

      }else{
        DPRNNN("failed to read(remaining: %d code: %d msg: %s), so retry this.",
          remaining_messages, msg->data.result, curl_easy_strerror(msg->data.result));

        cMap_req.erase(hCurl);
        curl_multi_remove_handle(hMulti, hCurl);

@@ -3433,8 +3495,19 @@ struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* d
  }
  new_item->next = NULL;

  string strnew = data;
  string::size_type pos = strnew.find(':', 0);
  if(string::npos != pos){
    strnew = strnew.substr(0, pos);
  }

  for(lastpos = NULL, curpos = list; curpos; curpos = curpos->next){
    int result = strcmp(data, curpos->data);
    string strcur = curpos->data;
    if(string::npos != (pos = strcur.find(':', 0))){
      strcur = strcur.substr(0, pos);
    }

    int result = strcmp(strnew.c_str(), strcur.c_str());
    if(0 == result){
      // same data, so replace it.
      if(lastpos){
12 src/curl.h
@@ -133,7 +133,8 @@ class S3fsCurl
      REQTYPE_UPLOADMULTIPOST,
      REQTYPE_COPYMULTIPOST,
      REQTYPE_MULTILIST,
      REQTYPE_IAMCRED
      REQTYPE_IAMCRED,
      REQTYPE_ABORTMULTIUPLOAD
    };

    // class variables
@@ -182,6 +183,7 @@ class S3fsCurl
    int postdata_remaining; // use by post method and read callback function.
    filepart partdata; // use by multipart upload/get object callback
    bool is_use_ahbe; // additional header by extension
    int retry_count; // retry count for multipart
    FILE* b_infile; // backup for retrying
    const unsigned char* b_postdata; // backup for retrying
    int b_postdata_remaining; // backup for retrying
@@ -237,7 +239,7 @@ class S3fsCurl
    int CompleteMultipartPostRequest(const char* tpath, std::string& upload_id, etaglist_t& parts);
    int UploadMultipartPostSetup(const char* tpath, int part_num, std::string& upload_id);
    int UploadMultipartPostRequest(const char* tpath, int part_num, std::string& upload_id);
    int CopyMultipartPostRequest(const char* from, const char* to, int part_num, std::string& upload_id, headers_t& meta, bool ow_sse_flg);
    int CopyMultipartPostRequest(const char* from, const char* to, int part_num, std::string& upload_id, headers_t& meta);

  public:
    // class methods
@@ -294,7 +296,8 @@ class S3fsCurl
    int CheckBucket(void);
    int ListBucketRequest(const char* tpath, const char* query);
    int MultipartListRequest(std::string& body);
    int MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool ow_sse_flg);
    int AbortMultipartUpload(const char* tpath, std::string& upload_id);
    int MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta);
    int MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool ow_sse_flg);
    int MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size);

@@ -312,6 +315,9 @@ class S3fsCurl
    bool EnableUseAhbe(void) { return SetUseAhbe(true); }
    bool DisableUseAhbe(void) { return SetUseAhbe(false); }
    bool IsUseAhbe(void) const { return is_use_ahbe; }
    int GetMultipartRetryCount(void) const { return retry_count; }
    void SetMultipartRetryCount(int retrycnt) { retry_count = retrycnt; }
    bool IsOverMultipartRetryCount(void) const { return (retry_count >= S3fsCurl::retries); }
};

//----------------------------------------------
312 src/s3fs.cpp
@@ -65,6 +65,17 @@ using namespace std;
#define IS_REPLACEDIR(type) (DIRTYPE_OLD == type || DIRTYPE_FOLDER == type || DIRTYPE_NOOBJ == type)
#define IS_RMTYPEDIR(type) (DIRTYPE_OLD == type || DIRTYPE_FOLDER == type)

//-------------------------------------------------------------------
// Structs
//-------------------------------------------------------------------
typedef struct uncomplete_multipart_info{
  string key;
  string id;
  string date;
}UNCOMP_MP_INFO;

typedef std::list<UNCOMP_MP_INFO> uncomp_mp_list_t;

//-------------------------------------------------------------------
// Global valiables
//-------------------------------------------------------------------
@@ -132,6 +143,10 @@ static int rename_object_nocopy(const char* from, const char* to);
static int clone_directory_object(const char* from, const char* to);
static int rename_directory(const char* from, const char* to);
static int remote_mountpath_exists(const char* path);
static xmlChar* get_exp_value_xml(xmlDocPtr doc, xmlXPathContextPtr ctx, const char* exp_key);
static void print_uncomp_mp_list(uncomp_mp_list_t& list);
static bool abort_uncomp_mp_list(uncomp_mp_list_t& list);
static bool get_uncomp_mp_list(xmlDocPtr doc, uncomp_mp_list_t& list);
static int s3fs_utility_mode(void);
static int s3fs_check_service(void);
static int check_for_aws_format(void);
@@ -643,7 +658,7 @@ static int put_headers(const char* path, headers_t& meta, bool ow_sse_flg)

  if(buf.st_size >= FIVE_GB){
    // multipart
    if(0 != (result = s3fscurl.MultipartHeadRequest(path, buf.st_size, meta, ow_sse_flg))){
    if(0 != (result = s3fscurl.MultipartHeadRequest(path, buf.st_size, meta))){
      return result;
    }
  }else{
@@ -2060,6 +2075,11 @@ static S3fsCurl* multi_head_retry_callback(S3fsCurl* s3fscurl)
  if(!s3fscurl){
    return NULL;
  }
  if(s3fscurl->IsOverMultipartRetryCount()){
    DPRN("Over retry count(%d) limit(%s).", s3fscurl->GetMultipartRetryCount(), s3fscurl->GetSpacialSavedPath().c_str());
    return NULL;
  }

  S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe());
  string path = s3fscurl->GetPath();
  string base_path = s3fscurl->GetBasePath();
@@ -2070,6 +2090,8 @@ static S3fsCurl* multi_head_retry_callback(S3fsCurl* s3fscurl)
    delete newcurl;
    return NULL;
  }
  newcurl->SetMultipartRetryCount(s3fscurl->GetMultipartRetryCount());

  return newcurl;
}

@@ -2628,6 +2650,188 @@ static int s3fs_access(const char* path, int mask)
  return result;
}

static xmlChar* get_exp_value_xml(xmlDocPtr doc, xmlXPathContextPtr ctx, const char* exp_key)
{
  if(!doc || !ctx || !exp_key){
    return NULL;
  }

  xmlXPathObjectPtr exp;
  xmlNodeSetPtr exp_nodes;
  xmlChar* exp_value;

  // search exp_key tag
  if(NULL == (exp = xmlXPathEvalExpression((xmlChar*)exp_key, ctx))){
    DPRNNN("Could not find key(%s).", exp_key);
    return NULL;
  }
  if(xmlXPathNodeSetIsEmpty(exp->nodesetval)){
    DPRNNN("Key(%s) node is empty.", exp_key);
    S3FS_XMLXPATHFREEOBJECT(exp);
    return NULL;
  }
  // get exp_key value & set in struct
  exp_nodes = exp->nodesetval;
  if(NULL == (exp_value = xmlNodeListGetString(doc, exp_nodes->nodeTab[0]->xmlChildrenNode, 1))){
    DPRNNN("Key(%s) value is empty.", exp_key);
    S3FS_XMLXPATHFREEOBJECT(exp);
    return NULL;
  }

  S3FS_XMLXPATHFREEOBJECT(exp);
  return exp_value;
}

static void print_uncomp_mp_list(uncomp_mp_list_t& list)
{
  printf("\n");
  printf("Lists the parts that have been uploaded for a specific multipart upload.\n");
  printf("\n");

  if(0 < list.size()){
    printf("---------------------------------------------------------------\n");

    int cnt = 0;
    for(uncomp_mp_list_t::iterator iter = list.begin(); iter != list.end(); iter++, cnt++){
      printf(" Path : %s\n", (*iter).key.c_str());
      printf(" UploadId : %s\n", (*iter).id.c_str());
      printf(" Date : %s\n", (*iter).date.c_str());
      printf("\n");
    }
    printf("---------------------------------------------------------------\n");

  }else{
    printf("There is no list.\n");
  }
}

static bool abort_uncomp_mp_list(uncomp_mp_list_t& list)
{
  char buff[1024];

  if(0 >= list.size()){
    return false;
  }
  memset(buff, 0, sizeof(buff));

  // confirm
  while(true){
    printf("Would you remove all objects? [Y/N]\n");
    if(NULL != fgets(buff, sizeof(buff), stdin)){
      if(0 == strcasecmp(buff, "Y\n") || 0 == strcasecmp(buff, "YES\n")){
        break;
      }else if(0 == strcasecmp(buff, "N\n") || 0 == strcasecmp(buff, "NO\n")){
        return true;
      }
      printf("*** please put Y(yes) or N(no).\n");
    }
  }

  // do removing their.
  S3fsCurl s3fscurl;
  bool result = true;
  for(uncomp_mp_list_t::iterator iter = list.begin(); iter != list.end(); iter++){
    const char* tpath = (*iter).key.c_str();
    string upload_id = (*iter).id;

    if(0 != s3fscurl.AbortMultipartUpload(tpath, upload_id)){
      fprintf(stderr, "Failed to remove %s multipart uploading object.\n", tpath);
      result = false;
    }else{
      printf("Succeed to remove %s multipart uploading object.\n", tpath);
    }

    // reset(initialize) curl object
    s3fscurl.DestroyCurlHandle();
  }

  return result;
}

static bool get_uncomp_mp_list(xmlDocPtr doc, uncomp_mp_list_t& list)
{
  if(!doc){
    return false;
  }

  xmlXPathContextPtr ctx = xmlXPathNewContext(doc);;

  string xmlnsurl;
  string ex_upload = "//";
  string ex_key = "";
  string ex_id = "";
  string ex_date = "";

  if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){
    xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str());
    ex_upload += "s3:";
    ex_key += "s3:";
    ex_id += "s3:";
    ex_date += "s3:";
  }
  ex_upload += "Upload";
  ex_key += "Key";
  ex_id += "UploadId";
  ex_date += "Initiated";

  // get "Upload" Tags
  xmlXPathObjectPtr upload_xp;
  if(NULL == (upload_xp = xmlXPathEvalExpression((xmlChar*)ex_upload.c_str(), ctx))){
    DPRNNN("xmlXPathEvalExpression returns null.");
    return false;
  }
  if(xmlXPathNodeSetIsEmpty(upload_xp->nodesetval)){
    DPRNNN("upload_xp->nodesetval is empty.");
    S3FS_XMLXPATHFREEOBJECT(upload_xp);
    S3FS_XMLXPATHFREECONTEXT(ctx);
    return true;
  }

  // Make list
  int cnt;
  xmlNodeSetPtr upload_nodes;
  list.clear();
  for(cnt = 0, upload_nodes = upload_xp->nodesetval; cnt < upload_nodes->nodeNr; cnt++){
    ctx->node = upload_nodes->nodeTab[cnt];

    UNCOMP_MP_INFO part;
    xmlChar* ex_value;

    // search "Key" tag
    if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_key.c_str()))){
      continue;
    }
    if('/' != *((char*)ex_value)){
      part.key = "/";
    }else{
      part.key = "";
    }
    part.key += (char*)ex_value;
    S3FS_XMLFREE(ex_value);

    // search "UploadId" tag
    if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_id.c_str()))){
      continue;
    }
    part.id = (char*)ex_value;
    S3FS_XMLFREE(ex_value);

    // search "Initiated" tag
    if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_date.c_str()))){
      continue;
    }
    part.date = (char*)ex_value;
    S3FS_XMLFREE(ex_value);

    list.push_back(part);
  }

  S3FS_XMLXPATHFREEOBJECT(upload_xp);
  S3FS_XMLXPATHFREECONTEXT(ctx);

  return true;
}

static int s3fs_utility_mode(void)
{
  if(!utility_mode){
@@ -2650,7 +2854,32 @@ static int s3fs_utility_mode(void)
    fprintf(stderr, "%s: Could not get list multipart upload.\n", program_name.c_str());
    result = EXIT_FAILURE;
  }else{
    printf("body.text:\n%s\n", body.c_str());
    // perse result(uncomplete multipart upload information)
    FPRNINFO("response body = {\n%s\n}", body.c_str());

    xmlDocPtr doc;
    if(NULL == (doc = xmlReadMemory(body.c_str(), static_cast<int>(body.size()), "", NULL, 0))){
      DPRN("xmlReadMemory returns with error.");
      result = EXIT_FAILURE;

    }else{
      // make working uploads list
      uncomp_mp_list_t list;
      if(!get_uncomp_mp_list(doc, list)){
        DPRN("get_uncomp_mp_list returns with error.");
        result = EXIT_FAILURE;

      }else{
        // print list
        print_uncomp_mp_list(list);
        // remove
        if(!abort_uncomp_mp_list(list)){
          DPRN("something error occured in removing process.");
          result = EXIT_FAILURE;
        }
      }
      S3FS_XMLFREEDOC(doc);
    }
  }

  // Destory curl
@@ -3111,45 +3340,60 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
      return 0;
    }

    // save the mountpoint and do some basic error checking
    mountpoint = arg;
    struct stat stbuf;
    // the second NONPOT option is the mountpoint(not utility mode)
    if(0 == mountpoint.size() && 0 == utility_mode){
      // save the mountpoint and do some basic error checking
      mountpoint = arg;
      struct stat stbuf;

    if(stat(arg, &stbuf) == -1){
      fprintf(stderr, "%s: unable to access MOUNTPOINT %s: %s\n",
          program_name.c_str(), mountpoint.c_str(), strerror(errno));
      return -1;
    }
    if(!(S_ISDIR(stbuf.st_mode))){
      fprintf(stderr, "%s: MOUNTPOINT: %s is not a directory\n",
          program_name.c_str(), mountpoint.c_str());
      return -1;
    }
    if(!set_moutpoint_attribute(stbuf)){
      fprintf(stderr, "%s: MOUNTPOINT: %s permission denied.\n",
          program_name.c_str(), mountpoint.c_str());
      return -1;
    }

    if(!nonempty){
      struct dirent *ent;
      DIR *dp = opendir(mountpoint.c_str());
      if(dp == NULL){
        fprintf(stderr, "%s: failed to open MOUNTPOINT: %s: %s\n",
            program_name.c_str(), mountpoint.c_str(), strerror(errno));
      if(stat(arg, &stbuf) == -1){
        fprintf(stderr, "%s: unable to access MOUNTPOINT %s: %s\n",
            program_name.c_str(), mountpoint.c_str(), strerror(errno));
        return -1;
      }
      while((ent = readdir(dp)) != NULL){
        if(strcmp(ent->d_name, ".") != 0 && strcmp(ent->d_name, "..") != 0){
          closedir(dp);
          fprintf(stderr, "%s: MOUNTPOINT directory %s is not empty.\n"
              "%s: if you are sure this is safe, can use the 'nonempty' mount option.\n",
              program_name.c_str(), mountpoint.c_str(), program_name.c_str());
      if(!(S_ISDIR(stbuf.st_mode))){
        fprintf(stderr, "%s: MOUNTPOINT: %s is not a directory\n",
            program_name.c_str(), mountpoint.c_str());
        return -1;
      }
      if(!set_moutpoint_attribute(stbuf)){
        fprintf(stderr, "%s: MOUNTPOINT: %s permission denied.\n",
            program_name.c_str(), mountpoint.c_str());
        return -1;
      }

      if(!nonempty){
        struct dirent *ent;
        DIR *dp = opendir(mountpoint.c_str());
        if(dp == NULL){
          fprintf(stderr, "%s: failed to open MOUNTPOINT: %s: %s\n",
              program_name.c_str(), mountpoint.c_str(), strerror(errno));
          return -1;
        }
        while((ent = readdir(dp)) != NULL){
          if(strcmp(ent->d_name, ".") != 0 && strcmp(ent->d_name, "..") != 0){
            closedir(dp);
            fprintf(stderr, "%s: MOUNTPOINT directory %s is not empty.\n"
                "%s: if you are sure this is safe, can use the 'nonempty' mount option.\n",
                program_name.c_str(), mountpoint.c_str(), program_name.c_str());
            return -1;
          }
        }
        closedir(dp);
      }
      closedir(dp);
      return 1;
    }

    // Unknow option
    if(0 == utility_mode){
      fprintf(stderr, "%s: specified unknown third optioni(%s).\n", program_name.c_str(), arg);
    }else{
      fprintf(stderr, "%s: specified unknown second optioni(%s).\n"
        "%s: you don't need to specify second option(mountpoint) for utility mode(-u).\n",
        program_name.c_str(), arg, program_name.c_str());
    }
    return -1;

  }else if(key == FUSE_OPT_KEY_OPT){
    if(0 == STR2NCMP(arg, "uid=")){
      s3fs_uid = strtoul(strchr(arg, '=') + sizeof(char), 0, 10);