Mirror of https://github.com/s3fs-fuse/s3fs-fuse.git (synced 2024-11-17 09:45:11 +00:00)
Commit b5ffd419d8: Source file division and set 4 spaces and cleanup
Parent: c6e23212bb
@ -43,3 +43,12 @@ cppcheck:
        --suppress=missingIncludeSystem \
        --suppress=unmatchedSuppression \
        src/ test/

#
# Local variables:
# tab-width: 4
# c-basic-offset: 4
# End:
# vim600: expandtab sw=4 ts=4 fdm=marker
# vim<600: expandtab sw=4 ts=4
#
@ -44,3 +44,11 @@ echo "--- Finished autotools ----------"

exit 0

#
# Local variables:
# tab-width: 4
# c-basic-offset: 4
# End:
# vim600: expandtab sw=4 ts=4 fdm=marker
# vim<600: expandtab sw=4 ts=4
#
@ -341,3 +341,11 @@ dnl ----------------------------------------------
dnl end configuration
dnl ----------------------------------------------

#
# Local variables:
# tab-width: 4
# c-basic-offset: 4
# End:
# vim600: expandtab sw=4 ts=4 fdm=marker
# vim<600: expandtab sw=4 ts=4
#
@ -21,27 +21,43 @@ bin_PROGRAMS=s3fs

AM_CPPFLAGS = $(DEPS_CFLAGS)
if USE_GNUTLS_NETTLE
    AM_CPPFLAGS += -DUSE_GNUTLS_NETTLE
endif

s3fs_SOURCES = \
    s3fs.cpp \
    curl.cpp \
    cache.cpp \
    string_util.cpp \
    s3fs_util.cpp \
    fdcache.cpp \
    common_auth.cpp \
    addhead.cpp \
    sighandlers.cpp
    s3fs.cpp \
    s3fs_global.cpp \
    s3fs_help.cpp \
    s3fs_logger.cpp \
    s3fs_xml.cpp \
    metaheader.cpp \
    mpu_util.cpp \
    mvnode.cpp \
    curl.cpp \
    curl_handlerpool.cpp \
    curl_multi.cpp \
    curl_util.cpp \
    bodydata.cpp \
    s3objlist.cpp \
    cache.cpp \
    string_util.cpp \
    s3fs_util.cpp \
    fdcache.cpp \
    fdcache_entity.cpp \
    fdcache_page.cpp \
    fdcache_stat.cpp \
    addhead.cpp \
    sighandlers.cpp \
    autolock.cpp \
    common_auth.cpp
if USE_SSL_OPENSSL
    s3fs_SOURCES += openssl_auth.cpp
endif
if USE_SSL_GNUTLS
    s3fs_SOURCES += gnutls_auth.cpp
endif
if USE_SSL_NSS
    s3fs_SOURCES += nss_auth.cpp
endif

s3fs_LDADD = $(DEPS_LIBS)

@ -54,3 +70,12 @@ TESTS = test_string_util

clang-tidy:
    clang-tidy $(s3fs_SOURCES) -- $(DEPS_CFLAGS) $(CPPFLAGS)

#
# Local variables:
# tab-width: 4
# c-basic-offset: 4
# End:
# vim600: expandtab sw=4 ts=4 fdm=marker
# vim<600: expandtab sw=4 ts=4
#
383  src/addhead.cpp
@ -20,20 +20,13 @@

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <syslog.h>
#include <curl/curl.h>
#include <sstream>
#include <fstream>
#include <string>
#include <map>
#include <list>
#include <vector>

#include "common.h"
#include "addhead.h"
#include "curl.h"
#include "s3fs.h"
#include "addhead.h"
#include "curl_util.h"

using namespace std;

@ -52,236 +45,236 @@ AdditionalHeader AdditionalHeader::singleton;

//-------------------------------------------------------------------
AdditionalHeader::AdditionalHeader()
{
    if(this == AdditionalHeader::get()){
        is_enable = false;
    }else{
        abort();
    }
}

AdditionalHeader::~AdditionalHeader()
{
    if(this == AdditionalHeader::get()){
        Unload();
    }else{
        abort();
    }
}

bool AdditionalHeader::Load(const char* file)
{
    if(!file){
        S3FS_PRN_WARN("file is NULL.");
        return false;
    }
    Unload();

    ifstream AH(file);
    if(!AH.good()){
        S3FS_PRN_WARN("Could not open file(%s).", file);
        return false;
    }

    // read file
    string   line;
    ADDHEAD *paddhead;
    while(getline(AH, line)){
        if('#' == line[0]){
            continue;
        }
        if(line.empty()){
            continue;
        }
        // load a line
        istringstream ss(line);
        string        key;       // suffix(key)
        string        head;      // additional HTTP header
        string        value;     // header value
        if(0 == isblank(line[0])){
            ss >> key;
        }
        if(ss){
            ss >> head;
            if(ss && static_cast<size_t>(ss.tellg()) < line.size()){
                value = line.substr(static_cast<int>(ss.tellg()) + 1);
            }
        }

        // check it
        if(head.empty()){
            if(key.empty()){
                continue;
            }
            S3FS_PRN_ERR("file format error: %s key(suffix) is no HTTP header value.", key.c_str());
            Unload();
            return false;
        }

        paddhead = new ADDHEAD;
        if(0 == strncasecmp(key.c_str(), ADD_HEAD_REGEX, strlen(ADD_HEAD_REGEX))){
            // regex
            if(key.size() <= strlen(ADD_HEAD_REGEX)){
                S3FS_PRN_ERR("file format error: %s key(suffix) does not have key string.", key.c_str());
                delete paddhead;
                continue;
            }
            key = key.substr(strlen(ADD_HEAD_REGEX));

            // compile
            regex_t* preg = new regex_t;
            int      result;
            if(0 != (result = regcomp(preg, key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info
                char errbuf[256];
                regerror(result, preg, errbuf, sizeof(errbuf));
                S3FS_PRN_ERR("failed to compile regex from %s key by %s.", key.c_str(), errbuf);
                delete preg;
                delete paddhead;
                continue;
            }

            // set
            paddhead->pregex     = preg;
            paddhead->basestring = key;
            paddhead->headkey    = head;
            paddhead->headvalue  = value;

        }else{
            // not regex, directly comparing
            paddhead->pregex     = NULL;
            paddhead->basestring = key;
            paddhead->headkey    = head;
            paddhead->headvalue  = value;
        }

        // add list
        addheadlist.push_back(paddhead);

        // set flag
        if(!is_enable){
            is_enable = true;
        }
    }
    return true;
}

void AdditionalHeader::Unload()
{
    is_enable = false;

    for(addheadlist_t::iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
        ADDHEAD *paddhead = *iter;
        if(paddhead){
            if(paddhead->pregex){
                regfree(paddhead->pregex);
                delete paddhead->pregex;
            }
            delete paddhead;
        }
    }
    addheadlist.clear();
}

bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const
{
    if(!is_enable){
        return true;
    }
    if(!path){
        S3FS_PRN_WARN("path is NULL.");
        return false;
    }

    size_t pathlength = strlen(path);

    // loop
    //
    // [NOTE]
    // Because to allow duplicate key, and then scanning the entire table.
    //
    for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
        const ADDHEAD *paddhead = *iter;
        if(!paddhead){
            continue;
        }

        if(paddhead->pregex){
            // regex
            regmatch_t match;         // not use
            if(0 == regexec(paddhead->pregex, path, 1, &match, 0)){
                // match -> adding header
                meta[paddhead->headkey] = paddhead->headvalue;
            }
        }else{
            // directly comparing
            if(paddhead->basestring.length() < pathlength){
                if(0 == paddhead->basestring.length() || 0 == strcmp(&path[pathlength - paddhead->basestring.length()], paddhead->basestring.c_str())){
                    // match -> adding header
                    meta[paddhead->headkey] = paddhead->headvalue;
                }
            }
        }
    }
    return true;
}

struct curl_slist* AdditionalHeader::AddHeader(struct curl_slist* list, const char* path) const
{
    headers_t meta;

    if(!AddHeader(meta, path)){
        return list;
    }
    for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
        // Adding header
        list = curl_slist_sort_insert(list, iter->first.c_str(), iter->second.c_str());
    }
    meta.clear();
    S3FS_MALLOCTRIM(0);
    return list;
}

bool AdditionalHeader::Dump() const
{
    if(!IS_S3FS_LOG_DBG()){
        return true;
    }

    ostringstream ssdbg;
    int           cnt = 1;

    ssdbg << "Additional Header list[" << addheadlist.size() << "] = {" << endl;

    for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter, ++cnt){
        const ADDHEAD *paddhead = *iter;

        ssdbg << " [" << cnt << "] = {" << endl;

        if(paddhead){
            if(paddhead->pregex){
                ssdbg << " type\t\t--->\tregex" << endl;
            }else{
                ssdbg << " type\t\t--->\tsuffix matching" << endl;
            }
            ssdbg << " base string\t--->\t" << paddhead->basestring << endl;
            ssdbg << " add header\t--->\t" << paddhead->headkey << ": " << paddhead->headvalue << endl;
        }
        ssdbg << " }" << endl;
    }

    ssdbg << "}" << endl;

    // print all
    S3FS_PRN_DBG("%s", ssdbg.str().c_str());

    return true;
}

/*
* Local variables:
* tab-width: 2
* c-basic-offset: 2
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=2 ts=2 fdm=marker
* vim<600: expandtab sw=2 ts=2
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/
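Note: the Load() parser above implies the on-disk format for additional headers even though no sample file is part of this commit. The following is a minimal usage sketch, not taken from the repository; the config path, the helper function name, and the assumption that ADD_HEAD_REGEX is the "reg:" prefix are illustrative only.

// Minimal sketch (assumptions: ADD_HEAD_REGEX == "reg:", file path is hypothetical).
// Each non-comment line of the loaded file is parsed by Load() as
//     <suffix or reg:<regex>>  <HTTP header>  <value...>
// e.g.
//     .gz            Content-Encoding   gzip
//     reg:^/public/  Cache-Control      max-age=86400
#include "addhead.h"

void apply_additional_headers_example(headers_t& meta)
{
    AdditionalHeader* addhead = AdditionalHeader::get();   // reference the singleton
    if(!addhead->Load("/etc/s3fs/ahbe.conf")){              // hypothetical config path
        return;
    }
    addhead->Dump();                                        // printed only when debug logging is enabled
    addhead->AddHeader(meta, "/backup/archive.gz");         // suffix match adds Content-Encoding: gzip
    addhead->Unload();
}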
@ -23,48 +23,53 @@

#include <regex.h>

#include "metaheader.h"

//----------------------------------------------
// class AdditionalHeader
// Structure / Typedef
//----------------------------------------------
typedef struct add_header{
    regex_t*    pregex;       // not NULL means using regex, NULL means comparing suffix directly.
    std::string basestring;
    std::string headkey;
    std::string headvalue;
}ADDHEAD;

typedef std::vector<ADDHEAD *> addheadlist_t;

//----------------------------------------------
// Class AdditionalHeader
//----------------------------------------------
class AdditionalHeader
{
    private:
        static AdditionalHeader singleton;
        bool                    is_enable;
        addheadlist_t           addheadlist;

    protected:
        AdditionalHeader();
        ~AdditionalHeader();

    public:
        // Reference singleton
        static AdditionalHeader* get(void) { return &singleton; }

        bool Load(const char* file);
        void Unload(void);

        bool AddHeader(headers_t& meta, const char* path) const;
        struct curl_slist* AddHeader(struct curl_slist* list, const char* path) const;
        bool Dump(void) const;
};

#endif // S3FS_ADDHEAD_H_

/*
* Local variables:
* tab-width: 2
* c-basic-offset: 2
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=2 ts=2 fdm=marker
* vim<600: expandtab sw=2 ts=2
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/
82  src/autolock.cpp (new file)
@ -0,0 +1,82 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <cstdio>
#include <cstdlib>
#include <cerrno>

#include "common.h"
#include "s3fs.h"
#include "autolock.h"

using namespace std;

//-------------------------------------------------------------------
// Class AutoLock
//-------------------------------------------------------------------
AutoLock::AutoLock(pthread_mutex_t* pmutex, Type type) : auto_mutex(pmutex)
{
    if (type == ALREADY_LOCKED) {
        is_lock_acquired = false;
    } else if (type == NO_WAIT) {
        int res = pthread_mutex_trylock(auto_mutex);
        if(res == 0){
            is_lock_acquired = true;
        }else if(res == EBUSY){
            is_lock_acquired = false;
        }else{
            S3FS_PRN_CRIT("pthread_mutex_trylock returned: %d", res);
            abort();
        }
    } else {
        int res = pthread_mutex_lock(auto_mutex);
        if(res == 0){
            is_lock_acquired = true;
        }else{
            S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res);
            abort();
        }
    }
}

bool AutoLock::isLockAcquired() const
{
    return is_lock_acquired;
}

AutoLock::~AutoLock()
{
    if (is_lock_acquired) {
        int res = pthread_mutex_unlock(auto_mutex);
        if(res != 0){
            S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res);
            abort();
        }
    }
}

/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/
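Note: AutoLock is a plain RAII guard over a pthread mutex. A minimal sketch of the intended call pattern follows; it is not part of the commit and the lock and counter names are illustrative.

#include <pthread.h>
#include "autolock.h"

static pthread_mutex_t example_lock    = PTHREAD_MUTEX_INITIALIZER;   // hypothetical mutex
static int             example_counter = 0;

void increment_counter()
{
    AutoLock lock(&example_lock);          // blocks until the mutex is held
    ++example_counter;
}                                          // mutex released automatically here

bool try_increment_counter()
{
    AutoLock lock(&example_lock, AutoLock::NO_WAIT);   // trylock instead of lock
    if(!lock.isLockAcquired()){                        // EBUSY: someone else holds the mutex
        return false;
    }
    ++example_counter;
    return true;
}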
60  src/autolock.h (new file)
@ -0,0 +1,60 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef S3FS_AUTOLOCK_H_
#define S3FS_AUTOLOCK_H_

#include <pthread.h>

//-------------------------------------------------------------------
// AutoLock Class
//-------------------------------------------------------------------
class AutoLock
{
    public:
        enum Type {
            NO_WAIT = 1,
            ALREADY_LOCKED = 2,
            NONE = 0
        };

    private:
        pthread_mutex_t* const auto_mutex;
        bool                   is_lock_acquired;

    private:
        AutoLock(const AutoLock&);

    public:
        explicit AutoLock(pthread_mutex_t* pmutex, Type type = NONE);
        ~AutoLock();
        bool isLockAcquired() const;
};

#endif // S3FS_AUTOLOCK_H_

/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/
124  src/bodydata.cpp (new file)
@ -0,0 +1,124 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <cstdio>
#include <cstdlib>

#include "common.h"
#include "s3fs.h"
#include "bodydata.h"

using namespace std;

//-------------------------------------------------------------------
// Variables
//-------------------------------------------------------------------
static const int BODYDATA_RESIZE_APPEND_MIN = 1024;
static const int BODYDATA_RESIZE_APPEND_MID = 1024 * 1024;
static const int BODYDATA_RESIZE_APPEND_MAX = 10 * 1024 * 1024;

//-------------------------------------------------------------------
// Utility Functions
//-------------------------------------------------------------------
static size_t adjust_block(size_t bytes, size_t block)
{
    return ((bytes / block) + ((bytes % block) ? 1 : 0)) * block;
}

//-------------------------------------------------------------------
// Class BodyData
//-------------------------------------------------------------------
bool BodyData::Resize(size_t addbytes)
{
    if(IsSafeSize(addbytes)){
        return true;
    }

    // New size
    size_t need_size = adjust_block((lastpos + addbytes + 1) - bufsize, sizeof(off_t));

    if(BODYDATA_RESIZE_APPEND_MAX < bufsize){
        need_size = (BODYDATA_RESIZE_APPEND_MAX < need_size ? need_size : BODYDATA_RESIZE_APPEND_MAX);
    }else if(BODYDATA_RESIZE_APPEND_MID < bufsize){
        need_size = (BODYDATA_RESIZE_APPEND_MID < need_size ? need_size : BODYDATA_RESIZE_APPEND_MID);
    }else if(BODYDATA_RESIZE_APPEND_MIN < bufsize){
        need_size = ((bufsize * 2) < need_size ? need_size : (bufsize * 2));
    }else{
        need_size = (BODYDATA_RESIZE_APPEND_MIN < need_size ? need_size : BODYDATA_RESIZE_APPEND_MIN);
    }
    // realloc
    char* newtext;
    if(NULL == (newtext = (char*)realloc(text, (bufsize + need_size)))){
        S3FS_PRN_CRIT("not enough memory (realloc returned NULL)");
        free(text);
        text = NULL;
        return false;
    }
    text     = newtext;
    bufsize += need_size;

    return true;
}

void BodyData::Clear()
{
    if(text){
        free(text);
        text = NULL;
    }
    lastpos = 0;
    bufsize = 0;
}

bool BodyData::Append(void* ptr, size_t bytes)
{
    if(!ptr){
        return false;
    }
    if(0 == bytes){
        return true;
    }
    if(!Resize(bytes)){
        return false;
    }
    memcpy(&text[lastpos], ptr, bytes);
    lastpos += bytes;
    text[lastpos] = '\0';

    return true;
}

const char* BodyData::str() const
{
    if(!text){
        static const char* strnull = "";
        return strnull;
    }
    return text;
}

/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/
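Note: BodyData exists to collect libcurl response bodies. The sketch below is not taken from s3fs itself; it shows how the three-argument Append() lines up with a CURLOPT_WRITEFUNCTION callback, and the callback and function names are illustrative.

#include <curl/curl.h>
#include "bodydata.h"

// libcurl write callback: (ptr, blockSize, numBlocks, userdata) -> bytes consumed
static size_t write_to_bodydata(void* ptr, size_t blockSize, size_t numBlocks, void* userp)
{
    BodyData* body = static_cast<BodyData*>(userp);
    if(!body->Append(ptr, blockSize, numBlocks)){
        return 0;                                   // returning 0 makes libcurl abort the transfer
    }
    return blockSize * numBlocks;
}

void fetch_example(CURL* hCurl, const char* url)
{
    BodyData body;
    curl_easy_setopt(hCurl, CURLOPT_URL, url);
    curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, write_to_bodydata);
    curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, &body);
    if(CURLE_OK == curl_easy_perform(hCurl)){
        // body.str() is the NUL-terminated response, body.size() its length
    }
}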
72  src/bodydata.h (new file)
@ -0,0 +1,72 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef S3FS_BODYDATA_H_
#define S3FS_BODYDATA_H_

//----------------------------------------------
// Class BodyData
//----------------------------------------------
// memory class for curl write memory callback
//
class BodyData
{
    private:
        char*  text;
        size_t lastpos;
        size_t bufsize;

    private:
        bool IsSafeSize(size_t addbytes) const
        {
            return ((lastpos + addbytes + 1) > bufsize ? false : true);
        }
        bool Resize(size_t addbytes);

    public:
        BodyData() : text(NULL), lastpos(0), bufsize(0) {}
        ~BodyData()
        {
            Clear();
        }

        void Clear(void);
        bool Append(void* ptr, size_t bytes);
        bool Append(void* ptr, size_t blockSize, size_t numBlocks)
        {
            return Append(ptr, (blockSize * numBlocks));
        }
        const char* str() const;
        size_t size() const
        {
            return lastpos;
        }
};

#endif // S3FS_BODYDATA_H_

/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/
1096  src/cache.cpp (file diff suppressed because it is too large)
219  src/cache.h
@ -21,26 +21,30 @@
|
||||
#ifndef S3FS_CACHE_H_
|
||||
#define S3FS_CACHE_H_
|
||||
|
||||
#include "common.h"
|
||||
#include "metaheader.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Structure
|
||||
//-------------------------------------------------------------------
|
||||
//
|
||||
// Struct for stats cache
|
||||
//
|
||||
struct stat_cache_entry {
|
||||
struct stat stbuf;
|
||||
unsigned long hit_count;
|
||||
struct timespec cache_date;
|
||||
headers_t meta;
|
||||
bool isforce;
|
||||
bool noobjcache; // Flag: cache is no object for no listing.
|
||||
unsigned long notruncate; // 0<: not remove automatically at checking truncate
|
||||
struct stat stbuf;
|
||||
unsigned long hit_count;
|
||||
struct timespec cache_date;
|
||||
headers_t meta;
|
||||
bool isforce;
|
||||
bool noobjcache; // Flag: cache is no object for no listing.
|
||||
unsigned long notruncate; // 0<: not remove automatically at checking truncate
|
||||
|
||||
stat_cache_entry() : hit_count(0), isforce(false), noobjcache(false), notruncate(0L) {
|
||||
memset(&stbuf, 0, sizeof(struct stat));
|
||||
cache_date.tv_sec = 0;
|
||||
cache_date.tv_nsec = 0;
|
||||
meta.clear();
|
||||
}
|
||||
stat_cache_entry() : hit_count(0), isforce(false), noobjcache(false), notruncate(0L)
|
||||
{
|
||||
memset(&stbuf, 0, sizeof(struct stat));
|
||||
cache_date.tv_sec = 0;
|
||||
cache_date.tv_nsec = 0;
|
||||
meta.clear();
|
||||
}
|
||||
};
|
||||
|
||||
typedef std::map<std::string, stat_cache_entry*> stat_cache_t; // key=path
|
||||
@ -49,21 +53,22 @@ typedef std::map<std::string, stat_cache_entry*> stat_cache_t; // key=path
|
||||
// Struct for symbolic link cache
|
||||
//
|
||||
struct symlink_cache_entry {
|
||||
std::string link;
|
||||
unsigned long hit_count;
|
||||
struct timespec cache_date; // The function that operates timespec uses the same as Stats
|
||||
std::string link;
|
||||
unsigned long hit_count;
|
||||
struct timespec cache_date; // The function that operates timespec uses the same as Stats
|
||||
|
||||
symlink_cache_entry() : link(""), hit_count(0) {
|
||||
cache_date.tv_sec = 0;
|
||||
cache_date.tv_nsec = 0;
|
||||
}
|
||||
symlink_cache_entry() : link(""), hit_count(0)
|
||||
{
|
||||
cache_date.tv_sec = 0;
|
||||
cache_date.tv_nsec = 0;
|
||||
}
|
||||
};
|
||||
|
||||
typedef std::map<std::string, symlink_cache_entry*> symlink_cache_t;
|
||||
|
||||
//
|
||||
// Class
|
||||
//
|
||||
//-------------------------------------------------------------------
|
||||
// Class StatCache
|
||||
//-------------------------------------------------------------------
|
||||
// [NOTE] About Symbolic link cache
|
||||
// The Stats cache class now also has a symbolic link cache.
|
||||
// It is possible to take out the Symbolic link cache in another class,
|
||||
@ -75,102 +80,112 @@ typedef std::map<std::string, symlink_cache_entry*> symlink_cache_t;
|
||||
//
|
||||
class StatCache
|
||||
{
|
||||
private:
|
||||
static StatCache singleton;
|
||||
static pthread_mutex_t stat_cache_lock;
|
||||
stat_cache_t stat_cache;
|
||||
bool IsExpireTime;
|
||||
bool IsExpireIntervalType; // if this flag is true, cache data is updated at last access time.
|
||||
time_t ExpireTime;
|
||||
unsigned long CacheSize;
|
||||
bool IsCacheNoObject;
|
||||
symlink_cache_t symlink_cache;
|
||||
private:
|
||||
static StatCache singleton;
|
||||
static pthread_mutex_t stat_cache_lock;
|
||||
stat_cache_t stat_cache;
|
||||
bool IsExpireTime;
|
||||
bool IsExpireIntervalType; // if this flag is true, cache data is updated at last access time.
|
||||
time_t ExpireTime;
|
||||
unsigned long CacheSize;
|
||||
bool IsCacheNoObject;
|
||||
symlink_cache_t symlink_cache;
|
||||
|
||||
private:
|
||||
StatCache();
|
||||
~StatCache();
|
||||
private:
|
||||
StatCache();
|
||||
~StatCache();
|
||||
|
||||
void Clear(void);
|
||||
bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce);
|
||||
// Truncate stat cache
|
||||
bool TruncateCache(void);
|
||||
// Truncate symbolic link cache
|
||||
bool TruncateSymlink(void);
|
||||
void Clear(void);
|
||||
bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce);
|
||||
// Truncate stat cache
|
||||
bool TruncateCache(void);
|
||||
// Truncate symbolic link cache
|
||||
bool TruncateSymlink(void);
|
||||
|
||||
public:
|
||||
// Reference singleton
|
||||
static StatCache* getStatCacheData(void) {
|
||||
return &singleton;
|
||||
}
|
||||
public:
|
||||
// Reference singleton
|
||||
static StatCache* getStatCacheData(void)
|
||||
{
|
||||
return &singleton;
|
||||
}
|
||||
|
||||
// Attribute
|
||||
unsigned long GetCacheSize(void) const;
|
||||
unsigned long SetCacheSize(unsigned long size);
|
||||
time_t GetExpireTime(void) const;
|
||||
time_t SetExpireTime(time_t expire, bool is_interval = false);
|
||||
time_t UnsetExpireTime(void);
|
||||
bool SetCacheNoObject(bool flag);
|
||||
bool EnableCacheNoObject(void) {
|
||||
return SetCacheNoObject(true);
|
||||
}
|
||||
bool DisableCacheNoObject(void) {
|
||||
return SetCacheNoObject(false);
|
||||
}
|
||||
bool GetCacheNoObject(void) const {
|
||||
return IsCacheNoObject;
|
||||
}
|
||||
// Attribute
|
||||
unsigned long GetCacheSize(void) const;
|
||||
unsigned long SetCacheSize(unsigned long size);
|
||||
time_t GetExpireTime(void) const;
|
||||
time_t SetExpireTime(time_t expire, bool is_interval = false);
|
||||
time_t UnsetExpireTime(void);
|
||||
bool SetCacheNoObject(bool flag);
|
||||
bool EnableCacheNoObject(void)
|
||||
{
|
||||
return SetCacheNoObject(true);
|
||||
}
|
||||
bool DisableCacheNoObject(void)
|
||||
{
|
||||
return SetCacheNoObject(false);
|
||||
}
|
||||
bool GetCacheNoObject(void) const
|
||||
{
|
||||
return IsCacheNoObject;
|
||||
}
|
||||
|
||||
// Get stat cache
|
||||
bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck = true, bool* pisforce = NULL) {
|
||||
return GetStat(key, pst, meta, overcheck, NULL, pisforce);
|
||||
}
|
||||
bool GetStat(const std::string& key, struct stat* pst, bool overcheck = true) {
|
||||
return GetStat(key, pst, NULL, overcheck, NULL, NULL);
|
||||
}
|
||||
bool GetStat(const std::string& key, headers_t* meta, bool overcheck = true) {
|
||||
return GetStat(key, NULL, meta, overcheck, NULL, NULL);
|
||||
}
|
||||
bool HasStat(const std::string& key, bool overcheck = true) {
|
||||
return GetStat(key, NULL, NULL, overcheck, NULL, NULL);
|
||||
}
|
||||
bool HasStat(const std::string& key, const char* etag, bool overcheck = true) {
|
||||
return GetStat(key, NULL, NULL, overcheck, etag, NULL);
|
||||
}
|
||||
// Get stat cache
|
||||
bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck = true, bool* pisforce = NULL)
|
||||
{
|
||||
return GetStat(key, pst, meta, overcheck, NULL, pisforce);
|
||||
}
|
||||
bool GetStat(const std::string& key, struct stat* pst, bool overcheck = true)
|
||||
{
|
||||
return GetStat(key, pst, NULL, overcheck, NULL, NULL);
|
||||
}
|
||||
bool GetStat(const std::string& key, headers_t* meta, bool overcheck = true)
|
||||
{
|
||||
return GetStat(key, NULL, meta, overcheck, NULL, NULL);
|
||||
}
|
||||
bool HasStat(const std::string& key, bool overcheck = true)
|
||||
{
|
||||
return GetStat(key, NULL, NULL, overcheck, NULL, NULL);
|
||||
}
|
||||
bool HasStat(const std::string& key, const char* etag, bool overcheck = true)
|
||||
{
|
||||
return GetStat(key, NULL, NULL, overcheck, etag, NULL);
|
||||
}
|
||||
|
||||
// Cache For no object
|
||||
bool IsNoObjectCache(const std::string& key, bool overcheck = true);
|
||||
bool AddNoObjectCache(const std::string& key);
|
||||
// Cache For no object
|
||||
bool IsNoObjectCache(const std::string& key, bool overcheck = true);
|
||||
bool AddNoObjectCache(const std::string& key);
|
||||
|
||||
// Add stat cache
|
||||
bool AddStat(const std::string& key, headers_t& meta, bool forcedir = false, bool no_truncate = false);
|
||||
// Add stat cache
|
||||
bool AddStat(const std::string& key, headers_t& meta, bool forcedir = false, bool no_truncate = false);
|
||||
|
||||
// Change no truncate flag
|
||||
void ChangeNoTruncateFlag(const std::string& key, bool no_truncate);
|
||||
// Change no truncate flag
|
||||
void ChangeNoTruncateFlag(const std::string& key, bool no_truncate);
|
||||
|
||||
// Delete stat cache
|
||||
bool DelStat(const char* key, bool lock_already_held = false);
|
||||
bool DelStat(std::string& key, bool lock_already_held = false) {
|
||||
return DelStat(key.c_str(), lock_already_held);
|
||||
}
|
||||
// Delete stat cache
|
||||
bool DelStat(const char* key, bool lock_already_held = false);
|
||||
bool DelStat(std::string& key, bool lock_already_held = false)
|
||||
{
|
||||
return DelStat(key.c_str(), lock_already_held);
|
||||
}
|
||||
|
||||
// Cache for symbolic link
|
||||
bool GetSymlink(const std::string& key, std::string& value);
|
||||
bool AddSymlink(const std::string& key, const std::string& value);
|
||||
bool DelSymlink(const char* key, bool lock_already_held = false);
|
||||
// Cache for symbolic link
|
||||
bool GetSymlink(const std::string& key, std::string& value);
|
||||
bool AddSymlink(const std::string& key, const std::string& value);
|
||||
bool DelSymlink(const char* key, bool lock_already_held = false);
|
||||
};
|
||||
|
||||
//
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//
|
||||
//-------------------------------------------------------------------
|
||||
bool convert_header_to_stat(const char* path, const headers_t& meta, struct stat* pst, bool forcedir = false);
|
||||
|
||||
#endif // S3FS_CACHE_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 2
|
||||
* c-basic-offset: 2
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=2 ts=2 fdm=marker
|
||||
* vim<600: expandtab sw=2 ts=2
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
180  src/common.h
@ -21,192 +21,38 @@
|
||||
#ifndef S3FS_COMMON_H_
|
||||
#define S3FS_COMMON_H_
|
||||
|
||||
#include <stdlib.h>
|
||||
#include "../config.h"
|
||||
#include "types.h"
|
||||
#include "s3fs_logger.h"
|
||||
|
||||
//
|
||||
// Extended attribute
|
||||
//
|
||||
#ifdef HAVE_SYS_EXTATTR_H
|
||||
#include <sys/extattr.h>
|
||||
#elif HAVE_ATTR_XATTR_H
|
||||
#include <attr/xattr.h>
|
||||
#elif HAVE_SYS_XATTR_H
|
||||
#include <sys/xattr.h>
|
||||
#endif
|
||||
|
||||
//
|
||||
// Macro
|
||||
//
|
||||
static inline const char *SAFESTRPTR(const char *strptr) { return strptr ? strptr : ""; }
|
||||
|
||||
//
|
||||
// Debug level
|
||||
//
|
||||
enum s3fs_log_level{
|
||||
S3FS_LOG_CRIT = 0, // LOG_CRIT
|
||||
S3FS_LOG_ERR = 1, // LOG_ERR
|
||||
S3FS_LOG_WARN = 3, // LOG_WARNING
|
||||
S3FS_LOG_INFO = 7, // LOG_INFO
|
||||
S3FS_LOG_DBG = 15 // LOG_DEBUG
|
||||
};
|
||||
|
||||
//
|
||||
// Debug macros
|
||||
//
|
||||
#define IS_S3FS_LOG_CRIT() (S3FS_LOG_CRIT == debug_level)
|
||||
#define IS_S3FS_LOG_ERR() (S3FS_LOG_ERR == (debug_level & S3FS_LOG_DBG))
|
||||
#define IS_S3FS_LOG_WARN() (S3FS_LOG_WARN == (debug_level & S3FS_LOG_DBG))
|
||||
#define IS_S3FS_LOG_INFO() (S3FS_LOG_INFO == (debug_level & S3FS_LOG_DBG))
|
||||
#define IS_S3FS_LOG_DBG() (S3FS_LOG_DBG == (debug_level & S3FS_LOG_DBG))
|
||||
|
||||
#define S3FS_LOG_LEVEL_TO_SYSLOG(level) \
|
||||
( S3FS_LOG_DBG == (level & S3FS_LOG_DBG) ? LOG_DEBUG : \
|
||||
S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? LOG_INFO : \
|
||||
S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? LOG_WARNING : \
|
||||
S3FS_LOG_ERR == (level & S3FS_LOG_DBG) ? LOG_ERR : LOG_CRIT )
|
||||
|
||||
#define S3FS_LOG_LEVEL_STRING(level) \
|
||||
( S3FS_LOG_DBG == (level & S3FS_LOG_DBG) ? "[DBG] " : \
|
||||
S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? "[INF] " : \
|
||||
S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? "[WAN] " : \
|
||||
S3FS_LOG_ERR == (level & S3FS_LOG_DBG) ? "[ERR] " : "[CRT] " )
|
||||
|
||||
#define S3FS_LOG_NEST_MAX 4
|
||||
#define S3FS_LOG_NEST(nest) (nest < S3FS_LOG_NEST_MAX ? s3fs_log_nest[nest] : s3fs_log_nest[S3FS_LOG_NEST_MAX - 1])
|
||||
|
||||
#define S3FS_LOW_LOGPRN(level, fmt, ...) \
|
||||
do{ \
|
||||
if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \
|
||||
if(foreground){ \
|
||||
fprintf(stdout, "%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), __FILE__, __func__, __LINE__, __VA_ARGS__); \
|
||||
}else{ \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s%s:%s(%d): " fmt "%s", instance_name.c_str(), __FILE__, __func__, __LINE__, __VA_ARGS__); \
|
||||
} \
|
||||
} \
|
||||
}while(0)
|
||||
|
||||
#define S3FS_LOW_LOGPRN2(level, nest, fmt, ...) \
|
||||
do{ \
|
||||
if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \
|
||||
if(foreground){ \
|
||||
fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), S3FS_LOG_NEST(nest), __FILE__, __func__, __LINE__, __VA_ARGS__); \
|
||||
}else{ \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s%s" fmt "%s", instance_name.c_str(), S3FS_LOG_NEST(nest), __VA_ARGS__); \
|
||||
} \
|
||||
} \
|
||||
}while(0)
|
||||
|
||||
#define S3FS_LOW_CURLDBG(fmt, ...) \
|
||||
do{ \
|
||||
if(foreground){ \
|
||||
fprintf(stdout, "[CURL DBG] " fmt "%s\n", __VA_ARGS__); \
|
||||
}else{ \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "%s" fmt "%s", instance_name.c_str(), __VA_ARGS__); \
|
||||
} \
|
||||
}while(0)
|
||||
|
||||
#define S3FS_LOW_LOGPRN_EXIT(fmt, ...) \
|
||||
do{ \
|
||||
if(foreground){ \
|
||||
fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \
|
||||
}else{ \
|
||||
fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "%ss3fs: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \
|
||||
} \
|
||||
}while(0)
|
||||
|
||||
// Special macro for init message
|
||||
#define S3FS_PRN_INIT_INFO(fmt, ...) \
|
||||
do{ \
|
||||
if(foreground){ \
|
||||
fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(S3FS_LOG_INFO), S3FS_LOG_NEST(0), __FILE__, __func__, __LINE__, __VA_ARGS__, ""); \
|
||||
}else{ \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_INFO), "%s%s" fmt "%s", instance_name.c_str(), S3FS_LOG_NEST(0), __VA_ARGS__, ""); \
|
||||
} \
|
||||
}while(0)
|
||||
|
||||
// Special macro for checking cache files
|
||||
#define S3FS_LOW_CACHE(fp, fmt, ...) \
|
||||
do{ \
|
||||
if(foreground){ \
|
||||
fprintf(fp, fmt "%s\n", __VA_ARGS__); \
|
||||
}else{ \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_INFO), "%s: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \
|
||||
} \
|
||||
}while(0)
|
||||
|
||||
// [NOTE]
|
||||
// small trick for VA_ARGS
|
||||
//
|
||||
#define S3FS_PRN_EXIT(fmt, ...) S3FS_LOW_LOGPRN_EXIT(fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_CRIT(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_CRIT, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_ERR(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_ERR, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_WARN(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_WARN, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_DBG(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_DBG, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_INFO(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 0, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_INFO0(fmt, ...) S3FS_LOG_INFO(fmt, __VA_ARGS__)
|
||||
#define S3FS_PRN_INFO1(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 1, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_INFO2(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 2, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_INFO3(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 3, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_CURL(fmt, ...) S3FS_LOW_CURLDBG(fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_CACHE(fp, ...) S3FS_LOW_CACHE(fp, ##__VA_ARGS__, "")
|
||||
|
||||
//
|
||||
// Typedef
|
||||
//
|
||||
struct header_nocase_cmp : public std::binary_function<std::string, std::string, bool>{
|
||||
bool operator()(const std::string &strleft, const std::string &strright) const
|
||||
{
|
||||
return (strcasecmp(strleft.c_str(), strright.c_str()) < 0);
|
||||
}
|
||||
};
|
||||
typedef std::map<std::string, std::string, header_nocase_cmp> headers_t;
|
||||
|
||||
//
|
||||
// Header "x-amz-meta-xattr" is for extended attributes.
|
||||
// This header is url encoded string which is json formatted.
|
||||
// x-amz-meta-xattr:urlencode({"xattr-1":"base64(value-1)","xattr-2":"base64(value-2)","xattr-3":"base64(value-3)"})
|
||||
//
|
||||
typedef struct xattr_value{
|
||||
unsigned char* pvalue;
|
||||
size_t length;
|
||||
|
||||
explicit xattr_value(unsigned char* pval = NULL, size_t len = 0) : pvalue(pval), length(len) {}
|
||||
~xattr_value()
|
||||
{
|
||||
delete[] pvalue;
|
||||
}
|
||||
}XATTRVAL, *PXATTRVAL;
|
||||
|
||||
typedef std::map<std::string, PXATTRVAL> xattrs_t;
|
||||
|
||||
//
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
//
|
||||
//-------------------------------------------------------------------
|
||||
// TODO: namespace these
|
||||
extern int64_t FIVE_GB;
|
||||
extern off_t MIN_MULTIPART_SIZE;
|
||||
extern bool foreground;
|
||||
extern bool nomultipart;
|
||||
extern bool pathrequeststyle;
|
||||
extern bool complement_stat;
|
||||
extern bool noxmlns;
|
||||
extern std::string program_name;
|
||||
extern std::string service_path;
|
||||
extern std::string host;
|
||||
extern std::string s3host;
|
||||
extern std::string bucket;
|
||||
extern std::string mount_prefix;
|
||||
extern std::string endpoint;
|
||||
extern std::string cipher_suites;
|
||||
extern std::string instance_name;
|
||||
extern s3fs_log_level debug_level;
|
||||
extern const char* s3fs_log_nest[S3FS_LOG_NEST_MAX];
|
||||
extern std::string aws_profile;
|
||||
|
||||
#endif // S3FS_COMMON_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 2
|
||||
* c-basic-offset: 2
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=2 ts=2 fdm=marker
|
||||
* vim<600: expandtab sw=2 ts=2
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
@ -24,6 +24,8 @@

#include <cstring>
#include <string>

#include "common.h"
#include "s3fs.h"
#include "s3fs_auth.h"
#include "string_util.h"

@ -34,63 +36,63 @@ using namespace std;

//-------------------------------------------------------------------
string s3fs_get_content_md5(int fd)
{
    unsigned char* md5hex;
    char*          base64;
    string         Signature;

    if(NULL == (md5hex = s3fs_md5hexsum(fd, 0, -1))){
        return string("");
    }
    if(NULL == (base64 = s3fs_base64(md5hex, get_md5_digest_length()))){
        return string("");  // ENOMEM
    }
    delete[] md5hex;

    Signature = base64;
    delete[] base64;

    return Signature;
}

string s3fs_md5sum(int fd, off_t start, ssize_t size)
{
    size_t         digestlen = get_md5_digest_length();
    unsigned char* md5hex;

    if(NULL == (md5hex = s3fs_md5hexsum(fd, start, size))){
        return string("");
    }

    std::string md5 = s3fs_hex(md5hex, digestlen);
    delete[] md5hex;

    return md5;
}

string s3fs_sha256sum(int fd, off_t start, ssize_t size)
{
    size_t         digestlen = get_sha256_digest_length();
    char           sha256[2 * digestlen + 1];
    unsigned char* sha256hex;

    if(NULL == (sha256hex = s3fs_sha256hexsum(fd, start, size))){
        return string("");
    }

    memset(sha256, 0, 2 * digestlen + 1);
    for(size_t pos = 0; pos < digestlen; pos++){
        snprintf(sha256 + 2 * pos, 3, "%02x", sha256hex[pos]);
    }
    delete[] sha256hex;

    return string(sha256);
}

/*
* Local variables:
* tab-width: 2
* c-basic-offset: 2
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=2 ts=2 fdm=marker
* vim<600: expandtab sw=2 ts=2
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/
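Note: for reference, a minimal sketch of calling the checksum helpers above. The prototypes are assumed to come from s3fs_auth.h as included in this file; the function below is illustrative, not part of the commit.

#include <fcntl.h>
#include <unistd.h>
#include <string>
#include "s3fs_auth.h"

void checksum_example(const char* path)
{
    int fd = open(path, O_RDONLY);
    if(-1 == fd){
        return;
    }
    std::string md5    = s3fs_md5sum(fd, 0, -1);          // hex MD5 of the whole file (start 0, size -1)
    std::string sha256 = s3fs_sha256sum(fd, 0, -1);       // hex SHA-256 of the whole file
    std::string etag   = s3fs_get_content_md5(fd);        // base64(MD5) suitable for a Content-MD5 header
    close(fd);
    // each string is empty if hashing failed
}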
6794  src/curl.cpp (file diff suppressed because it is too large)
788  src/curl.h
@ -21,10 +21,13 @@
|
||||
#ifndef S3FS_CURL_H_
|
||||
#define S3FS_CURL_H_
|
||||
|
||||
#include <cassert>
|
||||
#include <curl/curl.h>
|
||||
|
||||
#include "curl_handlerpool.h"
|
||||
#include "bodydata.h"
|
||||
#include "psemaphore.h"
|
||||
#include "types.h"
|
||||
#include "metaheader.h"
|
||||
#include "fdcache_page.h"
|
||||
|
||||
//----------------------------------------------
|
||||
// Avoid dependency on libcurl version
|
||||
@ -42,153 +45,33 @@
|
||||
// a message is output.
|
||||
//
|
||||
#if defined(HAVE_CURLOPT_TCP_KEEPALIVE) && (HAVE_CURLOPT_TCP_KEEPALIVE == 1)
|
||||
#define S3FS_CURLOPT_TCP_KEEPALIVE CURLOPT_TCP_KEEPALIVE
|
||||
#define S3FS_CURLOPT_TCP_KEEPALIVE CURLOPT_TCP_KEEPALIVE
|
||||
#else
|
||||
#define S3FS_CURLOPT_TCP_KEEPALIVE static_cast<CURLoption>(213)
|
||||
#define S3FS_CURLOPT_TCP_KEEPALIVE static_cast<CURLoption>(213)
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_CURLOPT_SSL_ENABLE_ALPN) && (HAVE_CURLOPT_SSL_ENABLE_ALPN == 1)
|
||||
#define S3FS_CURLOPT_SSL_ENABLE_ALPN CURLOPT_SSL_ENABLE_ALPN
|
||||
#define S3FS_CURLOPT_SSL_ENABLE_ALPN CURLOPT_SSL_ENABLE_ALPN
|
||||
#else
|
||||
#define S3FS_CURLOPT_SSL_ENABLE_ALPN static_cast<CURLoption>(226)
|
||||
#define S3FS_CURLOPT_SSL_ENABLE_ALPN static_cast<CURLoption>(226)
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_CURLOPT_KEEP_SENDING_ON_ERROR) && (HAVE_CURLOPT_KEEP_SENDING_ON_ERROR == 1)
|
||||
#define S3FS_CURLOPT_KEEP_SENDING_ON_ERROR CURLOPT_KEEP_SENDING_ON_ERROR
|
||||
#define S3FS_CURLOPT_KEEP_SENDING_ON_ERROR CURLOPT_KEEP_SENDING_ON_ERROR
|
||||
#else
|
||||
#define S3FS_CURLOPT_KEEP_SENDING_ON_ERROR static_cast<CURLoption>(245)
|
||||
#define S3FS_CURLOPT_KEEP_SENDING_ON_ERROR static_cast<CURLoption>(245)
|
||||
#endif
|
||||
|
||||
//----------------------------------------------
|
||||
// Symbols
|
||||
// Structure / Typedefs
|
||||
//----------------------------------------------
|
||||
static const int MIN_MULTIPART_SIZE = 5 * 1024 * 1024;
|
||||
|
||||
//----------------------------------------------
|
||||
// class BodyData
|
||||
//----------------------------------------------
|
||||
// memory class for curl write memory callback
|
||||
//
|
||||
class BodyData
|
||||
{
|
||||
private:
|
||||
char* text;
|
||||
size_t lastpos;
|
||||
size_t bufsize;
|
||||
|
||||
private:
|
||||
bool IsSafeSize(size_t addbytes) const {
|
||||
return ((lastpos + addbytes + 1) > bufsize ? false : true);
|
||||
}
|
||||
bool Resize(size_t addbytes);
|
||||
|
||||
public:
|
||||
BodyData() : text(NULL), lastpos(0), bufsize(0) {}
|
||||
~BodyData() {
|
||||
Clear();
|
||||
}
|
||||
|
||||
void Clear(void);
|
||||
bool Append(void* ptr, size_t bytes);
|
||||
bool Append(void* ptr, size_t blockSize, size_t numBlocks) {
|
||||
return Append(ptr, (blockSize * numBlocks));
|
||||
}
|
||||
const char* str() const;
|
||||
size_t size() const {
|
||||
return lastpos;
|
||||
}
|
||||
};
|
||||
|
||||
//----------------------------------------------
|
||||
// Utility structs & typedefs
|
||||
//----------------------------------------------
|
||||
typedef std::vector<std::string> etaglist_t;
|
||||
|
||||
// Each part information for Multipart upload
|
||||
struct filepart
|
||||
{
|
||||
bool uploaded; // does finish uploading
|
||||
std::string etag; // expected etag value
|
||||
int fd; // base file(temporary full file) descriptor
|
||||
off_t startpos; // seek fd point for uploading
|
||||
off_t size; // uploading size
|
||||
etaglist_t* etaglist; // use only parallel upload
|
||||
int etagpos; // use only parallel upload
|
||||
|
||||
filepart() : uploaded(false), fd(-1), startpos(0), size(-1), etaglist(NULL), etagpos(-1) {}
|
||||
~filepart()
|
||||
{
|
||||
clear();
|
||||
}
|
||||
|
||||
void clear(void)
|
||||
{
|
||||
uploaded = false;
|
||||
etag = "";
|
||||
fd = -1;
|
||||
startpos = 0;
|
||||
size = -1;
|
||||
etaglist = NULL;
|
||||
etagpos = - 1;
|
||||
}
|
||||
|
||||
void add_etag_list(etaglist_t* list)
|
||||
{
|
||||
if(list){
|
||||
list->push_back(std::string(""));
|
||||
etaglist = list;
|
||||
etagpos = list->size() - 1;
|
||||
}else{
|
||||
etaglist = NULL;
|
||||
etagpos = - 1;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// for progress
|
||||
struct case_insensitive_compare_func
|
||||
{
|
||||
bool operator()(const std::string& a, const std::string& b) const {
|
||||
return strcasecmp(a.c_str(), b.c_str()) < 0;
|
||||
}
|
||||
};
|
||||
typedef std::map<std::string, std::string, case_insensitive_compare_func> mimes_t;
|
||||
typedef std::pair<double, double> progress_t;
|
||||
typedef std::map<CURL*, time_t> curltime_t;
|
||||
typedef std::map<CURL*, progress_t> curlprogress_t;
|
||||
|
||||
class S3fsMultiCurl;
|
||||
|
||||
//----------------------------------------------
|
||||
// class CurlHandlerPool
|
||||
//----------------------------------------------
|
||||
typedef std::list<CURL*> hcurllist_t;
|
||||
|
||||
class CurlHandlerPool
|
||||
{
|
||||
public:
|
||||
explicit CurlHandlerPool(int maxHandlers) : mMaxHandlers(maxHandlers)
|
||||
{
|
||||
assert(maxHandlers > 0);
|
||||
}
|
||||
|
||||
bool Init();
|
||||
bool Destroy();
|
||||
|
||||
CURL* GetHandler(bool only_pool);
|
||||
void ReturnHandler(CURL* hCurl, bool restore_pool);
|
||||
|
||||
private:
|
||||
int mMaxHandlers;
|
||||
pthread_mutex_t mLock;
|
||||
hcurllist_t mPool;
|
||||
};
|
||||
|
||||
//----------------------------------------------
|
||||
// class S3fsCurl
|
||||
//----------------------------------------------
|
||||
#include "fdcache.h" // for fdpage_list_t
|
||||
|
||||
class S3fsCurl;
|
||||
|
||||
// Prototype function for lazy setup options for curl handle
|
||||
@ -202,377 +85,326 @@ typedef std::list<sseckeymap_t> sseckeylist_t;
|
||||
//
|
||||
class S3fsCurl
|
||||
{
|
||||
friend class S3fsMultiCurl;
|
||||
friend class S3fsMultiCurl;
|
||||
|
||||
private:
|
||||
enum REQTYPE {
|
||||
REQTYPE_UNSET = -1,
|
||||
REQTYPE_DELETE = 0,
|
||||
REQTYPE_HEAD,
|
||||
REQTYPE_PUTHEAD,
|
||||
REQTYPE_PUT,
|
||||
REQTYPE_GET,
|
||||
REQTYPE_CHKBUCKET,
|
||||
REQTYPE_LISTBUCKET,
|
||||
REQTYPE_PREMULTIPOST,
|
||||
REQTYPE_COMPLETEMULTIPOST,
|
||||
REQTYPE_UPLOADMULTIPOST,
|
||||
REQTYPE_COPYMULTIPOST,
|
||||
REQTYPE_MULTILIST,
|
||||
REQTYPE_IAMCRED,
|
||||
REQTYPE_ABORTMULTIUPLOAD,
|
||||
REQTYPE_IAMROLE
|
||||
};
|
||||
// class variables
|
||||
static pthread_mutex_t curl_handles_lock;
|
||||
static struct callback_locks_t {
|
||||
pthread_mutex_t dns;
|
||||
pthread_mutex_t ssl_session;
|
||||
} callback_locks;
|
||||
static bool is_initglobal_done;
|
||||
static CurlHandlerPool* sCurlPool;
|
||||
static int sCurlPoolSize;
|
||||
static CURLSH* hCurlShare;
|
||||
static bool is_cert_check;
|
||||
static bool is_dns_cache;
|
||||
static bool is_ssl_session_cache;
|
||||
static long connect_timeout;
|
||||
static time_t readwrite_timeout;
|
||||
static int retries;
|
||||
static bool is_public_bucket;
|
||||
static acl_t default_acl;
|
||||
static storage_class_t storage_class;
|
||||
static sseckeylist_t sseckeys;
|
||||
static std::string ssekmsid;
|
||||
static sse_type_t ssetype;
|
||||
static bool is_content_md5;
|
||||
static bool is_verbose;
|
||||
static bool is_dump_body;
|
||||
static std::string AWSAccessKeyId;
|
||||
static std::string AWSSecretAccessKey;
|
||||
static std::string AWSAccessToken;
|
||||
static time_t AWSAccessTokenExpire;
|
||||
static bool is_ecs;
|
||||
static bool is_use_session_token;
|
||||
static bool is_ibm_iam_auth;
|
||||
static std::string IAM_cred_url;
|
||||
static size_t IAM_field_count;
|
||||
static std::string IAM_token_field;
|
||||
static std::string IAM_expiry_field;
|
||||
static std::string IAM_role;
|
||||
static long ssl_verify_hostname;
|
||||
static curltime_t curl_times;
|
||||
static curlprogress_t curl_progress;
|
||||
static std::string curl_ca_bundle;
|
||||
static mimes_t mimeTypes;
|
||||
static std::string userAgent;
|
||||
static int max_parallel_cnt;
|
||||
static int max_multireq;
|
||||
static off_t multipart_size;
|
||||
static bool is_sigv4;
|
||||
static bool is_ua; // User-Agent
|
||||
static bool requester_pays;
|
||||
// variables
|
||||
CURL* hCurl;
|
||||
REQTYPE type; // type of request
|
||||
std::string path; // target object path
|
||||
std::string base_path; // base path (for multi curl head request)
|
||||
std::string saved_path; // saved path = cache key (for multi curl head request)
|
||||
std::string url; // target object path(url)
|
||||
struct curl_slist* requestHeaders;
|
||||
headers_t responseHeaders; // header data by HeaderCallback
|
||||
BodyData bodydata; // body data by WriteMemoryCallback
|
||||
BodyData headdata; // header data by WriteMemoryCallback
|
||||
volatile long LastResponseCode;
|
||||
const unsigned char* postdata; // use by post method and read callback function.
|
||||
int postdata_remaining; // use by post method and read callback function.
|
||||
filepart partdata; // use by multipart upload/get object callback
|
||||
bool is_use_ahbe; // additional header by extension
|
||||
int retry_count; // retry count for multipart
|
||||
FILE* b_infile; // backup for retrying
|
||||
const unsigned char* b_postdata; // backup for retrying
|
||||
int b_postdata_remaining; // backup for retrying
|
||||
off_t b_partdata_startpos; // backup for retrying
|
||||
ssize_t b_partdata_size; // backup for retrying
|
||||
int b_ssekey_pos; // backup for retrying
|
||||
std::string b_ssevalue; // backup for retrying
|
||||
sse_type_t b_ssetype; // backup for retrying
|
||||
std::string b_from; // backup for retrying(for copy request)
|
||||
headers_t b_meta; // backup for retrying(for copy request)
|
||||
std::string op; // the HTTP verb of the request ("PUT", "GET", etc.)
|
||||
std::string query_string; // request query string
|
||||
Semaphore *sem;
|
||||
pthread_mutex_t *completed_tids_lock;
|
||||
std::vector<pthread_t> *completed_tids;
|
||||
s3fscurl_lazy_setup fpLazySetup; // curl options for lazy setting function
|
||||
public:
|
||||
// constructor/destructor
|
||||
explicit S3fsCurl(bool ahbe = false);
|
||||
~S3fsCurl();
|
||||
public:
|
||||
static const long S3FSCURL_RESPONSECODE_NOTSET = -1;
|
||||
static const long S3FSCURL_RESPONSECODE_FATAL_ERROR = -2;
|
||||
static const int S3FSCURL_PERFORM_RESULT_NOTSET = 1;
|
||||
|
||||
private:
|
||||
// class methods
|
||||
static bool InitGlobalCurl(void);
|
||||
static bool DestroyGlobalCurl(void);
|
||||
static bool InitShareCurl(void);
|
||||
static bool DestroyShareCurl(void);
|
||||
static void LockCurlShare(CURL* handle, curl_lock_data nLockData, curl_lock_access laccess, void* useptr);
|
||||
static void UnlockCurlShare(CURL* handle, curl_lock_data nLockData, void* useptr);
|
||||
static bool InitCryptMutex(void);
|
||||
static bool DestroyCryptMutex(void);
|
||||
static int CurlProgress(void *clientp, double dltotal, double dlnow, double ultotal, double ulnow);
|
||||
static bool LocateBundle(void);
|
||||
static size_t HeaderCallback(void *data, size_t blockSize, size_t numBlocks, void *userPtr);
|
||||
static size_t WriteMemoryCallback(void *ptr, size_t blockSize, size_t numBlocks, void *data);
|
||||
static size_t ReadCallback(void *ptr, size_t size, size_t nmemb, void *userp);
|
||||
static size_t UploadReadCallback(void *ptr, size_t size, size_t nmemb, void *userp);
|
||||
static size_t DownloadWriteCallback(void* ptr, size_t size, size_t nmemb, void* userp);
|
||||
static bool UploadMultipartPostCallback(S3fsCurl* s3fscurl);
|
||||
static bool CopyMultipartPostCallback(S3fsCurl* s3fscurl);
|
||||
static bool MixMultipartPostCallback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* MixMultipartPostRetryCallback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl);
|
||||
// lazy functions for set curl options
|
||||
static bool UploadMultipartPostSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
static bool CopyMultipartPostSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
static bool PreGetObjectRequestSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
static bool PreHeadRequestSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
static bool ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval);
|
||||
static bool SetIAMCredentials(const char* response);
|
||||
static bool ParseIAMRoleFromMetaDataResponse(const char* response, std::string& rolename);
|
||||
static bool SetIAMRoleFromMetaData(const char* response);
|
||||
static bool LoadEnvSseCKeys(void);
|
||||
static bool LoadEnvSseKmsid(void);
|
||||
static bool PushbackSseKeys(std::string& onekey);
|
||||
static bool AddUserAgent(CURL* hCurl);
|
||||
static int CurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
|
||||
static int CurlDebugBodyInFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
|
||||
static int CurlDebugBodyOutFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
|
||||
static int RawCurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr, curl_infotype datatype);
|
||||
// methods
|
||||
bool ResetHandle(bool lock_already_held = false);
|
||||
bool RemakeHandle(void);
|
||||
bool ClearInternalData(void);
|
||||
void insertV4Headers();
|
||||
void insertV2Headers();
|
||||
void insertIBMIAMHeaders();
|
||||
void insertAuthHeaders();
|
||||
std::string CalcSignatureV2(const std::string& method, const std::string& strMD5, const std::string& content_type, const std::string& date, const std::string& resource);
|
||||
std::string CalcSignature(const std::string& method, const std::string& canonical_uri, const std::string& query_string, const std::string& strdate, const std::string& payload_hash, const std::string& date8601);
|
||||
int GetIAMCredentials(void);
|
||||
int UploadMultipartPostSetup(const char* tpath, int part_num, const std::string& upload_id);
|
||||
int CopyMultipartPostSetup(const char* from, const char* to, int part_num, const std::string& upload_id, headers_t& meta);
|
||||
bool UploadMultipartPostComplete();
|
||||
bool CopyMultipartPostComplete();
|
||||
bool MixMultipartPostComplete();
|
||||
public:
|
||||
// class methods
|
||||
static bool InitS3fsCurl(void);
|
||||
static bool InitMimeType(const std::string& strFile);
|
||||
static bool DestroyS3fsCurl(void);
|
||||
static int ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, int fd);
|
||||
static int ParallelMixMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, const fdpage_list_t& mixuppages);
|
||||
static int ParallelGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size);
|
||||
static bool CheckIAMCredentialUpdate(void);
|
||||
// class methods(variables)
|
||||
static std::string LookupMimeType(const std::string& name);
|
||||
static bool SetCheckCertificate(bool isCertCheck);
|
||||
static bool SetDnsCache(bool isCache);
|
||||
static bool SetSslSessionCache(bool isCache);
|
||||
static long SetConnectTimeout(long timeout);
|
||||
static time_t SetReadwriteTimeout(time_t timeout);
|
||||
static time_t GetReadwriteTimeout(void) { return S3fsCurl::readwrite_timeout; }
|
||||
static int SetRetries(int count);
|
||||
static bool SetPublicBucket(bool flag);
|
||||
static bool IsPublicBucket(void) { return S3fsCurl::is_public_bucket; }
|
||||
static acl_t SetDefaultAcl(acl_t acl);
|
||||
static acl_t GetDefaultAcl();
|
||||
static storage_class_t SetStorageClass(storage_class_t storage_class);
|
||||
static storage_class_t GetStorageClass() { return S3fsCurl::storage_class; }
|
||||
static bool LoadEnvSse(void) { return (S3fsCurl::LoadEnvSseCKeys() && S3fsCurl::LoadEnvSseKmsid()); }
|
||||
static sse_type_t SetSseType(sse_type_t type);
|
||||
static sse_type_t GetSseType(void) { return S3fsCurl::ssetype; }
|
||||
static bool IsSseDisable(void) { return (sse_type_t::SSE_DISABLE == S3fsCurl::ssetype); }
|
||||
static bool IsSseS3Type(void) { return (sse_type_t::SSE_S3 == S3fsCurl::ssetype); }
|
||||
static bool IsSseCType(void) { return (sse_type_t::SSE_C == S3fsCurl::ssetype); }
|
||||
static bool IsSseKmsType(void) { return (sse_type_t::SSE_KMS == S3fsCurl::ssetype); }
|
||||
static bool FinalCheckSse(void);
|
||||
static bool SetSseCKeys(const char* filepath);
|
||||
static bool SetSseKmsid(const char* kmsid);
|
||||
static bool IsSetSseKmsId(void) { return !S3fsCurl::ssekmsid.empty(); }
|
||||
static const char* GetSseKmsId(void) { return S3fsCurl::ssekmsid.c_str(); }
|
||||
static bool GetSseKey(std::string& md5, std::string& ssekey);
|
||||
static bool GetSseKeyMd5(int pos, std::string& md5);
|
||||
static int GetSseKeyCount(void);
|
||||
static bool SetContentMd5(bool flag);
|
||||
static bool SetVerbose(bool flag);
|
||||
static bool GetVerbose(void) { return S3fsCurl::is_verbose; }
|
||||
static bool SetDumpBody(bool flag);
|
||||
static bool IsDumpBody(void) { return S3fsCurl::is_dump_body; }
|
||||
static bool SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey);
|
||||
static bool SetAccessKeyWithSessionToken(const char* AccessKeyId, const char* SecretAccessKey, const char * SessionToken);
|
||||
static bool IsSetAccessKeyID(void){
|
||||
return (0 < S3fsCurl::AWSAccessKeyId.size());
|
||||
}
|
||||
static bool IsSetAccessKeys(void){
|
||||
return (0 < S3fsCurl::IAM_role.size() || ((0 < S3fsCurl::AWSAccessKeyId.size() || S3fsCurl::is_ibm_iam_auth) && 0 < S3fsCurl::AWSSecretAccessKey.size()));
|
||||
}
|
||||
static long SetSslVerifyHostname(long value);
|
||||
static long GetSslVerifyHostname(void) { return S3fsCurl::ssl_verify_hostname; }
|
||||
// maximum parallel GET and PUT requests
|
||||
static int SetMaxParallelCount(int value);
|
||||
static int GetMaxParallelCount(void) { return S3fsCurl::max_parallel_cnt; }
|
||||
// maximum parallel HEAD requests
|
||||
static int SetMaxMultiRequest(int max);
|
||||
static int GetMaxMultiRequest(void) { return S3fsCurl::max_multireq; }
|
||||
static bool SetIsECS(bool flag);
|
||||
static bool SetIsIBMIAMAuth(bool flag);
|
||||
static size_t SetIAMFieldCount(size_t field_count);
|
||||
static std::string SetIAMCredentialsURL(const char* url);
|
||||
static std::string SetIAMTokenField(const char* token_field);
|
||||
static std::string SetIAMExpiryField(const char* expiry_field);
|
||||
static std::string SetIAMRole(const char* role);
|
||||
static const char* GetIAMRole(void) { return S3fsCurl::IAM_role.c_str(); }
|
||||
static bool SetMultipartSize(off_t size);
|
||||
static off_t GetMultipartSize(void) { return S3fsCurl::multipart_size; }
|
||||
static bool SetSignatureV4(bool isset) { bool bresult = S3fsCurl::is_sigv4; S3fsCurl::is_sigv4 = isset; return bresult; }
|
||||
static bool IsSignatureV4(void) { return S3fsCurl::is_sigv4; }
|
||||
static bool SetUserAgentFlag(bool isset) { bool bresult = S3fsCurl::is_ua; S3fsCurl::is_ua = isset; return bresult; }
|
||||
static bool IsUserAgentFlag(void) { return S3fsCurl::is_ua; }
|
||||
static void InitUserAgent(void);
|
||||
static bool SetRequesterPays(bool flag) { bool old_flag = S3fsCurl::requester_pays; S3fsCurl::requester_pays = flag; return old_flag; }
|
||||
static bool IsRequesterPays(void) { return S3fsCurl::requester_pays; }
|
||||
// methods
|
||||
bool CreateCurlHandle(bool only_pool = false, bool remake = false);
|
||||
bool DestroyCurlHandle(bool restore_pool = true, bool clear_internal_data = true);
|
||||
bool LoadIAMRoleFromMetaData(void);
|
||||
bool AddSseRequestHead(sse_type_t ssetype, std::string& ssevalue, bool is_only_c, bool is_copy);
|
||||
bool GetResponseCode(long& responseCode, bool from_curl_handle = true);
|
||||
int RequestPerform(bool dontAddAuthHeaders=false);
|
||||
int DeleteRequest(const char* tpath);
|
||||
bool PreHeadRequest(const char* tpath, const char* bpath = NULL, const char* savedpath = NULL, int ssekey_pos = -1);
|
||||
bool PreHeadRequest(std::string& tpath, std::string& bpath, std::string& savedpath, int ssekey_pos = -1) {
|
||||
return PreHeadRequest(tpath.c_str(), bpath.c_str(), savedpath.c_str(), ssekey_pos);
|
||||
}
|
||||
int HeadRequest(const char* tpath, headers_t& meta);
|
||||
int PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy);
|
||||
int PutRequest(const char* tpath, headers_t& meta, int fd);
|
||||
int PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size, sse_type_t ssetype, std::string& ssevalue);
|
||||
int GetObjectRequest(const char* tpath, int fd, off_t start = -1, ssize_t size = -1);
|
||||
int CheckBucket(void);
|
||||
int ListBucketRequest(const char* tpath, const char* query);
|
||||
int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy);
|
||||
int CompleteMultipartPostRequest(const char* tpath, const std::string& upload_id, etaglist_t& parts);
|
||||
int UploadMultipartPostRequest(const char* tpath, int part_num, const std::string& upload_id);
|
||||
int MultipartListRequest(std::string& body);
|
||||
int AbortMultipartUpload(const char* tpath, const std::string& upload_id);
|
||||
int MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool is_copy);
|
||||
int MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool is_copy);
|
||||
int MultipartUploadRequest(const std::string& upload_id, const char* tpath, int fd, off_t offset, off_t size, etaglist_t& list);
|
||||
int MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size);
|
||||
// methods(variables)
|
||||
CURL* GetCurlHandle(void) const { return hCurl; }
|
||||
std::string GetPath(void) const { return path; }
|
||||
std::string GetBasePath(void) const { return base_path; }
|
||||
std::string GetSpacialSavedPath(void) const { return saved_path; }
|
||||
std::string GetUrl(void) const { return url; }
|
||||
std::string GetOp(void) const { return op; }
|
||||
headers_t* GetResponseHeaders(void) { return &responseHeaders; }
|
||||
BodyData* GetBodyData(void) { return &bodydata; }
|
||||
BodyData* GetHeadData(void) { return &headdata; }
|
||||
long GetLastResponseCode(void) const { return LastResponseCode; }
|
||||
bool SetUseAhbe(bool ahbe);
|
||||
bool EnableUseAhbe(void) { return SetUseAhbe(true); }
|
||||
bool DisableUseAhbe(void) { return SetUseAhbe(false); }
|
||||
bool IsUseAhbe(void) const { return is_use_ahbe; }
|
||||
int GetMultipartRetryCount(void) const { return retry_count; }
|
||||
void SetMultipartRetryCount(int retrycnt) { retry_count = retrycnt; }
|
||||
bool IsOverMultipartRetryCount(void) const { return (retry_count >= S3fsCurl::retries); }
|
||||
int GetLastPreHeadSeecKeyPos(void) const { return b_ssekey_pos; }
};
|
||||
|
||||
//----------------------------------------------
|
||||
// class S3fsMultiCurl
|
||||
//----------------------------------------------
|
||||
// Class for lapping multi curl
|
||||
//
|
||||
typedef std::vector<S3fsCurl*> s3fscurllist_t;
|
||||
typedef bool (*S3fsMultiSuccessCallback)(S3fsCurl* s3fscurl); // callback for succeed multi request
|
||||
typedef S3fsCurl* (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failure and retrying
|
||||
|
||||
class S3fsMultiCurl
|
||||
{
|
||||
private:
|
||||
const int maxParallelism;
|
||||
|
||||
s3fscurllist_t clist_all; // all of curl requests
|
||||
s3fscurllist_t clist_req; // curl requests are sent
|
||||
|
||||
S3fsMultiSuccessCallback SuccessCallback;
|
||||
S3fsMultiRetryCallback RetryCallback;
|
||||
|
||||
pthread_mutex_t completed_tids_lock;
|
||||
std::vector<pthread_t> completed_tids;
|
||||
|
||||
private:
|
||||
bool ClearEx(bool is_all);
|
||||
int MultiPerform(void);
|
||||
int MultiRead(void);
|
||||
|
||||
static void* RequestPerformWrapper(void* arg);
|
||||
|
||||
public:
|
||||
explicit S3fsMultiCurl(int maxParallelism);
|
||||
~S3fsMultiCurl();
|
||||
|
||||
int GetMaxParallelism() { return maxParallelism; }
|
||||
|
||||
S3fsMultiSuccessCallback SetSuccessCallback(S3fsMultiSuccessCallback function);
|
||||
S3fsMultiRetryCallback SetRetryCallback(S3fsMultiRetryCallback function);
|
||||
bool Clear(void) { return ClearEx(true); }
|
||||
bool SetS3fsCurlObject(S3fsCurl* s3fscurl);
|
||||
int Request(void);
|
||||
};
|
||||
|
||||
//----------------------------------------------
|
||||
// Utility Functions
|
||||
//----------------------------------------------
|
||||
std::string GetContentMD5(int fd);
|
||||
unsigned char* md5hexsum(int fd, off_t start, ssize_t size);
|
||||
std::string md5sum(int fd, off_t start, ssize_t size);
|
||||
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data);
|
||||
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value);
|
||||
std::string get_sorted_header_keys(const struct curl_slist* list);
|
||||
std::string get_canonical_headers(const struct curl_slist* list, bool only_amz = false);
|
||||
std::string get_header_value(const struct curl_slist* list, const std::string &key);
|
||||
bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url);
|
||||
std::string prepare_url(const char* url);
|
||||
bool get_object_sse_type(const char* path, sse_type_t& ssetype, std::string& ssevalue); // implement in s3fs.cpp
|
||||
|
||||
#endif // S3FS_CURL_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
129
src/curl_handlerpool.cpp
Normal file
@ -0,0 +1,129 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "curl_handlerpool.h"
|
||||
#include "autolock.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class CurlHandlerPool
|
||||
//-------------------------------------------------------------------
|
||||
bool CurlHandlerPool::Init()
|
||||
{
|
||||
pthread_mutexattr_t attr;
|
||||
pthread_mutexattr_init(&attr);
|
||||
#if S3FS_PTHREAD_ERRORCHECK
|
||||
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
|
||||
#endif
|
||||
if (0 != pthread_mutex_init(&mLock, &attr)) {
|
||||
S3FS_PRN_ERR("Init curl handlers lock failed");
|
||||
return false;
|
||||
}
|
||||
|
||||
for(int cnt = 0; cnt < mMaxHandlers; ++cnt){
|
||||
CURL* hCurl = curl_easy_init();
|
||||
if(!hCurl){
|
||||
S3FS_PRN_ERR("Init curl handlers pool failed");
|
||||
Destroy();
|
||||
return false;
|
||||
}
|
||||
mPool.push_back(hCurl);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CurlHandlerPool::Destroy()
|
||||
{
|
||||
while(!mPool.empty()){
|
||||
CURL* hCurl = mPool.back();
|
||||
mPool.pop_back();
|
||||
if(hCurl){
|
||||
curl_easy_cleanup(hCurl);
|
||||
}
|
||||
}
|
||||
if (0 != pthread_mutex_destroy(&mLock)) {
|
||||
S3FS_PRN_ERR("Destroy curl handlers lock failed");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
CURL* CurlHandlerPool::GetHandler(bool only_pool)
|
||||
{
|
||||
CURL* hCurl = NULL;
|
||||
{
|
||||
AutoLock lock(&mLock);
|
||||
|
||||
if(!mPool.empty()){
|
||||
hCurl = mPool.back();
|
||||
mPool.pop_back();
|
||||
S3FS_PRN_DBG("Get handler from pool: rest = %d", static_cast<int>(mPool.size()));
|
||||
}
|
||||
}
|
||||
if(only_pool){
|
||||
return hCurl;
|
||||
}
|
||||
if(!hCurl){
|
||||
S3FS_PRN_INFO("Pool empty: force to create new handler");
|
||||
hCurl = curl_easy_init();
|
||||
}
|
||||
return hCurl;
|
||||
}
|
||||
|
||||
void CurlHandlerPool::ReturnHandler(CURL* hCurl, bool restore_pool)
|
||||
{
|
||||
if(!hCurl){
|
||||
return;
|
||||
}
|
||||
|
||||
if(restore_pool){
|
||||
AutoLock lock(&mLock);
|
||||
|
||||
S3FS_PRN_DBG("Return handler to pool");
|
||||
mPool.push_back(hCurl);
|
||||
|
||||
while(mMaxHandlers <= static_cast<int>(mPool.size())){
|
||||
CURL* hOldCurl = mPool.front();
|
||||
mPool.pop_front();
|
||||
if(hOldCurl){
|
||||
S3FS_PRN_INFO("Pool full: destroy the oldest handler");
|
||||
curl_easy_cleanup(hOldCurl);
|
||||
}
|
||||
}
|
||||
}else{
|
||||
S3FS_PRN_INFO("Pool full: destroy the handler");
|
||||
curl_easy_cleanup(hCurl);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
64
src/curl_handlerpool.h
Normal file
@ -0,0 +1,64 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_CURL_HANDLERPOOL_H_
|
||||
#define S3FS_CURL_HANDLERPOOL_H_
|
||||
|
||||
#include <cassert>
|
||||
#include <curl/curl.h>
|
||||
|
||||
//----------------------------------------------
|
||||
// Typedefs
|
||||
//----------------------------------------------
|
||||
typedef std::list<CURL*> hcurllist_t;
|
||||
|
||||
//----------------------------------------------
|
||||
// class CurlHandlerPool
|
||||
//----------------------------------------------
|
||||
class CurlHandlerPool
|
||||
{
|
||||
public:
|
||||
explicit CurlHandlerPool(int maxHandlers) : mMaxHandlers(maxHandlers)
|
||||
{
|
||||
assert(maxHandlers > 0);
|
||||
}
|
||||
|
||||
bool Init();
|
||||
bool Destroy();
|
||||
|
||||
CURL* GetHandler(bool only_pool);
|
||||
void ReturnHandler(CURL* hCurl, bool restore_pool);
|
||||
|
||||
private:
|
||||
int mMaxHandlers;
|
||||
pthread_mutex_t mLock;
|
||||
hcurllist_t mPool;
|
||||
};
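// Illustrative usage sketch (not part of this commit): a pool is Init()ed once,
// handles are borrowed around each request and returned afterwards.
//
//     CurlHandlerPool pool(32);                    // hypothetical pool size
//     if(pool.Init()){
//         CURL* hCurl = pool.GetHandler(false);    // falls back to curl_easy_init() when the pool is empty
//         // ... perform a request with hCurl ...
//         pool.ReturnHandler(hCurl, true);         // true: put the handle back into the pool
//         pool.Destroy();
//     }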
|
||||
|
||||
#endif // S3FS_CURL_HANDLERPOOL_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
344
src/curl_multi.cpp
Normal file
@ -0,0 +1,344 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cerrno>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "curl_multi.h"
|
||||
#include "curl.h"
|
||||
#include "autolock.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class S3fsMultiCurl
|
||||
//-------------------------------------------------------------------
|
||||
S3fsMultiCurl::S3fsMultiCurl(int maxParallelism) : maxParallelism(maxParallelism), SuccessCallback(NULL), RetryCallback(NULL)
|
||||
{
|
||||
int res;
|
||||
pthread_mutexattr_t attr;
|
||||
pthread_mutexattr_init(&attr);
|
||||
#if S3FS_PTHREAD_ERRORCHECK
|
||||
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
|
||||
#endif
|
||||
if (0 != (res = pthread_mutex_init(&completed_tids_lock, &attr))) {
|
||||
S3FS_PRN_ERR("could not initialize completed_tids_lock: %i", res);
|
||||
}
|
||||
}
|
||||
|
||||
S3fsMultiCurl::~S3fsMultiCurl()
|
||||
{
|
||||
Clear();
|
||||
int res;
|
||||
if(0 != (res = pthread_mutex_destroy(&completed_tids_lock))){
|
||||
S3FS_PRN_ERR("could not destroy completed_tids_lock: %i", res);
|
||||
}
|
||||
}
|
||||
|
||||
bool S3fsMultiCurl::ClearEx(bool is_all)
|
||||
{
|
||||
s3fscurllist_t::iterator iter;
|
||||
for(iter = clist_req.begin(); iter != clist_req.end(); ++iter){
|
||||
S3fsCurl* s3fscurl = *iter;
|
||||
if(s3fscurl){
|
||||
s3fscurl->DestroyCurlHandle();
|
||||
delete s3fscurl; // with destroy curl handle.
|
||||
}
|
||||
}
|
||||
clist_req.clear();
|
||||
|
||||
if(is_all){
|
||||
for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){
|
||||
S3fsCurl* s3fscurl = *iter;
|
||||
s3fscurl->DestroyCurlHandle();
|
||||
delete s3fscurl;
|
||||
}
|
||||
clist_all.clear();
|
||||
}
|
||||
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
S3fsMultiSuccessCallback S3fsMultiCurl::SetSuccessCallback(S3fsMultiSuccessCallback function)
|
||||
{
|
||||
S3fsMultiSuccessCallback old = SuccessCallback;
|
||||
SuccessCallback = function;
|
||||
return old;
|
||||
}
|
||||
|
||||
S3fsMultiRetryCallback S3fsMultiCurl::SetRetryCallback(S3fsMultiRetryCallback function)
|
||||
{
|
||||
S3fsMultiRetryCallback old = RetryCallback;
|
||||
RetryCallback = function;
|
||||
return old;
|
||||
}
|
||||
|
||||
bool S3fsMultiCurl::SetS3fsCurlObject(S3fsCurl* s3fscurl)
|
||||
{
|
||||
if(!s3fscurl){
|
||||
return false;
|
||||
}
|
||||
clist_all.push_back(s3fscurl);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
int S3fsMultiCurl::MultiPerform()
|
||||
{
|
||||
std::vector<pthread_t> threads;
|
||||
bool success = true;
|
||||
bool isMultiHead = false;
|
||||
Semaphore sem(GetMaxParallelism());
|
||||
int rc;
|
||||
|
||||
for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ++iter) {
|
||||
pthread_t thread;
|
||||
S3fsCurl* s3fscurl = *iter;
|
||||
if(!s3fscurl){
|
||||
continue;
|
||||
}
|
||||
|
||||
sem.wait();
|
||||
|
||||
{
|
||||
AutoLock lock(&completed_tids_lock);
|
||||
for(std::vector<pthread_t>::iterator it = completed_tids.begin(); it != completed_tids.end(); ++it){
|
||||
void* retval;
|
||||
|
||||
rc = pthread_join(*it, &retval);
|
||||
if (rc) {
|
||||
success = false;
|
||||
S3FS_PRN_ERR("failed pthread_join - rc(%d) %s", rc, strerror(rc));
|
||||
} else {
|
||||
int int_retval = (int)(intptr_t)(retval);
|
||||
if (int_retval && !(int_retval == -ENOENT && isMultiHead)) {
|
||||
S3FS_PRN_WARN("thread failed - rc(%d)", int_retval);
|
||||
}
|
||||
}
|
||||
}
|
||||
completed_tids.clear();
|
||||
}
|
||||
s3fscurl->sem = &sem;
|
||||
s3fscurl->completed_tids_lock = &completed_tids_lock;
|
||||
s3fscurl->completed_tids = &completed_tids;
|
||||
|
||||
isMultiHead |= s3fscurl->GetOp() == "HEAD";
|
||||
|
||||
rc = pthread_create(&thread, NULL, S3fsMultiCurl::RequestPerformWrapper, static_cast<void*>(s3fscurl));
|
||||
if (rc != 0) {
|
||||
success = false;
|
||||
S3FS_PRN_ERR("failed pthread_create - rc(%d)", rc);
|
||||
break;
|
||||
}
|
||||
threads.push_back(thread);
|
||||
}
|
||||
|
||||
for(int i = 0; i < sem.get_value(); ++i){
|
||||
sem.wait();
|
||||
}
|
||||
|
||||
AutoLock lock(&completed_tids_lock);
|
||||
for (std::vector<pthread_t>::iterator titer = completed_tids.begin(); titer != completed_tids.end(); ++titer) {
|
||||
void* retval;
|
||||
|
||||
rc = pthread_join(*titer, &retval);
|
||||
if (rc) {
|
||||
success = false;
|
||||
S3FS_PRN_ERR("failed pthread_join - rc(%d)", rc);
|
||||
} else {
|
||||
int int_retval = (int)(intptr_t)(retval);
|
||||
if (int_retval && !(int_retval == -ENOENT && isMultiHead)) {
|
||||
S3FS_PRN_WARN("thread failed - rc(%d)", int_retval);
|
||||
}
|
||||
}
|
||||
}
|
||||
completed_tids.clear();
|
||||
|
||||
return success ? 0 : -EIO;
|
||||
}
|
||||
|
||||
int S3fsMultiCurl::MultiRead()
|
||||
{
|
||||
int result = 0;
|
||||
|
||||
for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ){
|
||||
S3fsCurl* s3fscurl = *iter;
|
||||
|
||||
bool isRetry = false;
|
||||
bool isPostpone = false;
|
||||
long responseCode = S3fsCurl::S3FSCURL_RESPONSECODE_NOTSET;
|
||||
if(s3fscurl->GetResponseCode(responseCode, false)){
|
||||
if(S3fsCurl::S3FSCURL_RESPONSECODE_NOTSET == responseCode){
|
||||
// This is a case where the processing result has not yet been updated (should be very rare).
|
||||
isPostpone = true;
|
||||
}else if(400 > responseCode){
|
||||
// add into stat cache
|
||||
if(SuccessCallback && !SuccessCallback(s3fscurl)){
|
||||
S3FS_PRN_WARN("error from callback function(%s).", s3fscurl->url.c_str());
|
||||
}
|
||||
}else if(400 == responseCode){
|
||||
// may occur during multipart operations, so retry
|
||||
S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
|
||||
isRetry = true;
|
||||
}else if(404 == responseCode){
|
||||
// not found
|
||||
// HEAD requests on readdir_multi_head can return 404
|
||||
if(s3fscurl->GetOp() != "HEAD"){
|
||||
S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
|
||||
}
|
||||
}else if(500 == responseCode){
|
||||
// Retry on 500 as well (11/13/2013), because s3fs has been seen to get a 500
// error from S3 for a request that then succeeds when retried.
|
||||
S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
|
||||
isRetry = true;
|
||||
}else{
|
||||
// Retry in other case.
|
||||
S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
|
||||
isRetry = true;
|
||||
}
|
||||
}else{
|
||||
S3FS_PRN_ERR("failed a request(Unknown response code: %s)", s3fscurl->url.c_str());
|
||||
}
|
||||
|
||||
if(isPostpone){
|
||||
clist_req.erase(iter);
|
||||
clist_req.push_back(s3fscurl); // Re-evaluate at the end
|
||||
iter = clist_req.begin();
|
||||
}else{
|
||||
if(!isRetry || 0 != result){
|
||||
// If an EIO error has already occurred, it will be terminated
|
||||
// immediately even if retry processing is required.
|
||||
s3fscurl->DestroyCurlHandle();
|
||||
delete s3fscurl;
|
||||
}else{
|
||||
S3fsCurl* retrycurl = NULL;
|
||||
|
||||
// For retry
|
||||
if(RetryCallback){
|
||||
retrycurl = RetryCallback(s3fscurl);
|
||||
if(NULL != retrycurl){
|
||||
clist_all.push_back(retrycurl);
|
||||
}else{
|
||||
// set EIO and wait for other parts.
|
||||
result = -EIO;
|
||||
}
|
||||
}
|
||||
if(s3fscurl != retrycurl){
|
||||
s3fscurl->DestroyCurlHandle();
|
||||
delete s3fscurl;
|
||||
}
|
||||
}
|
||||
iter = clist_req.erase(iter);
|
||||
}
|
||||
}
|
||||
clist_req.clear();
|
||||
|
||||
if(0 != result){
|
||||
// If an EIO error has already occurred, clear all retry objects.
|
||||
for(s3fscurllist_t::iterator iter = clist_all.begin(); iter != clist_all.end(); ++iter){
|
||||
S3fsCurl* s3fscurl = *iter;
|
||||
s3fscurl->DestroyCurlHandle();
|
||||
delete s3fscurl;
|
||||
}
|
||||
clist_all.clear();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
int S3fsMultiCurl::Request()
|
||||
{
|
||||
S3FS_PRN_INFO3("[count=%zu]", clist_all.size());
|
||||
|
||||
// Make request list.
|
||||
//
|
||||
// Send multi request loop( with retry )
|
||||
// (When many requests are sent at once, curl sometimes fails with "Couldn't connect to server")
|
||||
//
|
||||
while(!clist_all.empty()){
|
||||
// set curl handle to multi handle
|
||||
int result;
|
||||
s3fscurllist_t::iterator iter;
|
||||
for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){
|
||||
S3fsCurl* s3fscurl = *iter;
|
||||
clist_req.push_back(s3fscurl);
|
||||
}
|
||||
clist_all.clear();
|
||||
|
||||
// Send multi request.
|
||||
if(0 != (result = MultiPerform())){
|
||||
Clear();
|
||||
return result;
|
||||
}
|
||||
|
||||
// Read the result
|
||||
if(0 != (result = MultiRead())){
|
||||
Clear();
|
||||
return result;
|
||||
}
|
||||
|
||||
// Cleanup curl handle in multi handle
|
||||
ClearEx(false);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
//
|
||||
// thread function for performing an S3fsCurl request
|
||||
//
|
||||
void* S3fsMultiCurl::RequestPerformWrapper(void* arg)
|
||||
{
|
||||
S3fsCurl* s3fscurl= static_cast<S3fsCurl*>(arg);
|
||||
void* result = NULL;
|
||||
if(!s3fscurl){
|
||||
return (void*)(intptr_t)(-EIO);
|
||||
}
|
||||
if(s3fscurl->fpLazySetup){
|
||||
if(!s3fscurl->fpLazySetup(s3fscurl)){
|
||||
S3FS_PRN_ERR("Failed to lazy setup, then respond EIO.");
|
||||
result = (void*)(intptr_t)(-EIO);
|
||||
}
|
||||
}
|
||||
|
||||
if(!result){
|
||||
result = (void*)(intptr_t)(s3fscurl->RequestPerform());
|
||||
s3fscurl->DestroyCurlHandle(true, false);
|
||||
}
|
||||
|
||||
AutoLock lock(s3fscurl->completed_tids_lock);
|
||||
s3fscurl->completed_tids->push_back(pthread_self());
|
||||
s3fscurl->sem->post();
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
79
src/curl_multi.h
Normal file
@ -0,0 +1,79 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_CURL_MULTI_H_
|
||||
#define S3FS_CURL_MULTI_H_
|
||||
|
||||
//----------------------------------------------
|
||||
// Typedef
|
||||
//----------------------------------------------
|
||||
class S3fsCurl;
|
||||
|
||||
typedef std::vector<S3fsCurl*> s3fscurllist_t;
|
||||
typedef bool (*S3fsMultiSuccessCallback)(S3fsCurl* s3fscurl); // callback for succeed multi request
|
||||
typedef S3fsCurl* (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failure and retrying
|
||||
|
||||
//----------------------------------------------
|
||||
// class S3fsMultiCurl
|
||||
//----------------------------------------------
|
||||
class S3fsMultiCurl
|
||||
{
|
||||
private:
|
||||
const int maxParallelism;
|
||||
|
||||
s3fscurllist_t clist_all; // all of curl requests
|
||||
s3fscurllist_t clist_req; // curl requests are sent
|
||||
|
||||
S3fsMultiSuccessCallback SuccessCallback;
|
||||
S3fsMultiRetryCallback RetryCallback;
|
||||
|
||||
pthread_mutex_t completed_tids_lock;
|
||||
std::vector<pthread_t> completed_tids;
|
||||
|
||||
private:
|
||||
bool ClearEx(bool is_all);
|
||||
int MultiPerform(void);
|
||||
int MultiRead(void);
|
||||
|
||||
static void* RequestPerformWrapper(void* arg);
|
||||
|
||||
public:
|
||||
explicit S3fsMultiCurl(int maxParallelism);
|
||||
~S3fsMultiCurl();
|
||||
|
||||
int GetMaxParallelism() { return maxParallelism; }
|
||||
|
||||
S3fsMultiSuccessCallback SetSuccessCallback(S3fsMultiSuccessCallback function);
|
||||
S3fsMultiRetryCallback SetRetryCallback(S3fsMultiRetryCallback function);
|
||||
bool Clear(void) { return ClearEx(true); }
|
||||
bool SetS3fsCurlObject(S3fsCurl* s3fscurl);
|
||||
int Request(void);
|
||||
};
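// Illustrative usage sketch (not part of this commit): callers register success/retry
// callbacks, add prepared S3fsCurl objects, and then drive them all with Request(),
// which runs up to maxParallelism requests in parallel threads.
//
//     S3fsMultiCurl curlmulti(20);                          // hypothetical parallelism
//     curlmulti.SetSuccessCallback(my_success_callback);    // hypothetical S3fsMultiSuccessCallback
//     curlmulti.SetRetryCallback(my_retry_callback);        // hypothetical S3fsMultiRetryCallback
//     curlmulti.SetS3fsCurlObject(s3fscurl);                // add each prepared S3fsCurl request
//     int result = curlmulti.Request();                     // 0 on success, negative errno-style value on failure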
|
||||
|
||||
#endif // S3FS_CURL_MULTI_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
397
src/curl_util.cpp
Normal file
@ -0,0 +1,397 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <curl/curl.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "curl_util.h"
|
||||
#include "string_util.h"
|
||||
#include "s3fs_auth.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Functions
|
||||
//-------------------------------------------------------------------
|
||||
//
|
||||
// curl_slist_sort_insert
|
||||
// This function is like the curl_slist_append function, but it inserts the data
// in sorted order, because the AWS signature requires sorted headers.
|
||||
//
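// Illustrative example: inserting "x-amz-meta-b: 2" into a list that already
// holds "content-type: text/plain" and "x-amz-meta-c: 3" yields the key order
// "content-type", "x-amz-meta-b", "x-amz-meta-c" (keys are compared
// case-insensitively); inserting a key that already exists replaces its value.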
|
||||
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data)
|
||||
{
|
||||
if(!data){
|
||||
return list;
|
||||
}
|
||||
string strkey = data;
|
||||
string strval;
|
||||
|
||||
string::size_type pos = strkey.find(':', 0);
|
||||
if(string::npos != pos){
|
||||
strval = strkey.substr(pos + 1);
|
||||
strkey = strkey.substr(0, pos);
|
||||
}
|
||||
|
||||
return curl_slist_sort_insert(list, strkey.c_str(), strval.c_str());
|
||||
}
|
||||
|
||||
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value)
|
||||
{
|
||||
struct curl_slist* curpos;
|
||||
struct curl_slist* lastpos;
|
||||
struct curl_slist* new_item;
|
||||
|
||||
if(!key){
|
||||
return list;
|
||||
}
|
||||
if(NULL == (new_item = reinterpret_cast<struct curl_slist*>(malloc(sizeof(struct curl_slist))))){
|
||||
return list;
|
||||
}
|
||||
|
||||
// key & value are trimmed and lower (only key)
|
||||
string strkey = trim(string(key));
|
||||
string strval = trim(string(value ? value : ""));
|
||||
string strnew = key + string(": ") + strval;
|
||||
if(NULL == (new_item->data = strdup(strnew.c_str()))){
|
||||
free(new_item);
|
||||
return list;
|
||||
}
|
||||
new_item->next = NULL;
|
||||
|
||||
for(lastpos = NULL, curpos = list; curpos; lastpos = curpos, curpos = curpos->next){
|
||||
string strcur = curpos->data;
|
||||
size_t pos;
|
||||
if(string::npos != (pos = strcur.find(':', 0))){
|
||||
strcur = strcur.substr(0, pos);
|
||||
}
|
||||
|
||||
int result = strcasecmp(strkey.c_str(), strcur.c_str());
|
||||
if(0 == result){
|
||||
// same data, so replace it.
|
||||
if(lastpos){
|
||||
lastpos->next = new_item;
|
||||
}else{
|
||||
list = new_item;
|
||||
}
|
||||
new_item->next = curpos->next;
|
||||
free(curpos->data);
|
||||
free(curpos);
|
||||
break;
|
||||
|
||||
}else if(0 > result){
|
||||
// add data before curpos.
|
||||
if(lastpos){
|
||||
lastpos->next = new_item;
|
||||
}else{
|
||||
list = new_item;
|
||||
}
|
||||
new_item->next = curpos;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if(!curpos){
|
||||
// append to last pos
|
||||
if(lastpos){
|
||||
lastpos->next = new_item;
|
||||
}else{
|
||||
// a case of list is null
|
||||
list = new_item;
|
||||
}
|
||||
}
|
||||
return list;
|
||||
}
|
||||
|
||||
string get_sorted_header_keys(const struct curl_slist* list)
|
||||
{
|
||||
string sorted_headers;
|
||||
|
||||
if(!list){
|
||||
return sorted_headers;
|
||||
}
|
||||
|
||||
for( ; list; list = list->next){
|
||||
string strkey = list->data;
|
||||
size_t pos;
|
||||
if(string::npos != (pos = strkey.find(':', 0))){
|
||||
if (trim(strkey.substr(pos + 1)).empty()) {
|
||||
// skip empty-value headers (as they are discarded by libcurl)
|
||||
continue;
|
||||
}
|
||||
strkey = strkey.substr(0, pos);
|
||||
}
|
||||
if(0 < sorted_headers.length()){
|
||||
sorted_headers += ";";
|
||||
}
|
||||
sorted_headers += lower(strkey);
|
||||
}
|
||||
|
||||
return sorted_headers;
|
||||
}
|
||||
|
||||
string get_header_value(const struct curl_slist* list, const string &key)
|
||||
{
|
||||
if(!list){
|
||||
return "";
|
||||
}
|
||||
|
||||
for( ; list; list = list->next){
|
||||
string strkey = list->data;
|
||||
size_t pos;
|
||||
if(string::npos != (pos = strkey.find(':', 0))){
|
||||
if(0 == strcasecmp(trim(strkey.substr(0, pos)).c_str(), key.c_str())){
|
||||
return trim(strkey.substr(pos+1));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "";
|
||||
}
|
||||
|
||||
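// get_canonical_headers() joins each header as "key:value\n" with the key
// lower-cased and both parts trimmed. Illustrative example: a list holding
// "Host: example.com" and "x-amz-date: 20130524T000000Z" yields
// "host:example.com\nx-amz-date:20130524T000000Z\n".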
string get_canonical_headers(const struct curl_slist* list)
|
||||
{
|
||||
string canonical_headers;
|
||||
|
||||
if(!list){
|
||||
canonical_headers = "\n";
|
||||
return canonical_headers;
|
||||
}
|
||||
|
||||
for( ; list; list = list->next){
|
||||
string strhead = list->data;
|
||||
size_t pos;
|
||||
if(string::npos != (pos = strhead.find(':', 0))){
|
||||
string strkey = trim(lower(strhead.substr(0, pos)));
|
||||
string strval = trim(strhead.substr(pos + 1));
|
||||
if (strval.empty()) {
|
||||
// skip empty-value headers (as they are discarded by libcurl)
|
||||
continue;
|
||||
}
|
||||
strhead = strkey.append(":").append(strval);
|
||||
}else{
|
||||
strhead = trim(lower(strhead));
|
||||
}
|
||||
canonical_headers += strhead;
|
||||
canonical_headers += "\n";
|
||||
}
|
||||
return canonical_headers;
|
||||
}
|
||||
|
||||
string get_canonical_headers(const struct curl_slist* list, bool only_amz)
|
||||
{
|
||||
string canonical_headers;
|
||||
|
||||
if(!list){
|
||||
canonical_headers = "\n";
|
||||
return canonical_headers;
|
||||
}
|
||||
|
||||
for( ; list; list = list->next){
|
||||
string strhead = list->data;
|
||||
size_t pos;
|
||||
if(string::npos != (pos = strhead.find(':', 0))){
|
||||
string strkey = trim(lower(strhead.substr(0, pos)));
|
||||
string strval = trim(strhead.substr(pos + 1));
|
||||
if (strval.empty()) {
|
||||
// skip empty-value headers (as they are discarded by libcurl)
|
||||
continue;
|
||||
}
|
||||
strhead = strkey.append(":").append(strval);
|
||||
}else{
|
||||
strhead = trim(lower(strhead));
|
||||
}
|
||||
if(only_amz && strhead.substr(0, 5) != "x-amz"){
|
||||
continue;
|
||||
}
|
||||
canonical_headers += strhead;
|
||||
canonical_headers += "\n";
|
||||
}
|
||||
return canonical_headers;
|
||||
}
|
||||
|
||||
// function for using global values
|
||||
bool MakeUrlResource(const char* realpath, string& resourcepath, string& url)
|
||||
{
|
||||
if(!realpath){
|
||||
return false;
|
||||
}
|
||||
resourcepath = urlEncode(service_path + bucket + realpath);
|
||||
url = s3host + resourcepath;
|
||||
return true;
|
||||
}
|
||||
|
||||
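// prepare_url() rewrites a request URL for the configured request style.
// Illustrative example, assuming bucket "mybucket" and endpoint
// "https://s3.amazonaws.com":
//   virtual-hosted style: https://s3.amazonaws.com/mybucket/dir/file
//                         -> https://mybucket.s3.amazonaws.com/dir/file
//   path style: the bucket name stays in the path component.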
string prepare_url(const char* url)
|
||||
{
|
||||
S3FS_PRN_INFO3("URL is %s", url);
|
||||
|
||||
string uri;
|
||||
string hostname;
|
||||
string path;
|
||||
string url_str = string(url);
|
||||
string token = string("/") + bucket;
|
||||
int bucket_pos;
|
||||
int bucket_length = token.size();
|
||||
int uri_length = 0;
|
||||
|
||||
if(!strncasecmp(url_str.c_str(), "https://", 8)){
|
||||
uri_length = 8;
|
||||
} else if(!strncasecmp(url_str.c_str(), "http://", 7)) {
|
||||
uri_length = 7;
|
||||
}
|
||||
uri = url_str.substr(0, uri_length);
|
||||
bucket_pos = url_str.find(token, uri_length);
|
||||
|
||||
if(!pathrequeststyle){
|
||||
hostname = bucket + "." + url_str.substr(uri_length, bucket_pos - uri_length);
|
||||
path = url_str.substr((bucket_pos + bucket_length));
|
||||
}else{
|
||||
hostname = url_str.substr(uri_length, bucket_pos - uri_length);
|
||||
string part = url_str.substr((bucket_pos + bucket_length));
|
||||
if('/' != part[0]){
|
||||
part = "/" + part;
|
||||
}
|
||||
path = "/" + bucket + part;
|
||||
}
|
||||
|
||||
url_str = uri + hostname + path;
|
||||
|
||||
S3FS_PRN_INFO3("URL changed is %s", url_str.c_str());
|
||||
|
||||
return url_str;
|
||||
}
|
||||
|
||||
// [TODO]
|
||||
// This function uses a temporary file, but it should not.
// To avoid it, this function should be implemented in each auth file (openssl, nss, gnutls).
|
||||
//
|
||||
bool make_md5_from_binary(const char* pstr, size_t length, string& md5)
|
||||
{
|
||||
if(!pstr || '\0' == pstr[0]){
|
||||
S3FS_PRN_ERR("Parameter is wrong.");
|
||||
return false;
|
||||
}
|
||||
FILE* fp;
|
||||
if(NULL == (fp = tmpfile())){
|
||||
S3FS_PRN_ERR("Could not make tmpfile.");
|
||||
return false;
|
||||
}
|
||||
if(length != fwrite(pstr, sizeof(char), length, fp)){
|
||||
S3FS_PRN_ERR("Failed to write tmpfile.");
|
||||
fclose(fp);
|
||||
return false;
|
||||
}
|
||||
int fd;
|
||||
if(0 != fflush(fp) || 0 != fseek(fp, 0L, SEEK_SET) || -1 == (fd = fileno(fp))){
|
||||
S3FS_PRN_ERR("Failed to make MD5.");
|
||||
fclose(fp);
|
||||
return false;
|
||||
}
|
||||
// base64 md5
|
||||
md5 = s3fs_get_content_md5(fd);
|
||||
if(0 == md5.length()){
|
||||
S3FS_PRN_ERR("Failed to make MD5.");
|
||||
fclose(fp);
|
||||
return false;
|
||||
}
|
||||
fclose(fp);
|
||||
return true;
|
||||
}
|
||||
|
||||
string url_to_host(const string &url)
|
||||
{
|
||||
S3FS_PRN_INFO3("url is %s", url.c_str());
|
||||
|
||||
static const string http = "http://";
|
||||
static const string https = "https://";
|
||||
std::string hostname;
|
||||
|
||||
if (url.compare(0, http.size(), http) == 0) {
|
||||
hostname = url.substr(http.size());
|
||||
} else if (url.compare(0, https.size(), https) == 0) {
|
||||
hostname = url.substr(https.size());
|
||||
} else {
|
||||
S3FS_PRN_EXIT("url does not begin with http:// or https://");
|
||||
abort();
|
||||
}
|
||||
|
||||
size_t idx;
|
||||
if ((idx = hostname.find('/')) != string::npos) {
|
||||
return hostname.substr(0, idx);
|
||||
} else {
|
||||
return hostname;
|
||||
}
|
||||
}
|
||||
|
||||
string get_bucket_host()
|
||||
{
|
||||
if(!pathrequeststyle){
|
||||
return bucket + "." + url_to_host(s3host);
|
||||
}
|
||||
return url_to_host(s3host);
|
||||
}
|
||||
|
||||
const char* getCurlDebugHead(curl_infotype type)
|
||||
{
|
||||
const char* unknown = "";
|
||||
const char* dataIn = "BODY <";
|
||||
const char* dataOut = "BODY >";
|
||||
const char* headIn = "<";
|
||||
const char* headOut = ">";
|
||||
|
||||
switch(type){
|
||||
case CURLINFO_DATA_IN:
|
||||
return dataIn;
|
||||
case CURLINFO_DATA_OUT:
|
||||
return dataOut;
|
||||
case CURLINFO_HEADER_IN:
|
||||
return headIn;
|
||||
case CURLINFO_HEADER_OUT:
|
||||
return headOut;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return unknown;
|
||||
}
|
||||
|
||||
//
|
||||
// compare ETag ignoring quotes and case
|
||||
//
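// Illustrative example: etag_equals("\"d41d8cd9\"", "D41D8CD9") returns true,
// because surrounding double quotes are stripped and the comparison is
// case-insensitive.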
|
||||
bool etag_equals(string s1, string s2)
|
||||
{
|
||||
if(s1.length() > 1 && s1[0] == '\"' && s1[s1.length() - 1] == '\"'){
|
||||
s1 = s1.substr(1, s1.size() - 2);
|
||||
}
|
||||
if(s2.length() > 1 && s2[0] == '\"' && s2[s2.length() - 1] == '\"'){
|
||||
s2 = s2.substr(1, s2.size() - 2);
|
||||
}
|
||||
return 0 == strcasecmp(s1.c_str(), s2.c_str());
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
57 src/curl_util.h (new file)
@ -0,0 +1,57 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_CURL_UTIL_H_
|
||||
#define S3FS_CURL_UTIL_H_
|
||||
|
||||
#include <curl/curl.h>
|
||||
|
||||
//----------------------------------------------
|
||||
// Functions
|
||||
//----------------------------------------------
|
||||
std::string GetContentMD5(int fd);
|
||||
unsigned char* md5hexsum(int fd, off_t start, ssize_t size);
|
||||
std::string md5sum(int fd, off_t start, ssize_t size);
|
||||
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data);
|
||||
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value);
|
||||
std::string get_sorted_header_keys(const struct curl_slist* list);
|
||||
std::string get_canonical_headers(const struct curl_slist* list, bool only_amz = false);
|
||||
std::string get_header_value(const struct curl_slist* list, const std::string &key);
|
||||
bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url);
|
||||
std::string prepare_url(const char* url);
|
||||
bool get_object_sse_type(const char* path, sse_type_t& ssetype, std::string& ssevalue); // implement in s3fs.cpp
|
||||
|
||||
bool make_md5_from_binary(const char* pstr, size_t length, std::string& md5);
|
||||
std::string url_to_host(const std::string &url);
|
||||
std::string get_bucket_host(void);
|
||||
const char* getCurlDebugHead(curl_infotype type);
|
||||
|
||||
bool etag_equals(std::string s1, std::string s2);
|
||||
|
||||
#endif // S3FS_CURL_UTIL_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
3890 src/fdcache.cpp (diff suppressed because it is too large)
302 src/fdcache.h
@ -17,205 +17,11 @@
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
#ifndef FD_CACHE_H_
|
||||
#define FD_CACHE_H_
|
||||
|
||||
#include <sys/statvfs.h>
|
||||
#ifndef S3FS_FDCACHE_H_
|
||||
#define S3FS_FDCACHE_H_
|
||||
|
||||
//------------------------------------------------
|
||||
// CacheFileStat
|
||||
//------------------------------------------------
|
||||
class CacheFileStat
|
||||
{
|
||||
private:
|
||||
std::string path;
|
||||
int fd;
|
||||
|
||||
private:
|
||||
static bool MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir = true);
|
||||
|
||||
bool RawOpen(bool readonly);
|
||||
|
||||
public:
|
||||
static std::string GetCacheFileStatTopDir(void);
|
||||
static bool DeleteCacheFileStat(const char* path);
|
||||
static bool CheckCacheFileStatTopDir(void);
|
||||
static bool DeleteCacheFileStatDirectory(void);
|
||||
static bool RenameCacheFileStat(const char* oldpath, const char* newpath);
|
||||
|
||||
explicit CacheFileStat(const char* tpath = NULL);
|
||||
~CacheFileStat();
|
||||
|
||||
bool Open(void);
|
||||
bool ReadOnlyOpen(void);
|
||||
bool Release(void);
|
||||
bool SetPath(const char* tpath, bool is_open = true);
|
||||
int GetFd(void) const { return fd; }
|
||||
};
|
||||
|
||||
//------------------------------------------------
|
||||
// fdpage & PageList
|
||||
//------------------------------------------------
|
||||
// page block information
|
||||
struct fdpage
|
||||
{
|
||||
off_t offset;
|
||||
off_t bytes;
|
||||
bool loaded;
|
||||
bool modified;
|
||||
|
||||
fdpage(off_t start = 0, off_t size = 0, bool is_loaded = false, bool is_modified = false)
|
||||
: offset(start), bytes(size), loaded(is_loaded), modified(is_modified) {}
|
||||
|
||||
off_t next(void) const { return (offset + bytes); }
|
||||
off_t end(void) const { return (0 < bytes ? offset + bytes - 1 : 0); }
|
||||
};
|
||||
typedef std::list<struct fdpage> fdpage_list_t;
|
||||
|
||||
class FdEntity;
|
||||
|
||||
//
|
||||
// Management of loading area/modifying
|
||||
//
|
||||
// cppcheck-suppress copyCtorAndEqOperator
|
||||
class PageList
|
||||
{
|
||||
friend class FdEntity; // only one method access directly pages.
|
||||
|
||||
private:
|
||||
fdpage_list_t pages;
|
||||
|
||||
public:
|
||||
enum page_status{
|
||||
PAGE_NOT_LOAD_MODIFIED = 0,
|
||||
PAGE_LOADED,
|
||||
PAGE_MODIFIED,
|
||||
PAGE_LOAD_MODIFIED
|
||||
};
|
||||
|
||||
private:
|
||||
static bool GetSparseFilePages(int fd, size_t file_size, fdpage_list_t& sparse_list);
|
||||
static bool CheckZeroAreaInFile(int fd, off_t start, size_t bytes);
|
||||
static bool CheckAreaInSparseFile(const struct fdpage& checkpage, const fdpage_list_t& sparse_list, int fd, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list);
|
||||
|
||||
void Clear(void);
|
||||
bool Compress();
|
||||
bool Parse(off_t new_pos);
|
||||
|
||||
public:
|
||||
static void FreeList(fdpage_list_t& list);
|
||||
|
||||
explicit PageList(off_t size = 0, bool is_loaded = false, bool is_modified = false);
|
||||
explicit PageList(const PageList& other);
|
||||
~PageList();
|
||||
|
||||
bool Init(off_t size, bool is_loaded, bool is_modified);
|
||||
off_t Size(void) const;
|
||||
bool Resize(off_t size, bool is_loaded, bool is_modified);
|
||||
|
||||
bool IsPageLoaded(off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list
|
||||
bool SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus = PAGE_LOADED, bool is_compress = true);
|
||||
bool FindUnloadedPage(off_t start, off_t& resstart, off_t& ressize) const;
|
||||
off_t GetTotalUnloadedPageSize(off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list
|
||||
int GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list
|
||||
bool GetPageListsForMultipartUpload(fdpage_list_t& dlpages, fdpage_list_t& mixuppages, off_t max_partsize);
|
||||
|
||||
bool IsModified(void) const;
|
||||
bool ClearAllModified(void);
|
||||
|
||||
bool Serialize(CacheFileStat& file, bool is_output, ino_t inode);
|
||||
void Dump(void) const;
|
||||
bool CompareSparseFile(int fd, size_t file_size, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list);
|
||||
};
|
||||
|
||||
//------------------------------------------------
|
||||
// class FdEntity
|
||||
//------------------------------------------------
|
||||
typedef std::list<headers_t> headers_list_t;
|
||||
|
||||
class FdEntity
|
||||
{
|
||||
private:
|
||||
static bool mixmultipart; // whether multipart uploading can use copy api.
|
||||
|
||||
pthread_mutex_t fdent_lock;
|
||||
bool is_lock_init;
|
||||
int refcnt; // reference count
|
||||
std::string path; // object path
|
||||
int fd; // file descriptor(tmp file or cache file)
|
||||
FILE* pfile; // file pointer(tmp file or cache file)
|
||||
ino_t inode; // inode number for cache file
|
||||
headers_t orgmeta; // original headers at opening
|
||||
off_t size_orgmeta; // original file size in original headers
|
||||
|
||||
pthread_mutex_t fdent_data_lock;// protects the following members
|
||||
PageList pagelist;
|
||||
std::string upload_id; // for no cached multipart uploading when no disk space
|
||||
etaglist_t etaglist; // for no cached multipart uploading when no disk space
|
||||
off_t mp_start; // start position for no cached multipart(write method only)
|
||||
off_t mp_size; // size for no cached multipart(write method only)
|
||||
std::string cachepath; // local cache file path
|
||||
// (if this is empty, does not load/save pagelist.)
|
||||
std::string mirrorpath; // mirror file path to local cache file path
|
||||
headers_list_t pending_headers;// pending update headers
|
||||
|
||||
private:
|
||||
static int FillFile(int fd, unsigned char byte, off_t size, off_t start);
|
||||
static ino_t GetInode(int fd);
|
||||
|
||||
void Clear(void);
|
||||
ino_t GetInode(void);
|
||||
int OpenMirrorFile(void);
|
||||
bool SetAllStatus(bool is_loaded); // [NOTE] not locking
|
||||
bool SetAllStatusUnloaded(void) { return SetAllStatus(false); }
|
||||
int UploadPendingMeta(void);
|
||||
|
||||
public:
|
||||
static bool SetNoMixMultipart(void);
|
||||
|
||||
explicit FdEntity(const char* tpath = NULL, const char* cpath = NULL);
|
||||
~FdEntity();
|
||||
|
||||
void Close(void);
|
||||
bool IsOpen(void) const { return (-1 != fd); }
|
||||
int Open(headers_t* pmeta = NULL, off_t size = -1, time_t time = -1, bool no_fd_lock_wait = false);
|
||||
bool OpenAndLoadAll(headers_t* pmeta = NULL, off_t* size = NULL, bool force_load = false);
|
||||
int Dup(bool lock_already_held = false);
|
||||
|
||||
const char* GetPath(void) const { return path.c_str(); }
|
||||
bool RenamePath(const std::string& newpath, std::string& fentmapkey);
|
||||
int GetFd(void) const { return fd; }
|
||||
bool IsModified(void) const;
|
||||
bool MergeOrgMeta(headers_t& updatemeta);
|
||||
|
||||
bool GetStats(struct stat& st, bool lock_already_held = false);
|
||||
int SetCtime(time_t time, bool lock_already_held = false);
|
||||
int SetMtime(time_t time, bool lock_already_held = false);
|
||||
bool UpdateCtime(void);
|
||||
bool UpdateMtime(void);
|
||||
bool GetSize(off_t& size);
|
||||
bool GetXattr(std::string& xattr);
|
||||
bool SetXattr(const std::string& xattr);
|
||||
bool SetMode(mode_t mode);
|
||||
bool SetUId(uid_t uid);
|
||||
bool SetGId(gid_t gid);
|
||||
bool SetContentType(const char* path);
|
||||
|
||||
int Load(off_t start = 0, off_t size = 0, bool lock_already_held = false, bool is_modified_flag = false); // size=0 means loading to end
|
||||
int NoCacheLoadAndPost(off_t start = 0, off_t size = 0); // size=0 means loading to end
|
||||
int NoCachePreMultipartPost(void);
|
||||
int NoCacheMultipartPost(int tgfd, off_t start, off_t size);
|
||||
int NoCacheCompleteMultipartPost(void);
|
||||
|
||||
int RowFlush(const char* tpath, bool force_sync = false);
|
||||
int Flush(bool force_sync = false) { return RowFlush(NULL, force_sync); }
|
||||
|
||||
ssize_t Read(char* bytes, off_t start, size_t size, bool force_load = false);
|
||||
ssize_t Write(const char* bytes, off_t start, size_t size);
|
||||
|
||||
bool ReserveDiskSpace(off_t size);
|
||||
};
|
||||
typedef std::map<std::string, class FdEntity*> fdent_map_t; // key=path, value=FdEntity*
|
||||
#include "fdcache_entity.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// class FdManager
|
||||
@ -223,71 +29,71 @@ typedef std::map<std::string, class FdEntity*> fdent_map_t; // key=path, value
|
||||
class FdManager
|
||||
{
|
||||
private:
|
||||
static FdManager singleton;
|
||||
static pthread_mutex_t fd_manager_lock;
|
||||
static pthread_mutex_t cache_cleanup_lock;
|
||||
static pthread_mutex_t reserved_diskspace_lock;
|
||||
static bool is_lock_init;
|
||||
static std::string cache_dir;
|
||||
static bool check_cache_dir_exist;
|
||||
static off_t free_disk_space; // limit free disk space
|
||||
static std::string check_cache_output;
|
||||
static bool checked_lseek;
|
||||
static bool have_lseek_hole;
|
||||
static FdManager singleton;
|
||||
static pthread_mutex_t fd_manager_lock;
|
||||
static pthread_mutex_t cache_cleanup_lock;
|
||||
static pthread_mutex_t reserved_diskspace_lock;
|
||||
static bool is_lock_init;
|
||||
static std::string cache_dir;
|
||||
static bool check_cache_dir_exist;
|
||||
static off_t free_disk_space; // limit free disk space
|
||||
static std::string check_cache_output;
|
||||
static bool checked_lseek;
|
||||
static bool have_lseek_hole;
|
||||
|
||||
fdent_map_t fent;
|
||||
fdent_map_t fent;
|
||||
|
||||
private:
|
||||
static off_t GetFreeDiskSpace(const char* path);
|
||||
void CleanupCacheDirInternal(const std::string &path = "");
|
||||
bool RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const char* sub_path, int& total_file_cnt, int& err_file_cnt, int& err_dir_cnt);
|
||||
static off_t GetFreeDiskSpace(const char* path);
|
||||
void CleanupCacheDirInternal(const std::string &path = "");
|
||||
bool RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const char* sub_path, int& total_file_cnt, int& err_file_cnt, int& err_dir_cnt);
|
||||
|
||||
public:
|
||||
FdManager();
|
||||
~FdManager();
|
||||
FdManager();
|
||||
~FdManager();
|
||||
|
||||
// Reference singleton
|
||||
static FdManager* get(void) { return &singleton; }
|
||||
// Reference singleton
|
||||
static FdManager* get(void) { return &singleton; }
|
||||
|
||||
static bool DeleteCacheDirectory(void);
|
||||
static int DeleteCacheFile(const char* path);
|
||||
static bool SetCacheDir(const char* dir);
|
||||
static bool IsCacheDir(void) { return !FdManager::cache_dir.empty(); }
|
||||
static const char* GetCacheDir(void) { return FdManager::cache_dir.c_str(); }
|
||||
static bool SetCacheCheckOutput(const char* path);
|
||||
static const char* GetCacheCheckOutput(void) { return FdManager::check_cache_output.c_str(); }
|
||||
static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true, bool is_mirror_path = false);
|
||||
static bool CheckCacheTopDir(void);
|
||||
static bool MakeRandomTempPath(const char* path, std::string& tmppath);
|
||||
static bool SetCheckCacheDirExist(bool is_check);
|
||||
static bool CheckCacheDirExist(void);
|
||||
static bool DeleteCacheDirectory(void);
|
||||
static int DeleteCacheFile(const char* path);
|
||||
static bool SetCacheDir(const char* dir);
|
||||
static bool IsCacheDir(void) { return !FdManager::cache_dir.empty(); }
|
||||
static const char* GetCacheDir(void) { return FdManager::cache_dir.c_str(); }
|
||||
static bool SetCacheCheckOutput(const char* path);
|
||||
static const char* GetCacheCheckOutput(void) { return FdManager::check_cache_output.c_str(); }
|
||||
static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true, bool is_mirror_path = false);
|
||||
static bool CheckCacheTopDir(void);
|
||||
static bool MakeRandomTempPath(const char* path, std::string& tmppath);
|
||||
static bool SetCheckCacheDirExist(bool is_check);
|
||||
static bool CheckCacheDirExist(void);
|
||||
|
||||
static off_t GetEnsureFreeDiskSpace();
|
||||
static off_t SetEnsureFreeDiskSpace(off_t size);
|
||||
static bool IsSafeDiskSpace(const char* path, off_t size);
|
||||
static void FreeReservedDiskSpace(off_t size);
|
||||
static bool ReserveDiskSpace(off_t size);
|
||||
static bool HaveLseekHole(void);
|
||||
static off_t GetEnsureFreeDiskSpace();
|
||||
static off_t SetEnsureFreeDiskSpace(off_t size);
|
||||
static bool IsSafeDiskSpace(const char* path, off_t size);
|
||||
static void FreeReservedDiskSpace(off_t size);
|
||||
static bool ReserveDiskSpace(off_t size);
|
||||
static bool HaveLseekHole(void);
|
||||
|
||||
// Return FdEntity associated with path, returning NULL on error. This operation increments the reference count; callers must decrement via Close after use.
|
||||
FdEntity* GetFdEntity(const char* path, int existfd = -1);
|
||||
FdEntity* Open(const char* path, headers_t* pmeta = NULL, off_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true, bool no_fd_lock_wait = false);
|
||||
FdEntity* ExistOpen(const char* path, int existfd = -1, bool ignore_existfd = false);
|
||||
void Rename(const std::string &from, const std::string &to);
|
||||
bool Close(FdEntity* ent);
|
||||
bool ChangeEntityToTempPath(FdEntity* ent, const char* path);
|
||||
void CleanupCacheDir();
|
||||
// Return FdEntity associated with path, returning NULL on error. This operation increments the reference count; callers must decrement via Close after use.
|
||||
FdEntity* GetFdEntity(const char* path, int existfd = -1);
|
||||
FdEntity* Open(const char* path, headers_t* pmeta = NULL, off_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true, bool no_fd_lock_wait = false);
|
||||
FdEntity* ExistOpen(const char* path, int existfd = -1, bool ignore_existfd = false);
|
||||
void Rename(const std::string &from, const std::string &to);
|
||||
bool Close(FdEntity* ent);
|
||||
bool ChangeEntityToTempPath(FdEntity* ent, const char* path);
|
||||
void CleanupCacheDir();
|
||||
|
||||
bool CheckAllCache(void);
|
||||
bool CheckAllCache(void);
|
||||
};
|
||||
|
||||
#endif // FD_CACHE_H_
|
||||
#endif // S3FS_FDCACHE_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 2
|
||||
* c-basic-offset: 2
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=2 ts=2 fdm=marker
|
||||
* vim<600: expandtab sw=2 ts=2
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
1536 src/fdcache_entity.cpp (new file; diff suppressed because it is too large)
124 src/fdcache_entity.h (new file)
@ -0,0 +1,124 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_FDCACHE_ENTITY_H_
|
||||
#define S3FS_FDCACHE_ENTITY_H_
|
||||
|
||||
#include "fdcache_page.h"
|
||||
#include "metaheader.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// class FdEntity
|
||||
//------------------------------------------------
|
||||
class FdEntity
|
||||
{
|
||||
private:
|
||||
static bool mixmultipart; // whether multipart uploading can use copy api.
|
||||
|
||||
pthread_mutex_t fdent_lock;
|
||||
bool is_lock_init;
|
||||
int refcnt; // reference count
|
||||
std::string path; // object path
|
||||
int fd; // file descriptor(tmp file or cache file)
|
||||
FILE* pfile; // file pointer(tmp file or cache file)
|
||||
ino_t inode; // inode number for cache file
|
||||
headers_t orgmeta; // original headers at opening
|
||||
off_t size_orgmeta; // original file size in original headers
|
||||
|
||||
pthread_mutex_t fdent_data_lock;// protects the following members
|
||||
PageList pagelist;
|
||||
std::string upload_id; // for no cached multipart uploading when no disk space
|
||||
etaglist_t etaglist; // for no cached multipart uploading when no disk space
|
||||
off_t mp_start; // start position for no cached multipart(write method only)
|
||||
off_t mp_size; // size for no cached multipart(write method only)
|
||||
std::string cachepath; // local cache file path
|
||||
// (if this is empty, does not load/save pagelist.)
|
||||
std::string mirrorpath; // mirror file path to local cache file path
|
||||
headers_list_t pending_headers;// pending update headers
|
||||
|
||||
private:
|
||||
static int FillFile(int fd, unsigned char byte, off_t size, off_t start);
|
||||
static ino_t GetInode(int fd);
|
||||
|
||||
void Clear(void);
|
||||
ino_t GetInode(void);
|
||||
int OpenMirrorFile(void);
|
||||
bool SetAllStatus(bool is_loaded); // [NOTE] not locking
|
||||
bool SetAllStatusUnloaded(void) { return SetAllStatus(false); }
|
||||
int UploadPendingMeta(void);
|
||||
|
||||
public:
|
||||
static bool SetNoMixMultipart(void);
|
||||
|
||||
explicit FdEntity(const char* tpath = NULL, const char* cpath = NULL);
|
||||
~FdEntity();
|
||||
|
||||
void Close(void);
|
||||
bool IsOpen(void) const { return (-1 != fd); }
|
||||
int Open(headers_t* pmeta = NULL, off_t size = -1, time_t time = -1, bool no_fd_lock_wait = false);
|
||||
bool OpenAndLoadAll(headers_t* pmeta = NULL, off_t* size = NULL, bool force_load = false);
|
||||
int Dup(bool lock_already_held = false);
|
||||
|
||||
const char* GetPath(void) const { return path.c_str(); }
|
||||
bool RenamePath(const std::string& newpath, std::string& fentmapkey);
|
||||
int GetFd(void) const { return fd; }
|
||||
bool IsModified(void) const;
|
||||
bool MergeOrgMeta(headers_t& updatemeta);
|
||||
|
||||
bool GetStats(struct stat& st, bool lock_already_held = false);
|
||||
int SetCtime(time_t time, bool lock_already_held = false);
|
||||
int SetMtime(time_t time, bool lock_already_held = false);
|
||||
bool UpdateCtime(void);
|
||||
bool UpdateMtime(void);
|
||||
bool GetSize(off_t& size);
|
||||
bool GetXattr(std::string& xattr);
|
||||
bool SetXattr(const std::string& xattr);
|
||||
bool SetMode(mode_t mode);
|
||||
bool SetUId(uid_t uid);
|
||||
bool SetGId(gid_t gid);
|
||||
bool SetContentType(const char* path);
|
||||
|
||||
int Load(off_t start = 0, off_t size = 0, bool lock_already_held = false, bool is_modified_flag = false); // size=0 means loading to end
|
||||
int NoCacheLoadAndPost(off_t start = 0, off_t size = 0); // size=0 means loading to end
|
||||
int NoCachePreMultipartPost(void);
|
||||
int NoCacheMultipartPost(int tgfd, off_t start, off_t size);
|
||||
int NoCacheCompleteMultipartPost(void);
|
||||
|
||||
int RowFlush(const char* tpath, bool force_sync = false);
|
||||
int Flush(bool force_sync = false) { return RowFlush(NULL, force_sync); }
|
||||
|
||||
ssize_t Read(char* bytes, off_t start, size_t size, bool force_load = false);
|
||||
ssize_t Write(const char* bytes, off_t start, size_t size);
|
||||
|
||||
bool ReserveDiskSpace(off_t size);
|
||||
};
|
||||
|
||||
typedef std::map<std::string, class FdEntity*> fdent_map_t; // key=path, value=FdEntity*
|
||||
|
||||
#endif // S3FS_FDCACHE_ENTITY_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
925 src/fdcache_page.cpp (new file)
@ -0,0 +1,925 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cerrno>
|
||||
#include <unistd.h>
|
||||
#include <sstream>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "fdcache_page.h"
|
||||
#include "string_util.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
//------------------------------------------------
|
||||
// Symbols
|
||||
//------------------------------------------------
|
||||
static const int CHECK_CACHEFILE_PART_SIZE = 1024 * 16; // Buffer size in PageList::CheckZeroAreaInFile()
|
||||
|
||||
//------------------------------------------------
|
||||
// fdpage_list_t utility
|
||||
//------------------------------------------------
|
||||
// Inline function for repeated processing
|
||||
inline void raw_add_compress_fdpage_list(fdpage_list_t& pagelist, fdpage& page, bool ignore_load, bool ignore_modify, bool default_load, bool default_modify)
|
||||
{
|
||||
if(0 < page.bytes){
|
||||
// [NOTE]
|
||||
// The page variable is subject to change here.
|
||||
//
|
||||
if(ignore_load){
|
||||
page.loaded = default_load;
|
||||
}
|
||||
if(ignore_modify){
|
||||
page.modified = default_modify;
|
||||
}
|
||||
pagelist.push_back(page);
|
||||
}
|
||||
}
|
||||
|
||||
// Compress the page list
|
||||
//
|
||||
// ignore_load: Ignore the flag of loaded member and compress
|
||||
// ignore_modify: Ignore the flag of modified member and compress
|
||||
// default_load: loaded flag value in the list after compression when ignore_load=true
|
||||
// default_modify: modified flag value in the list after compression when ignore_modify=true
|
||||
//
|
||||
// NOTE: ignore_modify and ignore_load cannot both be true.
|
||||
//
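// Illustrative example: pages {[0,4MB) loaded, [4MB,8MB) loaded, [8MB,12MB) not
// loaded} compress to {[0,8MB) loaded, [8MB,12MB) not loaded}; with
// ignore_load=true (and default_load=false) the same contiguous pages compress
// to a single {[0,12MB), loaded=false} entry.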
|
||||
static fdpage_list_t raw_compress_fdpage_list(const fdpage_list_t& pages, bool ignore_load, bool ignore_modify, bool default_load, bool default_modify)
|
||||
{
|
||||
fdpage_list_t compressed_pages;
|
||||
fdpage tmppage;
|
||||
bool is_first = true;
|
||||
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
if(!is_first){
|
||||
if( (!ignore_load && (tmppage.loaded != iter->loaded )) ||
|
||||
(!ignore_modify && (tmppage.modified != iter->modified)) )
|
||||
{
|
||||
// Different from the previous area, add it to list
|
||||
raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify);
|
||||
|
||||
// keep current area
|
||||
tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? default_modify : iter->modified));
|
||||
}else{
|
||||
// Same as the previous area
|
||||
if(tmppage.next() != iter->offset){
|
||||
// These are not contiguous areas, add it to list
|
||||
raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify);
|
||||
|
||||
// keep current area
|
||||
tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? default_modify : iter->modified));
|
||||
}else{
|
||||
// These are contiguous areas
|
||||
|
||||
// add current area
|
||||
tmppage.bytes += iter->bytes;
|
||||
}
|
||||
}
|
||||
}else{
|
||||
// first area
|
||||
is_first = false;
|
||||
|
||||
// keep current area
|
||||
tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? default_modify : iter->modified));
|
||||
}
|
||||
}
|
||||
// add the last area
|
||||
if(!is_first){
|
||||
raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify);
|
||||
}
|
||||
return compressed_pages;
|
||||
}
|
||||
|
||||
static fdpage_list_t compress_fdpage_list_ignore_modify(const fdpage_list_t& pages, bool default_modify)
|
||||
{
|
||||
return raw_compress_fdpage_list(pages, /* ignore_load= */ false, /* ignore_modify= */ true, /* default_load= */false, /* default_modify= */default_modify);
|
||||
}
|
||||
|
||||
static fdpage_list_t compress_fdpage_list_ignore_load(const fdpage_list_t& pages, bool default_load)
|
||||
{
|
||||
return raw_compress_fdpage_list(pages, /* ignore_load= */ true, /* ignore_modify= */ false, /* default_load= */default_load, /* default_modify= */false);
|
||||
}
|
||||
|
||||
static fdpage_list_t compress_fdpage_list(const fdpage_list_t& pages)
|
||||
{
|
||||
return raw_compress_fdpage_list(pages, /* ignore_load= */ false, /* ignore_modify= */ false, /* default_load= */false, /* default_modify= */false);
|
||||
}
|
||||
|
||||
static fdpage_list_t parse_partsize_fdpage_list(const fdpage_list_t& pages, off_t max_partsize)
|
||||
{
|
||||
fdpage_list_t parsed_pages;
|
||||
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
if(iter->modified){
|
||||
// modified page
|
||||
fdpage tmppage = *iter;
|
||||
for(off_t start = iter->offset, rest_bytes = iter->bytes; 0 < rest_bytes; ){
|
||||
if((max_partsize * 2) < rest_bytes){
|
||||
// do parse
|
||||
tmppage.offset = start;
|
||||
tmppage.bytes = max_partsize;
|
||||
parsed_pages.push_back(tmppage);
|
||||
|
||||
start += max_partsize;
|
||||
rest_bytes -= max_partsize;
|
||||
}else{
|
||||
// Since the number of remaining bytes is less than twice max_partsize,
|
||||
// one of the divided areas will be smaller than max_partsize.
|
||||
// Therefore, this area at the end should not be divided.
|
||||
tmppage.offset = start;
|
||||
tmppage.bytes = rest_bytes;
|
||||
parsed_pages.push_back(tmppage);
|
||||
|
||||
start += rest_bytes;
|
||||
rest_bytes = 0;
|
||||
}
|
||||
}
|
||||
}else{
|
||||
// not modified page is not parsed
|
||||
parsed_pages.push_back(*iter);
|
||||
}
|
||||
}
|
||||
return parsed_pages;
|
||||
}
|
||||
|
||||
//------------------------------------------------
|
||||
// PageList class methods
|
||||
//------------------------------------------------
|
||||
//
|
||||
// Examine and return the status of each block in the file.
|
||||
//
|
||||
// Assuming the file is a sparse file, check the HOLE and DATA areas
|
||||
// and return it in fdpage_list_t. The loaded flag of each fdpage is
|
||||
// set to false for HOLE blocks and true for DATA blocks.
|
||||
//
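// Illustrative example: a 1MB cache file in which only the first 256KB has
// ever been written typically produces two entries, {offset=0, bytes=256KB,
// loaded=true} and {offset=256KB, bytes=768KB, loaded=false}, although the
// exact split depends on the filesystem's hole granularity.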
|
||||
bool PageList::GetSparseFilePages(int fd, size_t file_size, fdpage_list_t& sparse_list)
|
||||
{
|
||||
// [NOTE]
|
||||
// Express the status of the cache file using fdpage_list_t.
|
||||
// There is a hole in the cache file(sparse file), and the
|
||||
// state of this hole is expressed by the "loaded" member of
|
||||
// struct fdpage. (the "modified" member is not used)
|
||||
//
|
||||
if(0 == file_size){
|
||||
// file is empty
|
||||
return true;
|
||||
}
|
||||
|
||||
bool is_hole = false;
|
||||
int hole_pos = lseek(fd, 0, SEEK_HOLE);
|
||||
int data_pos = lseek(fd, 0, SEEK_DATA);
|
||||
if(-1 == hole_pos && -1 == data_pos){
|
||||
S3FS_PRN_ERR("Could not find the first position both HOLE and DATA in the file(fd=%d).", fd);
|
||||
return false;
|
||||
}else if(-1 == hole_pos){
|
||||
is_hole = false;
|
||||
}else if(-1 == data_pos){
|
||||
is_hole = true;
|
||||
}else if(hole_pos < data_pos){
|
||||
is_hole = true;
|
||||
}else{
|
||||
is_hole = false;
|
||||
}
|
||||
|
||||
for(int cur_pos = 0, next_pos = 0; 0 <= cur_pos; cur_pos = next_pos, is_hole = !is_hole){
|
||||
fdpage page;
|
||||
page.offset = cur_pos;
|
||||
page.loaded = !is_hole;
|
||||
page.modified = false;
|
||||
|
||||
next_pos = lseek(fd, cur_pos, (is_hole ? SEEK_DATA : SEEK_HOLE));
|
||||
if(-1 == next_pos){
|
||||
page.bytes = static_cast<off_t>(file_size - cur_pos);
|
||||
}else{
|
||||
page.bytes = next_pos - cur_pos;
|
||||
}
|
||||
sparse_list.push_back(page);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
// Confirm that the specified area is ZERO
|
||||
//
|
||||
bool PageList::CheckZeroAreaInFile(int fd, off_t start, size_t bytes)
|
||||
{
|
||||
char* readbuff = new char[CHECK_CACHEFILE_PART_SIZE];
|
||||
|
||||
for(size_t comp_bytes = 0, check_bytes = 0; comp_bytes < bytes; comp_bytes += check_bytes){
|
||||
if(CHECK_CACHEFILE_PART_SIZE < (bytes - comp_bytes)){
|
||||
check_bytes = CHECK_CACHEFILE_PART_SIZE;
|
||||
}else{
|
||||
check_bytes = bytes - comp_bytes;
|
||||
}
|
||||
bool found_bad_data = false;
|
||||
ssize_t read_bytes;
|
||||
if(-1 == (read_bytes = pread(fd, readbuff, check_bytes, (start + comp_bytes)))){
|
||||
S3FS_PRN_ERR("Something error is occurred in reading %zu bytes at %lld from file(%d).", check_bytes, static_cast<long long int>(start + comp_bytes), fd);
|
||||
found_bad_data = true;
|
||||
}else{
|
||||
check_bytes = static_cast<size_t>(read_bytes);
|
||||
for(size_t tmppos = 0; tmppos < check_bytes; ++tmppos){
|
||||
if('\0' != readbuff[tmppos]){
|
||||
// found not ZERO data.
|
||||
found_bad_data = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if(found_bad_data){
|
||||
delete[] readbuff;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
delete[] readbuff;
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
// Checks that the specified area matches the state of the sparse file.
|
||||
//
|
||||
// [Parameters]
|
||||
// checkpage:   one state of the cache file, loaded from the stats file.
// sparse_list: a list of the results of directly checking the cache file status (HOLE/DATA).
//              In a HOLE area the "loaded" flag of fdpage is false; in a DATA area it is true.
// fd:          opened file descriptor for the target cache file.
|
||||
//
|
||||
bool PageList::CheckAreaInSparseFile(const struct fdpage& checkpage, const fdpage_list_t& sparse_list, int fd, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list)
|
||||
{
|
||||
// Check the block status of a part(Check Area: checkpage) of the target file.
|
||||
// The elements of sparse_list have 5 patterns that overlap this block area.
|
||||
//
|
||||
// File |<---...--------------------------------------...--->|
|
||||
// Check Area (offset)<-------------------->(offset + bytes - 1)
|
||||
// Area case(0) <------->
|
||||
// Area case(1) <------->
|
||||
// Area case(2) <-------->
|
||||
// Area case(3) <---------->
|
||||
// Area case(4) <----------->
|
||||
// Area case(5) <----------------------------->
|
||||
//
|
||||
bool result = true;
|
||||
|
||||
for(fdpage_list_t::const_iterator iter = sparse_list.begin(); iter != sparse_list.end(); ++iter){
|
||||
off_t check_start = 0;
|
||||
off_t check_bytes = 0;
|
||||
if((iter->offset + iter->bytes) <= checkpage.offset){
|
||||
// case 0
|
||||
continue; // next
|
||||
|
||||
}else if((checkpage.offset + checkpage.bytes) <= iter->offset){
|
||||
// case 1
|
||||
break; // finish
|
||||
|
||||
}else if(iter->offset < checkpage.offset && (iter->offset + iter->bytes) < (checkpage.offset + checkpage.bytes)){
|
||||
// case 2
|
||||
check_start = checkpage.offset;
|
||||
check_bytes = iter->bytes - (checkpage.offset - iter->offset);
|
||||
|
||||
}else if(iter->offset < (checkpage.offset + checkpage.bytes) && (checkpage.offset + checkpage.bytes) < (iter->offset + iter->bytes)){
|
||||
// case 3
|
||||
check_start = iter->offset;
|
||||
check_bytes = checkpage.bytes - (iter->offset - checkpage.offset);
|
||||
|
||||
}else if(checkpage.offset < iter->offset && (iter->offset + iter->bytes) < (checkpage.offset + checkpage.bytes)){
|
||||
// case 4
|
||||
check_start = iter->offset;
|
||||
check_bytes = iter->bytes;
|
||||
|
||||
}else{ // (iter->offset <= checkpage.offset && (checkpage.offset + checkpage.bytes) <= (iter->offset + iter->bytes))
|
||||
// case 5
|
||||
check_start = checkpage.offset;
|
||||
check_bytes = checkpage.bytes;
|
||||
}
|
||||
|
||||
// check target area type
|
||||
if(checkpage.loaded || checkpage.modified){
|
||||
// target area must not be a HOLE area (it must be a DATA area).
|
||||
if(!iter->loaded){
|
||||
// Found bad area, it is HOLE area.
|
||||
fdpage page(check_start, check_bytes, false, false);
|
||||
err_area_list.push_back(page);
|
||||
result = false;
|
||||
}
|
||||
}else{
|
||||
// target area should be a HOLE area. (If it is not on a block boundary, it may be a DATA area.)
|
||||
if(iter->loaded){
|
||||
// need to check this area's each data, it should be ZERO.
|
||||
if(!PageList::CheckZeroAreaInFile(fd, check_start, static_cast<size_t>(check_bytes))){
|
||||
// Discovered an area that contains uninitialized data, but it probably has no harmful effect.
|
||||
fdpage page(check_start, check_bytes, true, false);
|
||||
warn_area_list.push_back(page);
|
||||
result = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
//------------------------------------------------
|
||||
// PageList methods
|
||||
//------------------------------------------------
|
||||
void PageList::FreeList(fdpage_list_t& list)
|
||||
{
|
||||
list.clear();
|
||||
}
|
||||
|
||||
PageList::PageList(off_t size, bool is_loaded, bool is_modified)
|
||||
{
|
||||
Init(size, is_loaded, is_modified);
|
||||
}
|
||||
|
||||
PageList::PageList(const PageList& other)
|
||||
{
|
||||
for(fdpage_list_t::const_iterator iter = other.pages.begin(); iter != other.pages.end(); ++iter){
|
||||
pages.push_back(*iter);
|
||||
}
|
||||
}
|
||||
|
||||
PageList::~PageList()
|
||||
{
|
||||
Clear();
|
||||
}
|
||||
|
||||
void PageList::Clear()
|
||||
{
|
||||
PageList::FreeList(pages);
|
||||
}
|
||||
|
||||
bool PageList::Init(off_t size, bool is_loaded, bool is_modified)
|
||||
{
|
||||
Clear();
|
||||
if(0 < size){
|
||||
fdpage page(0, size, is_loaded, is_modified);
|
||||
pages.push_back(page);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
off_t PageList::Size() const
|
||||
{
|
||||
if(pages.empty()){
|
||||
return 0;
|
||||
}
|
||||
fdpage_list_t::const_reverse_iterator riter = pages.rbegin();
|
||||
return riter->next();
|
||||
}
|
||||
|
||||
bool PageList::Compress()
|
||||
{
|
||||
pages = compress_fdpage_list(pages);
|
||||
return true;
|
||||
}
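// Parse() splits the page containing new_pos so that a page boundary falls
// exactly at new_pos. Illustrative example: a single page covering [0, 100)
// becomes two pages, [0, 40) and [40, 100), after Parse(40), each keeping the
// original loaded/modified flags.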
|
||||
|
||||
bool PageList::Parse(off_t new_pos)
|
||||
{
|
||||
for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
if(new_pos == iter->offset){
|
||||
// nothing to do
|
||||
return true;
|
||||
}else if(iter->offset < new_pos && new_pos < iter->next()){
|
||||
fdpage page(iter->offset, new_pos - iter->offset, iter->loaded, iter->modified);
|
||||
iter->bytes -= (new_pos - iter->offset);
|
||||
iter->offset = new_pos;
|
||||
pages.insert(iter, page);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PageList::Resize(off_t size, bool is_loaded, bool is_modified)
|
||||
{
|
||||
off_t total = Size();
|
||||
|
||||
if(0 == total){
|
||||
Init(size, is_loaded, is_modified);
|
||||
|
||||
}else if(total < size){
|
||||
// add new area
|
||||
fdpage page(total, (size - total), is_loaded, is_modified);
|
||||
pages.push_back(page);
|
||||
|
||||
}else if(size < total){
|
||||
// cut area
|
||||
for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ){
|
||||
if(iter->next() <= size){
|
||||
++iter;
|
||||
}else{
|
||||
if(size <= iter->offset){
|
||||
iter = pages.erase(iter);
|
||||
}else{
|
||||
iter->bytes = size - iter->offset;
|
||||
}
|
||||
}
|
||||
}
|
||||
}else{ // total == size
|
||||
// nothing to do
|
||||
}
|
||||
// compress area
|
||||
return Compress();
|
||||
}
|
||||
|
||||
bool PageList::IsPageLoaded(off_t start, off_t size) const
|
||||
{
|
||||
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
if(iter->end() < start){
|
||||
continue;
|
||||
}
|
||||
if(!iter->loaded){
|
||||
return false;
|
||||
}
|
||||
if(0 != size && start + size <= iter->next()){
|
||||
break;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PageList::SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus, bool is_compress)
|
||||
{
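// Overview: if the specified range reaches past the current end, the list is
// extended (via Resize); otherwise the existing pages are split at "start" and
// "start + size" (via Parse) and the loaded/modified flags of the pages in
// between are updated.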
|
||||
off_t now_size = Size();
|
||||
bool is_loaded = (PAGE_LOAD_MODIFIED == pstatus || PAGE_LOADED == pstatus);
|
||||
bool is_modified = (PAGE_LOAD_MODIFIED == pstatus || PAGE_MODIFIED == pstatus);
|
||||
|
||||
if(now_size <= start){
|
||||
if(now_size < start){
|
||||
// add
|
||||
Resize(start, false, is_modified); // set modified flag from now end pos to specified start pos.
|
||||
}
|
||||
Resize(start + size, is_loaded, is_modified);
|
||||
|
||||
}else if(now_size <= start + size){
|
||||
// cut
|
||||
Resize(start, false, false); // not changed loaded/modified flags in existing area.
|
||||
// add
|
||||
Resize(start + size, is_loaded, is_modified);
|
||||
|
||||
}else{
|
||||
// start-size are inner pages area
|
||||
// parse "start", and "start + size" position
|
||||
Parse(start);
|
||||
Parse(start + size);
|
||||
|
||||
// set loaded flag
|
||||
for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
if(iter->end() < start){
|
||||
continue;
|
||||
}else if(start + size <= iter->offset){
|
||||
break;
|
||||
}else{
|
||||
iter->loaded = is_loaded;
|
||||
iter->modified = is_modified;
|
||||
}
|
||||
}
|
||||
}
|
||||
// compress area
|
||||
return (is_compress ? Compress() : true);
|
||||
}
|
||||
|
||||
bool PageList::FindUnloadedPage(off_t start, off_t& resstart, off_t& ressize) const
|
||||
{
|
||||
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
if(start <= iter->end()){
|
||||
if(!iter->loaded && !iter->modified){ // Do not load unloaded and modified areas
|
||||
resstart = iter->offset;
|
||||
ressize = iter->bytes;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
off_t PageList::GetTotalUnloadedPageSize(off_t start, off_t size) const
|
||||
{
|
||||
off_t restsize = 0;
|
||||
off_t next = start + size;
|
||||
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
if(iter->next() <= start){
|
||||
continue;
|
||||
}
|
||||
if(next <= iter->offset){
|
||||
break;
|
||||
}
|
||||
if(iter->loaded || iter->modified){
|
||||
continue;
|
||||
}
|
||||
off_t tmpsize;
|
||||
if(iter->offset <= start){
|
||||
if(iter->next() <= next){
|
||||
tmpsize = (iter->next() - start);
|
||||
}else{
|
||||
tmpsize = next - start; // = size
|
||||
}
|
||||
}else{
|
||||
if(iter->next() <= next){
|
||||
tmpsize = iter->next() - iter->offset; // = iter->bytes
|
||||
}else{
|
||||
tmpsize = next - iter->offset;
|
||||
}
|
||||
}
|
||||
restsize += tmpsize;
|
||||
}
|
||||
return restsize;
|
||||
}
|
||||
|
||||
int PageList::GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start, off_t size) const
|
||||
{
|
||||
// If size is 0, it means loading to end.
|
||||
if(0 == size){
|
||||
if(start < Size()){
|
||||
size = Size() - start;
|
||||
}
|
||||
}
|
||||
off_t next = start + size;
|
||||
|
||||
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
if(iter->next() <= start){
|
||||
continue;
|
||||
}
|
||||
if(next <= iter->offset){
|
||||
break;
|
||||
}
|
||||
if(iter->loaded || iter->modified){
|
||||
continue; // already loaded or modified
|
||||
}
|
||||
|
||||
// page area
|
||||
off_t page_start = max(iter->offset, start);
|
||||
off_t page_next = min(iter->next(), next);
|
||||
off_t page_size = page_next - page_start;
|
||||
|
||||
// add list
|
||||
fdpage_list_t::reverse_iterator riter = unloaded_list.rbegin();
|
||||
if(riter != unloaded_list.rend() && riter->next() == page_start){
|
||||
// merge to before page
|
||||
riter->bytes += page_size;
|
||||
}else{
|
||||
fdpage page(page_start, page_size, false, false);
|
||||
unloaded_list.push_back(page);
|
||||
}
|
||||
}
|
||||
return unloaded_list.size();
|
||||
}
|
||||
|
||||
// [NOTE]
|
||||
// This method is called in advance when mixing POST and COPY in multi-part upload.
|
||||
// The minimum size of each part must be 5 MB, and the data area below this must be
|
||||
// downloaded from S3.
|
||||
// This method checks the current PageList status and returns the area that needs
|
||||
// to be downloaded so that each part is at least 5 MB.
|
||||
//
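// Illustrative example (5MB minimum part size): if only a 4KB range in the
// middle of a large object was modified, enough of the surrounding unmodified
// data is scheduled for download (dlpages) so that the part containing the
// change is at least 5MB, while sufficiently large untouched ranges are left
// as server-side copy parts (mixuppages).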
|
||||
bool PageList::GetPageListsForMultipartUpload(fdpage_list_t& dlpages, fdpage_list_t& mixuppages, off_t max_partsize)
|
||||
{
|
||||
// compress before this processing
|
||||
if(!Compress()){
|
||||
return false;
|
||||
}
|
||||
|
||||
// make a list by modified flag
|
||||
fdpage_list_t modified_pages = compress_fdpage_list_ignore_load(pages, false);
|
||||
fdpage_list_t download_pages; // A non-contiguous page list showing the areas that need to be downloaded
|
||||
fdpage_list_t mixupload_pages; // A continuous page list showing only modified flags for mixupload
|
||||
fdpage prev_page;
|
||||
for(fdpage_list_t::const_iterator iter = modified_pages.begin(); iter != modified_pages.end(); ++iter){
|
||||
if(iter->modified){
|
||||
// current is modified area
|
||||
if(!prev_page.modified){
|
||||
// previous is not modified area
|
||||
if(prev_page.bytes < MIN_MULTIPART_SIZE){
|
||||
// previous(not modified) area is too small for one multipart size,
|
||||
// so all of the previous area needs to be downloaded.
|
||||
download_pages.push_back(prev_page);
|
||||
|
||||
// previous(not modified) area is set upload area.
|
||||
prev_page.modified = true;
|
||||
mixupload_pages.push_back(prev_page);
|
||||
}else{
|
||||
// previous(not modified) area is set copy area.
|
||||
prev_page.modified = false;
|
||||
mixupload_pages.push_back(prev_page);
|
||||
}
|
||||
// set current to previous
|
||||
prev_page = *iter;
|
||||
}else{
|
||||
// previous is modified area, too
|
||||
prev_page.bytes += iter->bytes;
|
||||
}
|
||||
|
||||
}else{
|
||||
// current is not modified area
|
||||
if(!prev_page.modified){
|
||||
// previous is not modified area, too
|
||||
prev_page.bytes += iter->bytes;
|
||||
|
||||
}else{
|
||||
// previous is modified area
|
||||
if(prev_page.bytes < MIN_MULTIPART_SIZE){
|
||||
// previous(modified) area is too small for one multipart size,
|
||||
// so part or all of the current area needs to be downloaded.
|
||||
off_t missing_bytes = MIN_MULTIPART_SIZE - prev_page.bytes;
|
||||
|
||||
if((missing_bytes + MIN_MULTIPART_SIZE) < iter->bytes){
|
||||
// The current size is larger than the missing size, and the remainder
|
||||
// after deducting the missing size is larger than the minimum size.
|
||||
|
||||
fdpage missing_page(iter->offset, missing_bytes, false, false);
|
||||
download_pages.push_back(missing_page);
|
||||
|
||||
// previous(modified) area is set upload area.
|
||||
prev_page.bytes = MIN_MULTIPART_SIZE;
|
||||
mixupload_pages.push_back(prev_page);
|
||||
|
||||
// set current to previous
|
||||
prev_page = *iter;
|
||||
prev_page.offset += missing_bytes;
|
||||
prev_page.bytes -= missing_bytes;
|
||||
|
||||
}else{
|
||||
// The current size is less than the missing size, or the remaining
|
||||
// size less the missing size is less than the minimum size.
|
||||
download_pages.push_back(*iter);
|
||||
|
||||
// add current to previous
|
||||
prev_page.bytes += iter->bytes;
|
||||
}
|
||||
|
||||
}else{
|
||||
// previous(modified) area is enough size for one multipart size.
|
||||
mixupload_pages.push_back(prev_page);
|
||||
|
||||
// set current to previous
|
||||
prev_page = *iter;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// last (remaining) area
|
||||
if(0 < prev_page.bytes){
|
||||
mixupload_pages.push_back(prev_page);
|
||||
}
|
||||
|
||||
// compress
|
||||
dlpages = compress_fdpage_list_ignore_modify(download_pages, false);
|
||||
mixuppages = compress_fdpage_list_ignore_load(mixupload_pages, false);
|
||||
|
||||
// parse by max pagesize
|
||||
dlpages = parse_partsize_fdpage_list(dlpages, max_partsize);
|
||||
mixuppages = parse_partsize_fdpage_list(mixuppages, max_partsize);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PageList::IsModified() const
|
||||
{
|
||||
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
if(iter->modified){
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PageList::ClearAllModified()
|
||||
{
|
||||
for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
if(iter->modified){
|
||||
iter->modified = false;
|
||||
}
|
||||
}
|
||||
return Compress();
|
||||
}
|
||||
|
||||
bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
|
||||
{
|
||||
if(!file.Open()){
|
||||
return false;
|
||||
}
|
||||
if(is_output){
|
||||
//
|
||||
// put to file
|
||||
//
|
||||
ostringstream ssall;
|
||||
ssall << inode << ":" << Size();
|
||||
|
||||
for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
ssall << "\n" << iter->offset << ":" << iter->bytes << ":" << (iter->loaded ? "1" : "0") << ":" << (iter->modified ? "1" : "0");
|
||||
}
|
||||
|
||||
if(-1 == ftruncate(file.GetFd(), 0)){
|
||||
S3FS_PRN_ERR("failed to truncate file(to 0) for stats(%d)", errno);
|
||||
return false;
|
||||
}
|
||||
string strall = ssall.str();
|
||||
if(0 >= pwrite(file.GetFd(), strall.c_str(), strall.length(), 0)){
|
||||
S3FS_PRN_ERR("failed to write stats(%d)", errno);
|
||||
return false;
|
||||
}
|
||||
|
||||
}else{
|
||||
//
|
||||
// loading from file
|
||||
//
|
||||
struct stat st;
|
||||
memset(&st, 0, sizeof(struct stat));
|
||||
if(-1 == fstat(file.GetFd(), &st)){
|
||||
S3FS_PRN_ERR("fstat is failed. errno(%d)", errno);
|
||||
return false;
|
||||
}
|
||||
if(0 >= st.st_size){
|
||||
// nothing
|
||||
Init(0, false, false);
|
||||
return true;
|
||||
}
|
||||
char* ptmp = new char[st.st_size + 1];
|
||||
ptmp[st.st_size] = '\0';
|
||||
// read from file
|
||||
if(0 >= pread(file.GetFd(), ptmp, st.st_size, 0)){
|
||||
S3FS_PRN_ERR("failed to read stats(%d)", errno);
|
||||
delete[] ptmp;
|
||||
return false;
|
||||
}
|
||||
string oneline;
|
||||
istringstream ssall(ptmp);
|
||||
|
||||
// loaded
|
||||
Clear();
|
||||
|
||||
// load head line(for size and inode)
|
||||
off_t total;
|
||||
ino_t cache_inode; // if this value is 0, it means old format.
|
||||
if(!getline(ssall, oneline, '\n')){
|
||||
S3FS_PRN_ERR("failed to parse stats.");
|
||||
delete[] ptmp;
|
||||
return false;
|
||||
}else{
|
||||
istringstream sshead(oneline);
|
||||
string strhead1;
|
||||
string strhead2;
|
||||
|
||||
// get first part in head line.
|
||||
if(!getline(sshead, strhead1, ':')){
|
||||
S3FS_PRN_ERR("failed to parse stats.");
|
||||
delete[] ptmp;
|
||||
return false;
|
||||
}
|
||||
// get second part in head line.
|
||||
if(!getline(sshead, strhead2, ':')){
|
||||
// old head format is "<size>\n"
|
||||
total = cvt_strtoofft(strhead1.c_str(), /* base= */10);
|
||||
cache_inode = 0;
|
||||
}else{
|
||||
// current head format is "<inode>:<size>\n"
|
||||
total = cvt_strtoofft(strhead2.c_str(), /* base= */10);
|
||||
cache_inode = static_cast<ino_t>(cvt_strtoofft(strhead1.c_str(), /* base= */10));
|
||||
if(0 == cache_inode){
|
||||
S3FS_PRN_ERR("wrong inode number in parsed cache stats.");
|
||||
delete[] ptmp;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
// check inode number
|
||||
if(0 != cache_inode && cache_inode != inode){
|
||||
S3FS_PRN_ERR("differ inode and inode number in parsed cache stats.");
|
||||
delete[] ptmp;
|
||||
return false;
|
||||
}
|
||||
|
||||
// load each part
|
||||
bool is_err = false;
|
||||
while(getline(ssall, oneline, '\n')){
|
||||
string part;
|
||||
istringstream ssparts(oneline);
|
||||
// offset
|
||||
if(!getline(ssparts, part, ':')){
|
||||
is_err = true;
|
||||
break;
|
||||
}
|
||||
off_t offset = cvt_strtoofft(part.c_str(), /* base= */10);
|
||||
// size
|
||||
if(!getline(ssparts, part, ':')){
|
||||
is_err = true;
|
||||
break;
|
||||
}
|
||||
off_t size = cvt_strtoofft(part.c_str(), /* base= */10);
|
||||
// loaded
|
||||
if(!getline(ssparts, part, ':')){
|
||||
is_err = true;
|
||||
break;
|
||||
}
|
||||
bool is_loaded = (1 == cvt_strtoofft(part.c_str(), /* base= */10) ? true : false);
|
||||
bool is_modified;
|
||||
if(!getline(ssparts, part, ':')){
|
||||
is_modified = false; // old version does not have this part.
|
||||
}else{
|
||||
is_modified = (1 == cvt_strtoofft(part.c_str(), /* base= */10) ? true : false);
|
||||
}
|
||||
// add new area
|
||||
PageList::page_status pstatus =
|
||||
( is_loaded && is_modified ? PageList::PAGE_LOAD_MODIFIED :
|
||||
!is_loaded && is_modified ? PageList::PAGE_MODIFIED :
|
||||
is_loaded && !is_modified ? PageList::PAGE_LOADED : PageList::PAGE_NOT_LOAD_MODIFIED );
|
||||
|
||||
SetPageLoadedStatus(offset, size, pstatus);
|
||||
}
|
||||
delete[] ptmp;
|
||||
if(is_err){
|
||||
S3FS_PRN_ERR("failed to parse stats.");
|
||||
Clear();
|
||||
return false;
|
||||
}
|
||||
|
||||
// check size
|
||||
if(total != Size()){
|
||||
S3FS_PRN_ERR("different size(%lld - %lld).", static_cast<long long int>(total), static_cast<long long int>(Size()));
|
||||
Clear();
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
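// For reference, the stat file written above has the following layout
// (hypothetical values):
//
//   1234567:10485760          <- "<inode>:<total size>"
//   0:5242880:1:0             <- "<offset>:<bytes>:<loaded>:<modified>"
//   5242880:5242880:0:1
//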
|
||||
|
||||
void PageList::Dump() const
|
||||
{
|
||||
int cnt = 0;
|
||||
|
||||
S3FS_PRN_DBG("pages = {");
|
||||
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter, ++cnt){
|
||||
S3FS_PRN_DBG(" [%08d] -> {%014lld - %014lld : %s / %s}", cnt, static_cast<long long int>(iter->offset), static_cast<long long int>(iter->bytes), iter->loaded ? "loaded" : "unloaded", iter->modified ? "modified" : "not modified");
|
||||
}
|
||||
S3FS_PRN_DBG("}");
|
||||
}
|
||||
|
||||
//
|
||||
// Compare the fdpage_list_t pages of the object with the state of the file.
|
||||
//
|
||||
// The loaded=true or modified=true area of pages must be a DATA block
|
||||
// (not a HOLE block) in the file.
|
||||
// Any other area must be a HOLE block in the file, or a DATA block whose
// data in the target area is all zero.
// A mismatch in the former case is reported as an error; a mismatch in the
// latter case is reported as a warning.
|
||||
//
|
||||
bool PageList::CompareSparseFile(int fd, size_t file_size, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list)
|
||||
{
|
||||
err_area_list.clear();
|
||||
warn_area_list.clear();
|
||||
|
||||
// First, list the block disk allocation area of the cache file.
|
||||
// The cache file has holes(sparse file) and no disk block areas
|
||||
// are assigned to any holes.
|
||||
fdpage_list_t sparse_list;
|
||||
if(!PageList::GetSparseFilePages(fd, file_size, sparse_list)){
|
||||
S3FS_PRN_ERR("Something error is occurred in parsing hole/data of the cache file(%d).", fd);
|
||||
|
||||
fdpage page(0, static_cast<off_t>(file_size), false, false);
|
||||
err_area_list.push_back(page);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
if(sparse_list.empty() && pages.empty()){
|
||||
// both the file and the stats information are empty, which means the cache file size is zero.
|
||||
return true;
|
||||
}
|
||||
|
||||
// Compare each pages and sparse_list
|
||||
bool result = true;
|
||||
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
if(!PageList::CheckAreaInSparseFile(*iter, sparse_list, fd, err_area_list, warn_area_list)){
|
||||
result = false;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
130  src/fdcache_page.h  Normal file
@ -0,0 +1,130 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_FDCACHE_PAGE_H_
|
||||
#define S3FS_FDCACHE_PAGE_H_
|
||||
|
||||
#include "fdcache_stat.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// Symbols
|
||||
//------------------------------------------------
|
||||
// [NOTE]
|
||||
// If the following symbols in lseek whence are undefined, define them.
|
||||
// If it is not supported by lseek, s3fs judges by the processing result of lseek.
|
||||
//
|
||||
#ifndef SEEK_DATA
|
||||
#define SEEK_DATA 3
|
||||
#endif
|
||||
#ifndef SEEK_HOLE
|
||||
#define SEEK_HOLE 4
|
||||
#endif
|
||||
|
||||
//------------------------------------------------
|
||||
// Structure fdpage
|
||||
//------------------------------------------------
|
||||
// page block information
|
||||
struct fdpage
|
||||
{
|
||||
off_t offset;
|
||||
off_t bytes;
|
||||
bool loaded;
|
||||
bool modified;
|
||||
|
||||
fdpage(off_t start = 0, off_t size = 0, bool is_loaded = false, bool is_modified = false) :
|
||||
offset(start), bytes(size), loaded(is_loaded), modified(is_modified) {}
|
||||
|
||||
off_t next(void) const
|
||||
{
|
||||
return (offset + bytes);
|
||||
}
|
||||
off_t end(void) const
|
||||
{
|
||||
return (0 < bytes ? offset + bytes - 1 : 0);
|
||||
}
|
||||
};
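// An illustrative example (hypothetical values): for fdpage page(100, 50),
// page.next() is 150 (the first offset after the page) and page.end() is
// 149 (the last offset inside the page).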
|
||||
typedef std::list<struct fdpage> fdpage_list_t;
|
||||
|
||||
//------------------------------------------------
|
||||
// Class PageList
|
||||
//------------------------------------------------
|
||||
class FdEntity;
|
||||
|
||||
// cppcheck-suppress copyCtorAndEqOperator
|
||||
class PageList
|
||||
{
|
||||
friend class FdEntity; // only one method access directly pages.
|
||||
|
||||
private:
|
||||
fdpage_list_t pages;
|
||||
|
||||
public:
|
||||
enum page_status{
|
||||
PAGE_NOT_LOAD_MODIFIED = 0,
|
||||
PAGE_LOADED,
|
||||
PAGE_MODIFIED,
|
||||
PAGE_LOAD_MODIFIED
|
||||
};
|
||||
|
||||
private:
|
||||
static bool GetSparseFilePages(int fd, size_t file_size, fdpage_list_t& sparse_list);
|
||||
static bool CheckZeroAreaInFile(int fd, off_t start, size_t bytes);
|
||||
static bool CheckAreaInSparseFile(const struct fdpage& checkpage, const fdpage_list_t& sparse_list, int fd, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list);
|
||||
|
||||
void Clear(void);
|
||||
bool Compress();
|
||||
bool Parse(off_t new_pos);
|
||||
|
||||
public:
|
||||
static void FreeList(fdpage_list_t& list);
|
||||
|
||||
explicit PageList(off_t size = 0, bool is_loaded = false, bool is_modified = false);
|
||||
explicit PageList(const PageList& other);
|
||||
~PageList();
|
||||
|
||||
bool Init(off_t size, bool is_loaded, bool is_modified);
|
||||
off_t Size(void) const;
|
||||
bool Resize(off_t size, bool is_loaded, bool is_modified);
|
||||
|
||||
bool IsPageLoaded(off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list
|
||||
bool SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus = PAGE_LOADED, bool is_compress = true);
|
||||
bool FindUnloadedPage(off_t start, off_t& resstart, off_t& ressize) const;
|
||||
off_t GetTotalUnloadedPageSize(off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list
|
||||
int GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list
|
||||
bool GetPageListsForMultipartUpload(fdpage_list_t& dlpages, fdpage_list_t& mixuppages, off_t max_partsize);
|
||||
|
||||
bool IsModified(void) const;
|
||||
bool ClearAllModified(void);
|
||||
|
||||
bool Serialize(CacheFileStat& file, bool is_output, ino_t inode);
|
||||
void Dump(void) const;
|
||||
bool CompareSparseFile(int fd, size_t file_size, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list);
|
||||
};
|
||||
|
||||
#endif // S3FS_FDCACHE_PAGE_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
282  src/fdcache_stat.cpp  Normal file
@ -0,0 +1,282 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cerrno>
|
||||
#include <unistd.h>
|
||||
#include <sys/file.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "fdcache_stat.h"
|
||||
#include "fdcache.h"
|
||||
#include "s3fs_util.h"
|
||||
#include "string_util.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
//------------------------------------------------
|
||||
// CacheFileStat class methods
|
||||
//------------------------------------------------
|
||||
string CacheFileStat::GetCacheFileStatTopDir()
|
||||
{
|
||||
string top_path("");
|
||||
if(!FdManager::IsCacheDir() || bucket.empty()){
|
||||
return top_path;
|
||||
}
|
||||
|
||||
// stat top dir( "/<cache_dir>/.<bucket_name>.stat" )
|
||||
top_path += FdManager::GetCacheDir();
|
||||
top_path += "/.";
|
||||
top_path += bucket;
|
||||
top_path += ".stat";
|
||||
return top_path;
|
||||
}
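// An illustrative example (hypothetical settings): with a cache dir of
// "/tmp/s3fs-cache" and a bucket named "mybucket", the top path returned
// above is "/tmp/s3fs-cache/.mybucket.stat".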
|
||||
|
||||
bool CacheFileStat::MakeCacheFileStatPath(const char* path, string& sfile_path, bool is_create_dir)
|
||||
{
|
||||
string top_path = CacheFileStat::GetCacheFileStatTopDir();
|
||||
if(top_path.empty()){
|
||||
S3FS_PRN_ERR("The path to cache top dir is empty.");
|
||||
return false;
|
||||
}
|
||||
|
||||
if(is_create_dir){
|
||||
int result;
|
||||
if(0 != (result = mkdirp(top_path + mydirname(path), 0777))){
|
||||
S3FS_PRN_ERR("failed to create dir(%s) by errno(%d).", path, result);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if(!path || '\0' == path[0]){
|
||||
sfile_path = top_path;
|
||||
}else{
|
||||
sfile_path = top_path + SAFESTRPTR(path);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CacheFileStat::CheckCacheFileStatTopDir()
|
||||
{
|
||||
string top_path = CacheFileStat::GetCacheFileStatTopDir();
|
||||
if(top_path.empty()){
|
||||
S3FS_PRN_INFO("The path to cache top dir is empty, thus not need to check permission.");
|
||||
return true;
|
||||
}
|
||||
|
||||
return check_exist_dir_permission(top_path.c_str());
|
||||
}
|
||||
|
||||
bool CacheFileStat::DeleteCacheFileStat(const char* path)
|
||||
{
|
||||
if(!path || '\0' == path[0]){
|
||||
return false;
|
||||
}
|
||||
// stat path
|
||||
string sfile_path;
|
||||
if(!CacheFileStat::MakeCacheFileStatPath(path, sfile_path, false)){
|
||||
S3FS_PRN_ERR("failed to create cache stat file path(%s)", path);
|
||||
return false;
|
||||
}
|
||||
if(0 != unlink(sfile_path.c_str())){
|
||||
if(ENOENT == errno){
|
||||
S3FS_PRN_DBG("failed to delete file(%s): errno=%d", path, errno);
|
||||
}else{
|
||||
S3FS_PRN_ERR("failed to delete file(%s): errno=%d", path, errno);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// [NOTE]
|
||||
// If the stat file directory is to be removed, it must be done before
// removing the file cache directory.
|
||||
//
|
||||
bool CacheFileStat::DeleteCacheFileStatDirectory()
|
||||
{
|
||||
string top_path = CacheFileStat::GetCacheFileStatTopDir();
|
||||
if(top_path.empty()){
|
||||
S3FS_PRN_INFO("The path to cache top dir is empty, thus not need to remove it.");
|
||||
return true;
|
||||
}
|
||||
return delete_files_in_dir(top_path.c_str(), true);
|
||||
}
|
||||
|
||||
bool CacheFileStat::RenameCacheFileStat(const char* oldpath, const char* newpath)
|
||||
{
|
||||
if(!oldpath || '\0' == oldpath[0] || !newpath || '\0' == newpath[0]){
|
||||
return false;
|
||||
}
|
||||
|
||||
// stat path
|
||||
string old_filestat;
|
||||
string new_filestat;
|
||||
if(!CacheFileStat::MakeCacheFileStatPath(oldpath, old_filestat, false) || !CacheFileStat::MakeCacheFileStatPath(newpath, new_filestat, false)){
|
||||
return false;
|
||||
}
|
||||
|
||||
// check new stat path
|
||||
struct stat st;
|
||||
if(0 == stat(new_filestat.c_str(), &st)){
|
||||
// the new stat path already exists, so unlink it.
|
||||
if(-1 == unlink(new_filestat.c_str())){
|
||||
S3FS_PRN_ERR("failed to unlink new cache file stat path(%s) by errno(%d).", new_filestat.c_str(), errno);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// check old stat path
|
||||
if(0 != stat(old_filestat.c_str(), &st)){
|
||||
// the old stat path does not exist, so there is nothing more to do.
|
||||
return true;
|
||||
}
|
||||
|
||||
// link and unlink
|
||||
if(-1 == link(old_filestat.c_str(), new_filestat.c_str())){
|
||||
S3FS_PRN_ERR("failed to link old cache file stat path(%s) to new cache file stat path(%s) by errno(%d).", old_filestat.c_str(), new_filestat.c_str(), errno);
|
||||
return false;
|
||||
}
|
||||
if(-1 == unlink(old_filestat.c_str())){
|
||||
S3FS_PRN_ERR("failed to unlink old cache file stat path(%s) by errno(%d).", old_filestat.c_str(), errno);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
//------------------------------------------------
|
||||
// CacheFileStat methods
|
||||
//------------------------------------------------
|
||||
CacheFileStat::CacheFileStat(const char* tpath) : path(""), fd(-1)
|
||||
{
|
||||
if(tpath && '\0' != tpath[0]){
|
||||
SetPath(tpath, true);
|
||||
}
|
||||
}
|
||||
|
||||
CacheFileStat::~CacheFileStat()
|
||||
{
|
||||
Release();
|
||||
}
|
||||
|
||||
bool CacheFileStat::SetPath(const char* tpath, bool is_open)
|
||||
{
|
||||
if(!tpath || '\0' == tpath[0]){
|
||||
return false;
|
||||
}
|
||||
if(!Release()){
|
||||
// could not close old stat file.
|
||||
return false;
|
||||
}
|
||||
path = tpath;
|
||||
if(!is_open){
|
||||
return true;
|
||||
}
|
||||
return Open();
|
||||
}
|
||||
|
||||
bool CacheFileStat::RawOpen(bool readonly)
|
||||
{
|
||||
if(path.empty()){
|
||||
return false;
|
||||
}
|
||||
if(-1 != fd){
|
||||
// already opened
|
||||
return true;
|
||||
}
|
||||
// stat path
|
||||
string sfile_path;
|
||||
if(!CacheFileStat::MakeCacheFileStatPath(path.c_str(), sfile_path, true)){
|
||||
S3FS_PRN_ERR("failed to create cache stat file path(%s)", path.c_str());
|
||||
return false;
|
||||
}
|
||||
// open
|
||||
if(readonly){
|
||||
if(-1 == (fd = open(sfile_path.c_str(), O_RDONLY))){
|
||||
S3FS_PRN_ERR("failed to read only open cache stat file path(%s) - errno(%d)", path.c_str(), errno);
|
||||
return false;
|
||||
}
|
||||
}else{
|
||||
if(-1 == (fd = open(sfile_path.c_str(), O_CREAT|O_RDWR, 0600))){
|
||||
S3FS_PRN_ERR("failed to open cache stat file path(%s) - errno(%d)", path.c_str(), errno);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
// lock
|
||||
if(-1 == flock(fd, LOCK_EX)){
|
||||
S3FS_PRN_ERR("failed to lock cache stat file(%s) - errno(%d)", path.c_str(), errno);
|
||||
close(fd);
|
||||
fd = -1;
|
||||
return false;
|
||||
}
|
||||
// seek top
|
||||
if(0 != lseek(fd, 0, SEEK_SET)){
|
||||
S3FS_PRN_ERR("failed to lseek cache stat file(%s) - errno(%d)", path.c_str(), errno);
|
||||
flock(fd, LOCK_UN);
|
||||
close(fd);
|
||||
fd = -1;
|
||||
return false;
|
||||
}
|
||||
S3FS_PRN_DBG("file locked(%s - %s)", path.c_str(), sfile_path.c_str());
|
||||
|
||||
return true;
|
||||
}
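// A minimal usage sketch (hypothetical object path), based only on the
// methods of this class: constructing with a path opens the stat file and
// takes an exclusive flock, and Release() (also called by the destructor)
// unlocks and closes it.
//
//   CacheFileStat cfstat("/dir/file");   // SetPath() -> Open() -> flock(LOCK_EX)
//   int fd = cfstat.GetFd();             // read/write the locked stat file
//   cfstat.Release();                    // flock(LOCK_UN) and close()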
|
||||
|
||||
bool CacheFileStat::Open()
|
||||
{
|
||||
return RawOpen(false);
|
||||
}
|
||||
|
||||
bool CacheFileStat::ReadOnlyOpen()
|
||||
{
|
||||
return RawOpen(true);
|
||||
}
|
||||
|
||||
bool CacheFileStat::Release()
|
||||
{
|
||||
if(-1 == fd){
|
||||
// already released
|
||||
return true;
|
||||
}
|
||||
// unlock
|
||||
if(-1 == flock(fd, LOCK_UN)){
|
||||
S3FS_PRN_ERR("failed to unlock cache stat file(%s) - errno(%d)", path.c_str(), errno);
|
||||
return false;
|
||||
}
|
||||
S3FS_PRN_DBG("file unlocked(%s)", path.c_str());
|
||||
|
||||
if(-1 == close(fd)){
|
||||
S3FS_PRN_ERR("failed to close cache stat file(%s) - errno(%d)", path.c_str(), errno);
|
||||
return false;
|
||||
}
|
||||
fd = -1;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
64  src/fdcache_stat.h  Normal file
@ -0,0 +1,64 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_FDCACHE_STAT_H_
|
||||
#define S3FS_FDCACHE_STAT_H_
|
||||
|
||||
//------------------------------------------------
|
||||
// CacheFileStat
|
||||
//------------------------------------------------
|
||||
class CacheFileStat
|
||||
{
|
||||
private:
|
||||
std::string path;
|
||||
int fd;
|
||||
|
||||
private:
|
||||
static bool MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir = true);
|
||||
|
||||
bool RawOpen(bool readonly);
|
||||
|
||||
public:
|
||||
static std::string GetCacheFileStatTopDir(void);
|
||||
static bool DeleteCacheFileStat(const char* path);
|
||||
static bool CheckCacheFileStatTopDir(void);
|
||||
static bool DeleteCacheFileStatDirectory(void);
|
||||
static bool RenameCacheFileStat(const char* oldpath, const char* newpath);
|
||||
|
||||
explicit CacheFileStat(const char* tpath = NULL);
|
||||
~CacheFileStat();
|
||||
|
||||
bool Open(void);
|
||||
bool ReadOnlyOpen(void);
|
||||
bool Release(void);
|
||||
bool SetPath(const char* tpath, bool is_open = true);
|
||||
int GetFd(void) const { return fd; }
|
||||
};
|
||||
|
||||
#endif // S3FS_FDCACHE_STAT_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
@ -30,7 +30,7 @@
|
||||
#include <gcrypt.h>
|
||||
#include <gnutls/gnutls.h>
|
||||
#include <gnutls/crypto.h>
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
#include <nettle/md5.h>
|
||||
#include <nettle/sha1.h>
|
||||
#include <nettle/hmac.h>
|
||||
@ -39,6 +39,7 @@
|
||||
#include <map>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_auth.h"
|
||||
|
||||
using namespace std;
|
||||
@ -46,46 +47,46 @@ using namespace std;
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for version
|
||||
//-------------------------------------------------------------------
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
|
||||
const char* s3fs_crypt_lib_name(void)
|
||||
{
|
||||
static const char version[] = "GnuTLS(nettle)";
|
||||
static const char version[] = "GnuTLS(nettle)";
|
||||
|
||||
return version;
|
||||
return version;
|
||||
}
|
||||
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
|
||||
const char* s3fs_crypt_lib_name()
|
||||
{
|
||||
static const char version[] = "GnuTLS(gcrypt)";
|
||||
static const char version[] = "GnuTLS(gcrypt)";
|
||||
|
||||
return version;
|
||||
return version;
|
||||
}
|
||||
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for global init
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_global_ssl()
|
||||
{
|
||||
if(GNUTLS_E_SUCCESS != gnutls_global_init()){
|
||||
return false;
|
||||
}
|
||||
if(GNUTLS_E_SUCCESS != gnutls_global_init()){
|
||||
return false;
|
||||
}
|
||||
#ifndef USE_GNUTLS_NETTLE
|
||||
if(NULL == gcry_check_version(NULL)){
|
||||
return false;
|
||||
}
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
return true;
|
||||
if(NULL == gcry_check_version(NULL)){
|
||||
return false;
|
||||
}
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_global_ssl()
|
||||
{
|
||||
gnutls_global_deinit();
|
||||
return true;
|
||||
gnutls_global_deinit();
|
||||
return true;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@ -93,304 +94,304 @@ bool s3fs_destroy_global_ssl()
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_crypt_mutex()
|
||||
{
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_crypt_mutex()
|
||||
{
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for HMAC
|
||||
//-------------------------------------------------------------------
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
|
||||
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
}
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
}
|
||||
|
||||
*digest = new unsigned char[SHA1_DIGEST_SIZE];
|
||||
*digest = new unsigned char[SHA1_DIGEST_SIZE];
|
||||
|
||||
struct hmac_sha1_ctx ctx_hmac;
|
||||
hmac_sha1_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
|
||||
hmac_sha1_update(&ctx_hmac, datalen, reinterpret_cast<const uint8_t*>(data));
|
||||
hmac_sha1_digest(&ctx_hmac, SHA1_DIGEST_SIZE, reinterpret_cast<uint8_t*>(*digest));
|
||||
*digestlen = SHA1_DIGEST_SIZE;
|
||||
struct hmac_sha1_ctx ctx_hmac;
|
||||
hmac_sha1_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
|
||||
hmac_sha1_update(&ctx_hmac, datalen, reinterpret_cast<const uint8_t*>(data));
|
||||
hmac_sha1_digest(&ctx_hmac, SHA1_DIGEST_SIZE, reinterpret_cast<uint8_t*>(*digest));
|
||||
*digestlen = SHA1_DIGEST_SIZE;
|
||||
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
}
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
}
|
||||
|
||||
*digest = new unsigned char[SHA256_DIGEST_SIZE];
|
||||
*digest = new unsigned char[SHA256_DIGEST_SIZE];
|
||||
|
||||
struct hmac_sha256_ctx ctx_hmac;
|
||||
hmac_sha256_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
|
||||
hmac_sha256_update(&ctx_hmac, datalen, reinterpret_cast<const uint8_t*>(data));
|
||||
hmac_sha256_digest(&ctx_hmac, SHA256_DIGEST_SIZE, reinterpret_cast<uint8_t*>(*digest));
|
||||
*digestlen = SHA256_DIGEST_SIZE;
|
||||
struct hmac_sha256_ctx ctx_hmac;
|
||||
hmac_sha256_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
|
||||
hmac_sha256_update(&ctx_hmac, datalen, reinterpret_cast<const uint8_t*>(data));
|
||||
hmac_sha256_digest(&ctx_hmac, SHA256_DIGEST_SIZE, reinterpret_cast<uint8_t*>(*digest));
|
||||
*digestlen = SHA256_DIGEST_SIZE;
|
||||
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
|
||||
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
}
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
}
|
||||
|
||||
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA1))){
|
||||
return false;
|
||||
}
|
||||
*digest = new unsigned char[*digestlen + 1];
|
||||
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA1, key, keylen, data, datalen, *digest)){
|
||||
delete[] *digest;
|
||||
*digest = NULL;
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA1))){
|
||||
return false;
|
||||
}
|
||||
*digest = new unsigned char[*digestlen + 1];
|
||||
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA1, key, keylen, data, datalen, *digest)){
|
||||
delete[] *digest;
|
||||
*digest = NULL;
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
}
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
}
|
||||
|
||||
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA256))){
|
||||
return false;
|
||||
}
|
||||
*digest = new unsigned char[*digestlen + 1];
|
||||
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA256, key, keylen, data, datalen, *digest)){
|
||||
delete[] *digest;
|
||||
*digest = NULL;
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA256))){
|
||||
return false;
|
||||
}
|
||||
*digest = new unsigned char[*digestlen + 1];
|
||||
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA256, key, keylen, data, datalen, *digest)){
|
||||
delete[] *digest;
|
||||
*digest = NULL;
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_md5_digest_length()
|
||||
{
|
||||
return 16;
|
||||
return 16;
|
||||
}
|
||||
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
{
|
||||
struct md5_ctx ctx_md5;
|
||||
unsigned char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
struct md5_ctx ctx_md5;
|
||||
unsigned char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
memset(buf, 0, 512);
|
||||
md5_init(&ctx_md5);
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return NULL;
|
||||
}
|
||||
md5_update(&ctx_md5, bytes, buf);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
result = new unsigned char[get_md5_digest_length()];
|
||||
md5_digest(&ctx_md5, get_md5_digest_length(), result);
|
||||
md5_init(&ctx_md5);
|
||||
|
||||
return result;
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return NULL;
|
||||
}
|
||||
md5_update(&ctx_md5, bytes, buf);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
result = new unsigned char[get_md5_digest_length()];
|
||||
md5_digest(&ctx_md5, get_md5_digest_length(), result);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
|
||||
unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
{
|
||||
gcry_md_hd_t ctx_md5;
|
||||
gcry_error_t err;
|
||||
char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
gcry_md_hd_t ctx_md5;
|
||||
gcry_error_t err;
|
||||
char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
}
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
|
||||
memset(buf, 0, 512);
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_md5, GCRY_MD_MD5, 0))){
|
||||
S3FS_PRN_ERR("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
gcry_md_close(ctx_md5);
|
||||
return NULL;
|
||||
}
|
||||
gcry_md_write(ctx_md5, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
result = new unsigned char[get_md5_digest_length()];
|
||||
memcpy(result, gcry_md_read(ctx_md5, 0), get_md5_digest_length());
|
||||
gcry_md_close(ctx_md5);
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_md5, GCRY_MD_MD5, 0))){
|
||||
S3FS_PRN_ERR("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return result;
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
gcry_md_close(ctx_md5);
|
||||
return NULL;
|
||||
}
|
||||
gcry_md_write(ctx_md5, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
result = new unsigned char[get_md5_digest_length()];
|
||||
memcpy(result, gcry_md_read(ctx_md5, 0), get_md5_digest_length());
|
||||
gcry_md_close(ctx_md5);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for SHA256
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_sha256_digest_length()
|
||||
{
|
||||
return 32;
|
||||
return 32;
|
||||
}
|
||||
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
*digest = new unsigned char[*digestlen];
|
||||
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
*digest = new unsigned char[*digestlen];
|
||||
|
||||
struct sha256_ctx ctx_sha256;
|
||||
sha256_init(&ctx_sha256);
|
||||
sha256_update(&ctx_sha256, datalen, data);
|
||||
sha256_digest(&ctx_sha256, *digestlen, *digest);
|
||||
struct sha256_ctx ctx_sha256;
|
||||
sha256_init(&ctx_sha256);
|
||||
sha256_update(&ctx_sha256, datalen, data);
|
||||
sha256_digest(&ctx_sha256, *digestlen, *digest);
|
||||
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
{
|
||||
struct sha256_ctx ctx_sha256;
|
||||
unsigned char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
struct sha256_ctx ctx_sha256;
|
||||
unsigned char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
memset(buf, 0, 512);
|
||||
sha256_init(&ctx_sha256);
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return NULL;
|
||||
}
|
||||
sha256_update(&ctx_sha256, bytes, buf);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
result = new unsigned char[get_sha256_digest_length()];
|
||||
sha256_digest(&ctx_sha256, get_sha256_digest_length(), result);
|
||||
sha256_init(&ctx_sha256);
|
||||
|
||||
return result;
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return NULL;
|
||||
}
|
||||
sha256_update(&ctx_sha256, bytes, buf);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
result = new unsigned char[get_sha256_digest_length()];
|
||||
sha256_digest(&ctx_sha256, get_sha256_digest_length(), result);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
|
||||
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
size_t len = (*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
*digest = new unsigned char[len];
|
||||
size_t len = (*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
*digest = new unsigned char[len];
|
||||
|
||||
gcry_md_hd_t ctx_sha256;
|
||||
gcry_error_t err;
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
|
||||
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
delete[] *digest;
|
||||
return false;
|
||||
}
|
||||
gcry_md_write(ctx_sha256, data, datalen);
|
||||
memcpy(*digest, gcry_md_read(ctx_sha256, 0), *digestlen);
|
||||
gcry_md_close(ctx_sha256);
|
||||
gcry_md_hd_t ctx_sha256;
|
||||
gcry_error_t err;
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
|
||||
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
delete[] *digest;
|
||||
return false;
|
||||
}
|
||||
gcry_md_write(ctx_sha256, data, datalen);
|
||||
memcpy(*digest, gcry_md_read(ctx_sha256, 0), *digestlen);
|
||||
gcry_md_close(ctx_sha256);
|
||||
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
{
|
||||
gcry_md_hd_t ctx_sha256;
|
||||
gcry_error_t err;
|
||||
char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
gcry_md_hd_t ctx_sha256;
|
||||
gcry_error_t err;
|
||||
char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
}
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
|
||||
memset(buf, 0, 512);
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
|
||||
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
gcry_md_close(ctx_sha256);
|
||||
return NULL;
|
||||
}
|
||||
gcry_md_write(ctx_sha256, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
result = new unsigned char[get_sha256_digest_length()];
|
||||
memcpy(result, gcry_md_read(ctx_sha256, 0), get_sha256_digest_length());
|
||||
gcry_md_close(ctx_sha256);
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
|
||||
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return result;
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
gcry_md_close(ctx_sha256);
|
||||
return NULL;
|
||||
}
|
||||
gcry_md_write(ctx_sha256, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
result = new unsigned char[get_sha256_digest_length()];
|
||||
memcpy(result, gcry_md_read(ctx_sha256, 0), get_sha256_digest_length());
|
||||
gcry_md_close(ctx_sha256);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 2
|
||||
* c-basic-offset: 2
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=2 ts=2 fdm=marker
|
||||
* vim<600: expandtab sw=2 ts=2
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
322  src/metaheader.cpp  Normal file
@ -0,0 +1,322 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <unistd.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "metaheader.h"
|
||||
#include "string_util.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility functions for convert
|
||||
//-------------------------------------------------------------------
|
||||
time_t get_mtime(const char *str)
|
||||
{
|
||||
// [NOTE]
|
||||
// rclone sometimes stores x-amz-meta-mtime with sub-second (ns) precision
// as a floating point number. s3fs handles this by truncating the
// fractional part and using x-amz-meta-mtime in whole seconds.
|
||||
//
|
||||
string strmtime;
|
||||
if(str && '\0' != *str){
|
||||
strmtime = str;
|
||||
string::size_type pos = strmtime.find('.', 0);
|
||||
if(string::npos != pos){
|
||||
strmtime = strmtime.substr(0, pos);
|
||||
}
|
||||
}
|
||||
return static_cast<time_t>(cvt_strtoofft(strmtime.c_str()));
|
||||
}
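// Illustrative examples of the truncation above (hypothetical values):
// "1576222222.123456789" and "1576222222.5" both yield 1576222222, and a
// plain "1576222222" is returned unchanged.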
|
||||
|
||||
static time_t get_time(const headers_t& meta, const char *header)
|
||||
{
|
||||
headers_t::const_iterator iter;
|
||||
if(meta.end() == (iter = meta.find(header))){
|
||||
return 0;
|
||||
}
|
||||
return get_mtime((*iter).second.c_str());
|
||||
}
|
||||
|
||||
time_t get_mtime(const headers_t& meta, bool overcheck)
|
||||
{
|
||||
time_t t = get_time(meta, "x-amz-meta-mtime");
|
||||
if(t != 0){
|
||||
return t;
|
||||
}
|
||||
t = get_time(meta, "x-amz-meta-goog-reserved-file-mtime");
|
||||
if(t != 0){
|
||||
return t;
|
||||
}
|
||||
if(overcheck){
|
||||
return get_lastmodified(meta);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
time_t get_ctime(const headers_t& meta, bool overcheck)
|
||||
{
|
||||
time_t t = get_time(meta, "x-amz-meta-ctime");
|
||||
if(t != 0){
|
||||
return t;
|
||||
}
|
||||
if(overcheck){
|
||||
return get_lastmodified(meta);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
off_t get_size(const char *s)
|
||||
{
|
||||
return cvt_strtoofft(s);
|
||||
}
|
||||
|
||||
off_t get_size(const headers_t& meta)
|
||||
{
|
||||
headers_t::const_iterator iter = meta.find("Content-Length");
|
||||
if(meta.end() == iter){
|
||||
return 0;
|
||||
}
|
||||
return get_size((*iter).second.c_str());
|
||||
}
|
||||
|
||||
mode_t get_mode(const char *s, int base)
|
||||
{
|
||||
return static_cast<mode_t>(cvt_strtoofft(s, base));
|
||||
}
|
||||
|
||||
mode_t get_mode(const headers_t& meta, const char* path, bool checkdir, bool forcedir)
|
||||
{
|
||||
mode_t mode = 0;
|
||||
bool isS3sync = false;
|
||||
headers_t::const_iterator iter;
|
||||
|
||||
if(meta.end() != (iter = meta.find("x-amz-meta-mode"))){
|
||||
mode = get_mode((*iter).second.c_str());
|
||||
}else if(meta.end() != (iter = meta.find("x-amz-meta-permissions"))){ // for s3sync
|
||||
mode = get_mode((*iter).second.c_str());
|
||||
isS3sync = true;
|
||||
}else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-mode"))){ // for GCS
|
||||
mode = get_mode((*iter).second.c_str(), 8);
|
||||
}else{
|
||||
// If another tool creates an object without permissions, default to owner
|
||||
// read-write and group readable.
|
||||
mode = path[strlen(path) - 1] == '/' ? 0750 : 0640;
|
||||
}
|
||||
|
||||
// Checking the bitmask, if the last 3 bits are all zero then process as a regular
|
||||
// file type (S_IFDIR or S_IFREG), otherwise return mode unmodified so that S_IFIFO,
|
||||
// S_IFSOCK, S_IFCHR, S_IFLNK and S_IFBLK devices can be processed properly by fuse.
|
||||
if(!(mode & S_IFMT)){
|
||||
if(!isS3sync){
|
||||
if(checkdir){
|
||||
if(forcedir){
|
||||
mode |= S_IFDIR;
|
||||
}else{
|
||||
if(meta.end() != (iter = meta.find("Content-Type"))){
|
||||
string strConType = (*iter).second;
|
||||
// Leave just the mime type, remove any optional parameters (eg charset)
|
||||
string::size_type pos = strConType.find(';');
|
||||
if(string::npos != pos){
|
||||
strConType = strConType.substr(0, pos);
|
||||
}
|
||||
if(strConType == "application/x-directory" || strConType == "httpd/unix-directory"){
|
||||
// Nextcloud uses this MIME type for directory objects when mounting bucket as external Storage
|
||||
mode |= S_IFDIR;
|
||||
}else if(path && 0 < strlen(path) && '/' == path[strlen(path) - 1]){
|
||||
if(strConType == "binary/octet-stream" || strConType == "application/octet-stream"){
|
||||
mode |= S_IFDIR;
|
||||
}else{
|
||||
if(complement_stat){
|
||||
// When complementing a missing stat mode, an object whose name ends with
// '/', whose content type is text/plain and whose size is 0 or 1 should
// be treated as a directory.
|
||||
off_t size = get_size(meta);
|
||||
if(strConType == "text/plain" && (0 == size || 1 == size)){
|
||||
mode |= S_IFDIR;
|
||||
}else{
|
||||
mode |= S_IFREG;
|
||||
}
|
||||
}else{
|
||||
mode |= S_IFREG;
|
||||
}
|
||||
}
|
||||
}else{
|
||||
mode |= S_IFREG;
|
||||
}
|
||||
}else{
|
||||
mode |= S_IFREG;
|
||||
}
|
||||
}
|
||||
}
|
||||
// When complementing a missing stat mode, if the mode has no permission
// bits set, give the object a minimal mode with read permission only
// (plus execute for directories).
|
||||
if(complement_stat && 0 == (mode & (S_IRWXU | S_IRWXG | S_IRWXO))){
|
||||
mode |= (S_IRUSR | (0 == (mode & S_IFDIR) ? 0 : S_IXUSR));
|
||||
}
|
||||
}else{
|
||||
if(!checkdir){
|
||||
// cut dir/reg flag.
|
||||
mode &= ~S_IFDIR;
|
||||
mode &= ~S_IFREG;
|
||||
}
|
||||
}
|
||||
}
|
||||
return mode;
|
||||
}
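// Illustrative examples of the defaulting above (hypothetical objects, with
// checkdir=true): an object with no mode metadata whose key ends in '/' and
// whose Content-Type is "application/x-directory" resolves to 0750 | S_IFDIR,
// while a plain object with no mode metadata and an ordinary Content-Type
// resolves to 0640 | S_IFREG.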
|
||||
|
||||
uid_t get_uid(const char *s)
|
||||
{
|
||||
return static_cast<uid_t>(cvt_strtoofft(s));
|
||||
}
|
||||
|
||||
uid_t get_uid(const headers_t& meta)
|
||||
{
|
||||
headers_t::const_iterator iter;
|
||||
if(meta.end() != (iter = meta.find("x-amz-meta-uid"))){
|
||||
return get_uid((*iter).second.c_str());
|
||||
}else if(meta.end() != (iter = meta.find("x-amz-meta-owner"))){ // for s3sync
|
||||
return get_uid((*iter).second.c_str());
|
||||
}else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-uid"))){ // for GCS
|
||||
return get_uid((*iter).second.c_str());
|
||||
}else{
|
||||
return geteuid();
|
||||
}
|
||||
}
|
||||
|
||||
gid_t get_gid(const char *s)
|
||||
{
|
||||
return static_cast<gid_t>(cvt_strtoofft(s));
|
||||
}
|
||||
|
||||
gid_t get_gid(const headers_t& meta)
|
||||
{
|
||||
headers_t::const_iterator iter;
|
||||
if(meta.end() != (iter = meta.find("x-amz-meta-gid"))){
|
||||
return get_gid((*iter).second.c_str());
|
||||
}else if(meta.end() != (iter = meta.find("x-amz-meta-group"))){ // for s3sync
|
||||
return get_gid((*iter).second.c_str());
|
||||
}else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-gid"))){ // for GCS
|
||||
return get_gid((*iter).second.c_str());
|
||||
}else{
|
||||
return getegid();
|
||||
}
|
||||
}
|
||||
|
||||
blkcnt_t get_blocks(off_t size)
|
||||
{
|
||||
return size / 512 + 1;
|
||||
}
|
||||
|
||||
time_t cvtIAMExpireStringToTime(const char* s)
|
||||
{
|
||||
struct tm tm;
|
||||
if(!s){
|
||||
return 0L;
|
||||
}
|
||||
memset(&tm, 0, sizeof(struct tm));
|
||||
strptime(s, "%Y-%m-%dT%H:%M:%S", &tm);
|
||||
return timegm(&tm); // GMT
|
||||
}
|
||||
|
||||
time_t get_lastmodified(const char* s)
|
||||
{
|
||||
struct tm tm;
|
||||
if(!s){
|
||||
return 0L;
|
||||
}
|
||||
memset(&tm, 0, sizeof(struct tm));
|
||||
strptime(s, "%a, %d %b %Y %H:%M:%S %Z", &tm);
|
||||
return timegm(&tm); // GMT
|
||||
}
|
||||
|
||||
time_t get_lastmodified(const headers_t& meta)
|
||||
{
|
||||
headers_t::const_iterator iter = meta.find("Last-Modified");
|
||||
if(meta.end() == iter){
|
||||
return 0;
|
||||
}
|
||||
return get_lastmodified((*iter).second.c_str());
|
||||
}
|
||||
|
||||
//
|
||||
// Returns whether the object needs to be checked in detail.
// If this function returns true, the object may be a directory and needs
// detailed checking (searching for sub objects).
|
||||
//
|
||||
bool is_need_check_obj_detail(const headers_t& meta)
|
||||
{
|
||||
headers_t::const_iterator iter;
|
||||
|
||||
// a directory object has a Content-Length of 0.
|
||||
if(0 != get_size(meta)){
|
||||
return false;
|
||||
}
|
||||
// if the object has x-amz-meta information, no further checking is needed.
|
||||
if(meta.end() != meta.find("x-amz-meta-mode") ||
|
||||
meta.end() != meta.find("x-amz-meta-mtime") ||
|
||||
meta.end() != meta.find("x-amz-meta-uid") ||
|
||||
meta.end() != meta.find("x-amz-meta-gid") ||
|
||||
meta.end() != meta.find("x-amz-meta-owner") ||
|
||||
meta.end() != meta.find("x-amz-meta-group") ||
|
||||
meta.end() != meta.find("x-amz-meta-permissions") )
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// if there is no Content-Type, or the Content-Type is "application/x-directory",
// no further checking is needed.
|
||||
if(meta.end() == (iter = meta.find("Content-Type"))){
|
||||
return false;
|
||||
}
|
||||
if("application/x-directory" == (*iter).second){
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
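// An illustrative example (hypothetical object): a zero-byte object with no
// x-amz-meta-* attributes and a Content-Type of "binary/octet-stream"
// returns true here, i.e. it may be a directory created by another tool and
// its sub objects need to be searched.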
|
||||
|
||||
// [NOTE]
|
||||
// If add_noexist is false and the key does not exist, it will not be added.
|
||||
//
|
||||
bool merge_headers(headers_t& base, const headers_t& additional, bool add_noexist)
|
||||
{
|
||||
bool added = false;
|
||||
for(headers_t::const_iterator iter = additional.begin(); iter != additional.end(); ++iter){
|
||||
if(add_noexist || base.find(iter->first) != base.end()){
|
||||
base[iter->first] = iter->second;
|
||||
added = true;
|
||||
}
|
||||
}
|
||||
return added;
|
||||
}
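// An illustrative example (hypothetical headers): with
//   base       = { "x-amz-meta-mtime": "100" }
//   additional = { "x-amz-meta-mtime": "200", "x-amz-meta-uid": "1000" }
// merge_headers(base, additional, false) only overwrites the existing key,
// leaving base = { "x-amz-meta-mtime": "200" }, while add_noexist=true also
// copies "x-amz-meta-uid" into base.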
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
72  src/metaheader.h  Normal file
@ -0,0 +1,72 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef S3FS_METAHEADER_H_
#define S3FS_METAHEADER_H_

#include <string>
#include <map>
#include <list>

//-------------------------------------------------------------------
// headers_t
//-------------------------------------------------------------------
struct header_nocase_cmp : public std::binary_function<std::string, std::string, bool>
{
    bool operator()(const std::string &strleft, const std::string &strright) const
    {
        return (strcasecmp(strleft.c_str(), strright.c_str()) < 0);
    }
};
typedef std::map<std::string, std::string, header_nocase_cmp> headers_t;
typedef std::list<headers_t> headers_list_t;

//-------------------------------------------------------------------
// Functions
//-------------------------------------------------------------------
time_t get_mtime(const char *s);
time_t get_mtime(const headers_t& meta, bool overcheck = true);
time_t get_ctime(const headers_t& meta, bool overcheck = true);
off_t get_size(const char *s);
off_t get_size(const headers_t& meta);
mode_t get_mode(const char *s, int base = 0);
mode_t get_mode(const headers_t& meta, const char* path = NULL, bool checkdir = false, bool forcedir = false);
uid_t get_uid(const char *s);
uid_t get_uid(const headers_t& meta);
gid_t get_gid(const char *s);
gid_t get_gid(const headers_t& meta);
blkcnt_t get_blocks(off_t size);
time_t cvtIAMExpireStringToTime(const char* s);
time_t get_lastmodified(const char* s);
time_t get_lastmodified(const headers_t& meta);
bool is_need_check_obj_detail(const headers_t& meta);
bool merge_headers(headers_t& base, const headers_t& additional, bool add_noexist);
bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value);

#endif // S3FS_METAHEADER_H_

/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/
161
src/mpu_util.cpp
Normal file
@ -0,0 +1,161 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "mpu_util.h"
|
||||
#include "curl.h"
|
||||
#include "s3fs_xml.h"
|
||||
#include "s3fs_auth.h"
|
||||
#include "string_util.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
//-------------------------------------------------------------------
|
||||
utility_incomp_type utility_mode = NO_UTILITY_MODE;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
static void print_incomp_mpu_list(incomp_mpu_list_t& list)
|
||||
{
|
||||
printf("\n");
|
||||
printf("Lists the parts that have been uploaded for a specific multipart upload.\n");
|
||||
printf("\n");
|
||||
|
||||
if(!list.empty()){
|
||||
printf("---------------------------------------------------------------\n");
|
||||
|
||||
int cnt = 0;
|
||||
for(incomp_mpu_list_t::iterator iter = list.begin(); iter != list.end(); ++iter, ++cnt){
|
||||
printf(" Path : %s\n", (*iter).key.c_str());
|
||||
printf(" UploadId : %s\n", (*iter).id.c_str());
|
||||
printf(" Date : %s\n", (*iter).date.c_str());
|
||||
printf("\n");
|
||||
}
|
||||
printf("---------------------------------------------------------------\n");
|
||||
|
||||
}else{
|
||||
printf("There is no list.\n");
|
||||
}
|
||||
}
|
||||
|
||||
static bool abort_incomp_mpu_list(incomp_mpu_list_t& list, time_t abort_time)
|
||||
{
|
||||
if(list.empty()){
|
||||
return true;
|
||||
}
|
||||
time_t now_time = time(NULL);
|
||||
|
||||
// do removing.
|
||||
S3fsCurl s3fscurl;
|
||||
bool result = true;
|
||||
for(incomp_mpu_list_t::iterator iter = list.begin(); iter != list.end(); ++iter){
|
||||
const char* tpath = (*iter).key.c_str();
|
||||
string upload_id = (*iter).id;
|
||||
|
||||
if(0 != abort_time){ // abort_time is 0, it means all.
|
||||
time_t date = 0;
|
||||
if(!get_unixtime_from_iso8601((*iter).date.c_str(), date)){
|
||||
S3FS_PRN_DBG("date format is not ISO 8601 for %s multipart uploading object, skip this.", tpath);
|
||||
continue;
|
||||
}
|
||||
if(now_time <= (date + abort_time)){
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if(0 != s3fscurl.AbortMultipartUpload(tpath, upload_id)){
|
||||
S3FS_PRN_EXIT("Failed to remove %s multipart uploading object.", tpath);
|
||||
result = false;
|
||||
}else{
|
||||
printf("Succeed to remove %s multipart uploading object.\n", tpath);
|
||||
}
|
||||
|
||||
// reset(initialize) curl object
|
||||
s3fscurl.DestroyCurlHandle();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
int s3fs_utility_processing(time_t abort_time)
|
||||
{
|
||||
if(NO_UTILITY_MODE == utility_mode){
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
printf("\n*** s3fs run as utility mode.\n\n");
|
||||
|
||||
S3fsCurl s3fscurl;
|
||||
string body;
|
||||
int result = EXIT_SUCCESS;
|
||||
if(0 != s3fscurl.MultipartListRequest(body)){
|
||||
S3FS_PRN_EXIT("Could not get list multipart upload.\nThere is no incomplete multipart uploaded object in bucket.\n");
|
||||
result = EXIT_FAILURE;
|
||||
}else{
|
||||
// parse result(incomplete multipart upload information)
|
||||
S3FS_PRN_DBG("response body = {\n%s\n}", body.c_str());
|
||||
|
||||
xmlDocPtr doc;
|
||||
if(NULL == (doc = xmlReadMemory(body.c_str(), static_cast<int>(body.size()), "", NULL, 0))){
|
||||
S3FS_PRN_DBG("xmlReadMemory exited with error.");
|
||||
result = EXIT_FAILURE;
|
||||
|
||||
}else{
|
||||
// make incomplete uploads list
|
||||
incomp_mpu_list_t list;
|
||||
if(!get_incomp_mpu_list(doc, list)){
|
||||
S3FS_PRN_DBG("get_incomp_mpu_list exited with error.");
|
||||
result = EXIT_FAILURE;
|
||||
|
||||
}else{
|
||||
if(INCOMP_TYPE_LIST == utility_mode){
|
||||
// print list
|
||||
print_incomp_mpu_list(list);
|
||||
}else if(INCOMP_TYPE_ABORT == utility_mode){
|
||||
// remove
|
||||
if(!abort_incomp_mpu_list(list, abort_time)){
|
||||
S3FS_PRN_DBG("an error occurred during removal process.");
|
||||
result = EXIT_FAILURE;
|
||||
}
|
||||
}
|
||||
}
|
||||
S3FS_XMLFREEDOC(doc);
|
||||
}
|
||||
}
|
||||
|
||||
// ssl
|
||||
s3fs_destroy_global_ssl();
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
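The abort_time handling in abort_incomp_mpu_list() above (0 means "abort everything", otherwise only uploads older than the cutoff are aborted) can be sketched in isolation; the hard-coded ages below are illustrative only:

// sketch: mirrors the date check used by abort_incomp_mpu_list(), not code from this commit
#include <ctime>
#include <cstdio>

static bool should_abort(time_t upload_date, time_t abort_time, time_t now)
{
    if(0 == abort_time){
        return true;                            // abort_time == 0 means all uploads
    }
    return (now > upload_date + abort_time);    // only uploads strictly older than the cutoff
}

int main()
{
    time_t now = time(NULL);
    printf("%d\n", should_abort(now - 7200, 3600, now));   // 2h old, 1h cutoff -> 1 (abort)
    printf("%d\n", should_abort(now - 600,  3600, now));   // 10m old, 1h cutoff -> 0 (keep)
    return 0;
}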
64
src/mpu_util.h
Normal file
@ -0,0 +1,64 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_MPU_UTIL_H_
|
||||
#define S3FS_MPU_UTIL_H_
|
||||
|
||||
#include <string>
|
||||
#include <list>
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Structure / Typedef
|
||||
//-------------------------------------------------------------------
|
||||
typedef struct incomplete_multipart_upload_info
|
||||
{
|
||||
std::string key;
|
||||
std::string id;
|
||||
std::string date;
|
||||
}INCOMP_MPU_INFO;
|
||||
|
||||
typedef std::list<INCOMP_MPU_INFO> incomp_mpu_list_t;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// enum for utility process mode
|
||||
//-------------------------------------------------------------------
|
||||
enum utility_incomp_type{
|
||||
NO_UTILITY_MODE = 0, // not utility mode
|
||||
INCOMP_TYPE_LIST, // list of incomplete mpu
|
||||
INCOMP_TYPE_ABORT // delete incomplete mpu
|
||||
};
|
||||
|
||||
extern utility_incomp_type utility_mode;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
int s3fs_utility_processing(time_t abort_time);
|
||||
|
||||
#endif // S3FS_MPU_UTIL_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
142
src/mvnode.cpp
Normal file
@ -0,0 +1,142 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <cstdio>
#include <cstdlib>

#include "common.h"
#include "s3fs.h"
#include "mvnode.h"

//-------------------------------------------------------------------
// Utility functions for moving objects
//-------------------------------------------------------------------
MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, bool normdir)
{
    MVNODE *p;
    char *p_old_path;
    char *p_new_path;

    p = new MVNODE();

    if(NULL == (p_old_path = strdup(old_path))){
        delete p;
        printf("create_mvnode: could not allocate memory for p_old_path\n");
        S3FS_FUSE_EXIT();
        return NULL;
    }

    if(NULL == (p_new_path = strdup(new_path))){
        delete p;
        free(p_old_path);
        printf("create_mvnode: could not allocate memory for p_new_path\n");
        S3FS_FUSE_EXIT();
        return NULL;
    }

    p->old_path   = p_old_path;
    p->new_path   = p_new_path;
    p->is_dir     = is_dir;
    p->is_normdir = normdir;
    p->prev       = NULL;
    p->next       = NULL;
    return p;
}

//
// Add sorted MVNODE data (ascending order)
//
MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const char *new_path, bool is_dir, bool normdir)
{
    if(!head || !tail){
        return NULL;
    }

    MVNODE* cur;
    MVNODE* mvnew;
    for(cur = *head; cur; cur = cur->next){
        if(cur->is_dir == is_dir){
            int nResult = strcmp(cur->old_path, old_path);
            if(0 == nResult){
                // Found same old_path.
                return cur;

            }else if(0 > nResult){
                // next check.
                // ex: cur("abc"), mvnew("abcd")
                // ex: cur("abc"), mvnew("abd")
                continue;

            }else{
                // Add before the current position.
                // ex: cur("abc"), mvnew("ab")
                // ex: cur("abc"), mvnew("abb")
                if(NULL == (mvnew = create_mvnode(old_path, new_path, is_dir, normdir))){
                    return NULL;
                }
                if(cur->prev){
                    (cur->prev)->next = mvnew;
                }else{
                    *head = mvnew;
                }
                mvnew->prev = cur->prev;
                mvnew->next = cur;
                cur->prev   = mvnew;

                return mvnew;
            }
        }
    }
    // Add at the tail.
    if(NULL == (mvnew = create_mvnode(old_path, new_path, is_dir, normdir))){
        return NULL;
    }
    mvnew->prev = (*tail);
    if(*tail){
        (*tail)->next = mvnew;
    }
    (*tail) = mvnew;
    if(!(*head)){
        (*head) = mvnew;
    }
    return mvnew;
}

void free_mvnodes(MVNODE *head)
{
    MVNODE *my_head;
    MVNODE *next;

    for(my_head = head, next = NULL; my_head; my_head = next){
        next = my_head->next;
        free(my_head->old_path);
        free(my_head->new_path);
        delete my_head;
    }
}

/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/
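A minimal, hypothetical driver for the MVNODE helpers above (the paths are invented; inside s3fs the real caller is the directory-rename logic):

// sketch: assumes mvnode.h/mvnode.cpp from this commit are built and linked
#include <cstdio>
#include "mvnode.h"

int main()
{
    MVNODE* head = NULL;
    MVNODE* tail = NULL;

    // Insert out of order; add_mvnode() keeps each is_dir group sorted by old_path.
    add_mvnode(&head, &tail, "dir/b.txt", "newdir/b.txt", false);
    add_mvnode(&head, &tail, "dir/a.txt", "newdir/a.txt", false);
    add_mvnode(&head, &tail, "dir/",      "newdir/",      true);

    for(MVNODE* cur = head; cur; cur = cur->next){
        printf("%s -> %s (%s)\n", cur->old_path, cur->new_path, cur->is_dir ? "dir" : "file");
    }
    free_mvnodes(head);
    return 0;
}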
53
src/mvnode.h
Normal file
@ -0,0 +1,53 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_MVNODE_H_
|
||||
#define S3FS_MVNODE_H_
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Structure
|
||||
//-------------------------------------------------------------------
|
||||
typedef struct mvnode
|
||||
{
|
||||
char* old_path;
|
||||
char* new_path;
|
||||
bool is_dir;
|
||||
bool is_normdir;
|
||||
struct mvnode* prev;
|
||||
struct mvnode* next;
|
||||
} MVNODE;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility functions for moving objects
|
||||
//-------------------------------------------------------------------
|
||||
MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, bool normdir = false);
|
||||
MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const char *new_path, bool is_dir, bool normdir = false);
|
||||
void free_mvnodes(MVNODE *head);
|
||||
|
||||
#endif // S3FS_MVNODE_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
261
src/nss_auth.cpp
@ -35,6 +35,7 @@
|
||||
#include <map>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_auth.h"
|
||||
|
||||
using namespace std;
|
||||
@ -44,9 +45,9 @@ using namespace std;
|
||||
//-------------------------------------------------------------------
|
||||
const char* s3fs_crypt_lib_name()
|
||||
{
|
||||
static const char version[] = "NSS";
|
||||
static const char version[] = "NSS";
|
||||
|
||||
return version;
|
||||
return version;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@ -54,21 +55,21 @@ const char* s3fs_crypt_lib_name()
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_global_ssl()
|
||||
{
|
||||
PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);
|
||||
PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);
|
||||
|
||||
if(SECSuccess != NSS_NoDB_Init(NULL)){
|
||||
S3FS_PRN_ERR("Failed NSS_NoDB_Init call.");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
if(SECSuccess != NSS_NoDB_Init(NULL)){
|
||||
S3FS_PRN_ERR("Failed NSS_NoDB_Init call.");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_global_ssl()
|
||||
{
|
||||
NSS_Shutdown();
|
||||
PL_ArenaFinish();
|
||||
PR_Cleanup();
|
||||
return true;
|
||||
NSS_Shutdown();
|
||||
PL_ArenaFinish();
|
||||
PR_Cleanup();
|
||||
return true;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@ -76,12 +77,12 @@ bool s3fs_destroy_global_ssl()
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_crypt_mutex()
|
||||
{
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_crypt_mutex()
|
||||
{
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@ -89,58 +90,58 @@ bool s3fs_destroy_crypt_mutex()
|
||||
//-------------------------------------------------------------------
|
||||
static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool is_sha256)
|
||||
{
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
}
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
}
|
||||
|
||||
PK11SlotInfo* Slot;
|
||||
PK11SymKey* pKey;
|
||||
PK11Context* Context;
|
||||
unsigned char tmpdigest[64];
|
||||
SECItem KeySecItem = {siBuffer, reinterpret_cast<unsigned char*>(const_cast<void*>(key)), static_cast<unsigned int>(keylen)};
|
||||
SECItem NullSecItem = {siBuffer, NULL, 0};
|
||||
PK11SlotInfo* Slot;
|
||||
PK11SymKey* pKey;
|
||||
PK11Context* Context;
|
||||
unsigned char tmpdigest[64];
|
||||
SECItem KeySecItem = {siBuffer, reinterpret_cast<unsigned char*>(const_cast<void*>(key)), static_cast<unsigned int>(keylen)};
|
||||
SECItem NullSecItem = {siBuffer, NULL, 0};
|
||||
|
||||
if(NULL == (Slot = PK11_GetInternalKeySlot())){
|
||||
return false;
|
||||
}
|
||||
if(NULL == (pKey = PK11_ImportSymKey(Slot, (is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), PK11_OriginUnwrap, CKA_SIGN, &KeySecItem, NULL))){
|
||||
PK11_FreeSlot(Slot);
|
||||
return false;
|
||||
}
|
||||
if(NULL == (Context = PK11_CreateContextBySymKey((is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), CKA_SIGN, pKey, &NullSecItem))){
|
||||
PK11_FreeSymKey(pKey);
|
||||
PK11_FreeSlot(Slot);
|
||||
return false;
|
||||
}
|
||||
if(NULL == (Slot = PK11_GetInternalKeySlot())){
|
||||
return false;
|
||||
}
|
||||
if(NULL == (pKey = PK11_ImportSymKey(Slot, (is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), PK11_OriginUnwrap, CKA_SIGN, &KeySecItem, NULL))){
|
||||
PK11_FreeSlot(Slot);
|
||||
return false;
|
||||
}
|
||||
if(NULL == (Context = PK11_CreateContextBySymKey((is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), CKA_SIGN, pKey, &NullSecItem))){
|
||||
PK11_FreeSymKey(pKey);
|
||||
PK11_FreeSlot(Slot);
|
||||
return false;
|
||||
}
|
||||
|
||||
*digestlen = 0;
|
||||
if(SECSuccess != PK11_DigestBegin(Context) ||
|
||||
SECSuccess != PK11_DigestOp(Context, data, datalen) ||
|
||||
SECSuccess != PK11_DigestFinal(Context, tmpdigest, digestlen, sizeof(tmpdigest)) )
|
||||
{
|
||||
*digestlen = 0;
|
||||
if(SECSuccess != PK11_DigestBegin(Context) ||
|
||||
SECSuccess != PK11_DigestOp(Context, data, datalen) ||
|
||||
SECSuccess != PK11_DigestFinal(Context, tmpdigest, digestlen, sizeof(tmpdigest)) )
|
||||
{
|
||||
PK11_DestroyContext(Context, PR_TRUE);
|
||||
PK11_FreeSymKey(pKey);
|
||||
PK11_FreeSlot(Slot);
|
||||
return false;
|
||||
}
|
||||
PK11_DestroyContext(Context, PR_TRUE);
|
||||
PK11_FreeSymKey(pKey);
|
||||
PK11_FreeSlot(Slot);
|
||||
return false;
|
||||
}
|
||||
PK11_DestroyContext(Context, PR_TRUE);
|
||||
PK11_FreeSymKey(pKey);
|
||||
PK11_FreeSlot(Slot);
|
||||
|
||||
*digest = new unsigned char[*digestlen];
|
||||
memcpy(*digest, tmpdigest, *digestlen);
|
||||
*digest = new unsigned char[*digestlen];
|
||||
memcpy(*digest, tmpdigest, *digestlen);
|
||||
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false);
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false);
|
||||
}
|
||||
|
||||
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true);
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true);
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@ -148,48 +149,48 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_md5_digest_length()
|
||||
{
|
||||
return MD5_LENGTH;
|
||||
return MD5_LENGTH;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
{
|
||||
PK11Context* md5ctx;
|
||||
unsigned char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
unsigned int md5outlen;
|
||||
PK11Context* md5ctx;
|
||||
unsigned char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
unsigned int md5outlen;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
}
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
|
||||
memset(buf, 0, 512);
|
||||
md5ctx = PK11_CreateDigestContext(SEC_OID_MD5);
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
PK11_DestroyContext(md5ctx, PR_TRUE);
|
||||
return NULL;
|
||||
}
|
||||
PK11_DigestOp(md5ctx, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
result = new unsigned char[get_md5_digest_length()];
|
||||
PK11_DigestFinal(md5ctx, result, &md5outlen, get_md5_digest_length());
|
||||
PK11_DestroyContext(md5ctx, PR_TRUE);
|
||||
md5ctx = PK11_CreateDigestContext(SEC_OID_MD5);
|
||||
|
||||
return result;
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
PK11_DestroyContext(md5ctx, PR_TRUE);
|
||||
return NULL;
|
||||
}
|
||||
PK11_DigestOp(md5ctx, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
result = new unsigned char[get_md5_digest_length()];
|
||||
PK11_DigestFinal(md5ctx, result, &md5outlen, get_md5_digest_length());
|
||||
PK11_DestroyContext(md5ctx, PR_TRUE);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@ -197,72 +198,72 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_sha256_digest_length()
|
||||
{
|
||||
return SHA256_LENGTH;
|
||||
return SHA256_LENGTH;
|
||||
}
|
||||
|
||||
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
*digest = new unsigned char[*digestlen];
|
||||
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
*digest = new unsigned char[*digestlen];
|
||||
|
||||
PK11Context* sha256ctx;
|
||||
unsigned int sha256outlen;
|
||||
sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256);
|
||||
PK11Context* sha256ctx;
|
||||
unsigned int sha256outlen;
|
||||
sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256);
|
||||
|
||||
PK11_DigestOp(sha256ctx, data, datalen);
|
||||
PK11_DigestFinal(sha256ctx, *digest, &sha256outlen, *digestlen);
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
*digestlen = sha256outlen;
|
||||
PK11_DigestOp(sha256ctx, data, datalen);
|
||||
PK11_DigestFinal(sha256ctx, *digest, &sha256outlen, *digestlen);
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
*digestlen = sha256outlen;
|
||||
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
{
|
||||
PK11Context* sha256ctx;
|
||||
unsigned char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
unsigned int sha256outlen;
|
||||
PK11Context* sha256ctx;
|
||||
unsigned char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
unsigned int sha256outlen;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
}
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
|
||||
memset(buf, 0, 512);
|
||||
sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256);
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
return NULL;
|
||||
}
|
||||
PK11_DigestOp(sha256ctx, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
result = new unsigned char[get_sha256_digest_length()];
|
||||
PK11_DigestFinal(sha256ctx, result, &sha256outlen, get_sha256_digest_length());
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256);
|
||||
|
||||
return result;
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
return NULL;
|
||||
}
|
||||
PK11_DigestOp(sha256ctx, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
result = new unsigned char[get_sha256_digest_length()];
|
||||
PK11_DigestFinal(sha256ctx, result, &sha256outlen, get_sha256_digest_length());
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 2
|
||||
* c-basic-offset: 2
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=2 ts=2 fdm=marker
|
||||
* vim<600: expandtab sw=2 ts=2
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
@ -39,6 +39,7 @@
|
||||
#include <map>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_auth.h"
|
||||
|
||||
using namespace std;
|
||||
@ -48,9 +49,9 @@ using namespace std;
|
||||
//-------------------------------------------------------------------
|
||||
const char* s3fs_crypt_lib_name()
|
||||
{
|
||||
static const char version[] = "OpenSSL";
|
||||
static const char version[] = "OpenSSL";
|
||||
|
||||
return version;
|
||||
return version;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@ -58,17 +59,17 @@ const char* s3fs_crypt_lib_name()
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_global_ssl()
|
||||
{
|
||||
ERR_load_crypto_strings();
|
||||
ERR_load_BIO_strings();
|
||||
OpenSSL_add_all_algorithms();
|
||||
return true;
|
||||
ERR_load_crypto_strings();
|
||||
ERR_load_BIO_strings();
|
||||
OpenSSL_add_all_algorithms();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_global_ssl()
|
||||
{
|
||||
EVP_cleanup();
|
||||
ERR_free_strings();
|
||||
return true;
|
||||
EVP_cleanup();
|
||||
ERR_free_strings();
|
||||
return true;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@ -77,7 +78,7 @@ bool s3fs_destroy_global_ssl()
|
||||
// internal use struct for openssl
|
||||
struct CRYPTO_dynlock_value
|
||||
{
|
||||
pthread_mutex_t dyn_mutex;
|
||||
pthread_mutex_t dyn_mutex;
|
||||
};
|
||||
|
||||
static pthread_mutex_t* s3fs_crypt_mutex = NULL;
|
||||
@ -85,136 +86,136 @@ static pthread_mutex_t* s3fs_crypt_mutex = NULL;
|
||||
static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line) __attribute__ ((unused));
|
||||
static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line)
|
||||
{
|
||||
if(s3fs_crypt_mutex){
|
||||
int res;
|
||||
if(mode & CRYPTO_LOCK){
|
||||
if(0 != (res = pthread_mutex_lock(&s3fs_crypt_mutex[pos]))){
|
||||
S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res);
|
||||
abort();
|
||||
}
|
||||
}else{
|
||||
if(0 != (res = pthread_mutex_unlock(&s3fs_crypt_mutex[pos]))){
|
||||
S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", res);
|
||||
abort();
|
||||
}
|
||||
if(s3fs_crypt_mutex){
|
||||
int res;
|
||||
if(mode & CRYPTO_LOCK){
|
||||
if(0 != (res = pthread_mutex_lock(&s3fs_crypt_mutex[pos]))){
|
||||
S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res);
|
||||
abort();
|
||||
}
|
||||
}else{
|
||||
if(0 != (res = pthread_mutex_unlock(&s3fs_crypt_mutex[pos]))){
|
||||
S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", res);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned long s3fs_crypt_get_threadid() __attribute__ ((unused));
|
||||
static unsigned long s3fs_crypt_get_threadid()
|
||||
{
|
||||
// For FreeBSD etc, some system's pthread_t is structure pointer.
|
||||
// Then we use cast like C style(not C++) instead of ifdef.
|
||||
return (unsigned long)(pthread_self());
|
||||
// For FreeBSD etc, some system's pthread_t is structure pointer.
|
||||
// Then we use cast like C style(not C++) instead of ifdef.
|
||||
return (unsigned long)(pthread_self());
|
||||
}
|
||||
|
||||
static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line) __attribute__ ((unused));
|
||||
static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line)
|
||||
{
|
||||
struct CRYPTO_dynlock_value* dyndata = new CRYPTO_dynlock_value();
|
||||
pthread_mutexattr_t attr;
|
||||
pthread_mutexattr_init(&attr);
|
||||
struct CRYPTO_dynlock_value* dyndata = new CRYPTO_dynlock_value();
|
||||
pthread_mutexattr_t attr;
|
||||
pthread_mutexattr_init(&attr);
|
||||
#if S3FS_PTHREAD_ERRORCHECK
|
||||
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
|
||||
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
|
||||
#endif
|
||||
int res;
|
||||
if(0 != (res = pthread_mutex_init(&(dyndata->dyn_mutex), &attr))){
|
||||
S3FS_PRN_CRIT("pthread_mutex_init returned: %d", res);
|
||||
return NULL;
|
||||
}
|
||||
return dyndata;
|
||||
int res;
|
||||
if(0 != (res = pthread_mutex_init(&(dyndata->dyn_mutex), &attr))){
|
||||
S3FS_PRN_CRIT("pthread_mutex_init returned: %d", res);
|
||||
return NULL;
|
||||
}
|
||||
return dyndata;
|
||||
}
|
||||
|
||||
static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line) __attribute__ ((unused));
|
||||
static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line)
|
||||
{
|
||||
if(dyndata){
|
||||
int res;
|
||||
if(mode & CRYPTO_LOCK){
|
||||
if(0 != (res = pthread_mutex_lock(&(dyndata->dyn_mutex)))){
|
||||
S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res);
|
||||
abort();
|
||||
}
|
||||
}else{
|
||||
if(0 != (res = pthread_mutex_unlock(&(dyndata->dyn_mutex)))){
|
||||
S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", res);
|
||||
abort();
|
||||
}
|
||||
if(dyndata){
|
||||
int res;
|
||||
if(mode & CRYPTO_LOCK){
|
||||
if(0 != (res = pthread_mutex_lock(&(dyndata->dyn_mutex)))){
|
||||
S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res);
|
||||
abort();
|
||||
}
|
||||
}else{
|
||||
if(0 != (res = pthread_mutex_unlock(&(dyndata->dyn_mutex)))){
|
||||
S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", res);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, const char* file, int line) __attribute__ ((unused));
|
||||
static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, const char* file, int line)
|
||||
{
|
||||
if(dyndata){
|
||||
int res = pthread_mutex_destroy(&(dyndata->dyn_mutex));
|
||||
if(res != 0){
|
||||
S3FS_PRN_CRIT("failed to destroy dyn_mutex");
|
||||
abort();
|
||||
if(dyndata){
|
||||
int res = pthread_mutex_destroy(&(dyndata->dyn_mutex));
|
||||
if(res != 0){
|
||||
S3FS_PRN_CRIT("failed to destroy dyn_mutex");
|
||||
abort();
|
||||
}
|
||||
delete dyndata;
|
||||
}
|
||||
delete dyndata;
|
||||
}
|
||||
}
|
||||
|
||||
bool s3fs_init_crypt_mutex()
|
||||
{
|
||||
if(s3fs_crypt_mutex){
|
||||
S3FS_PRN_DBG("s3fs_crypt_mutex is not NULL, destroy it.");
|
||||
if(!s3fs_destroy_crypt_mutex()){
|
||||
S3FS_PRN_ERR("Failed to s3fs_crypt_mutex");
|
||||
return false;
|
||||
if(s3fs_crypt_mutex){
|
||||
S3FS_PRN_DBG("s3fs_crypt_mutex is not NULL, destroy it.");
|
||||
if(!s3fs_destroy_crypt_mutex()){
|
||||
S3FS_PRN_ERR("Failed to s3fs_crypt_mutex");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
s3fs_crypt_mutex = new pthread_mutex_t[CRYPTO_num_locks()];
|
||||
pthread_mutexattr_t attr;
|
||||
pthread_mutexattr_init(&attr);
|
||||
s3fs_crypt_mutex = new pthread_mutex_t[CRYPTO_num_locks()];
|
||||
pthread_mutexattr_t attr;
|
||||
pthread_mutexattr_init(&attr);
|
||||
#if S3FS_PTHREAD_ERRORCHECK
|
||||
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
|
||||
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
|
||||
#endif
|
||||
for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
|
||||
int res = pthread_mutex_init(&s3fs_crypt_mutex[cnt], &attr);
|
||||
if(res != 0){
|
||||
S3FS_PRN_CRIT("pthread_mutex_init returned: %d", res);
|
||||
return false;
|
||||
for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
|
||||
int res = pthread_mutex_init(&s3fs_crypt_mutex[cnt], &attr);
|
||||
if(res != 0){
|
||||
S3FS_PRN_CRIT("pthread_mutex_init returned: %d", res);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
// static lock
|
||||
CRYPTO_set_locking_callback(s3fs_crypt_mutex_lock);
|
||||
CRYPTO_set_id_callback(s3fs_crypt_get_threadid);
|
||||
// dynamic lock
|
||||
CRYPTO_set_dynlock_create_callback(s3fs_dyn_crypt_mutex);
|
||||
CRYPTO_set_dynlock_lock_callback(s3fs_dyn_crypt_mutex_lock);
|
||||
CRYPTO_set_dynlock_destroy_callback(s3fs_destroy_dyn_crypt_mutex);
|
||||
// static lock
|
||||
CRYPTO_set_locking_callback(s3fs_crypt_mutex_lock);
|
||||
CRYPTO_set_id_callback(s3fs_crypt_get_threadid);
|
||||
// dynamic lock
|
||||
CRYPTO_set_dynlock_create_callback(s3fs_dyn_crypt_mutex);
|
||||
CRYPTO_set_dynlock_lock_callback(s3fs_dyn_crypt_mutex_lock);
|
||||
CRYPTO_set_dynlock_destroy_callback(s3fs_destroy_dyn_crypt_mutex);
|
||||
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_crypt_mutex()
|
||||
{
|
||||
if(!s3fs_crypt_mutex){
|
||||
return true;
|
||||
}
|
||||
|
||||
CRYPTO_set_dynlock_destroy_callback(NULL);
|
||||
CRYPTO_set_dynlock_lock_callback(NULL);
|
||||
CRYPTO_set_dynlock_create_callback(NULL);
|
||||
CRYPTO_set_id_callback(NULL);
|
||||
CRYPTO_set_locking_callback(NULL);
|
||||
|
||||
for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
|
||||
int res = pthread_mutex_destroy(&s3fs_crypt_mutex[cnt]);
|
||||
if(res != 0){
|
||||
S3FS_PRN_CRIT("failed to destroy s3fs_crypt_mutex[%d]", cnt);
|
||||
abort();
|
||||
if(!s3fs_crypt_mutex){
|
||||
return true;
|
||||
}
|
||||
}
|
||||
CRYPTO_cleanup_all_ex_data();
|
||||
delete[] s3fs_crypt_mutex;
|
||||
s3fs_crypt_mutex = NULL;
|
||||
|
||||
return true;
|
||||
CRYPTO_set_dynlock_destroy_callback(NULL);
|
||||
CRYPTO_set_dynlock_lock_callback(NULL);
|
||||
CRYPTO_set_dynlock_create_callback(NULL);
|
||||
CRYPTO_set_id_callback(NULL);
|
||||
CRYPTO_set_locking_callback(NULL);
|
||||
|
||||
for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
|
||||
int res = pthread_mutex_destroy(&s3fs_crypt_mutex[cnt]);
|
||||
if(res != 0){
|
||||
S3FS_PRN_CRIT("failed to destroy s3fs_crypt_mutex[%d]", cnt);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
CRYPTO_cleanup_all_ex_data();
|
||||
delete[] s3fs_crypt_mutex;
|
||||
s3fs_crypt_mutex = NULL;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@ -222,28 +223,28 @@ bool s3fs_destroy_crypt_mutex()
|
||||
//-------------------------------------------------------------------
|
||||
static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool is_sha256)
|
||||
{
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
}
|
||||
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
|
||||
*digest = new unsigned char[*digestlen];
|
||||
if(is_sha256){
|
||||
HMAC(EVP_sha256(), key, keylen, data, datalen, *digest, digestlen);
|
||||
}else{
|
||||
HMAC(EVP_sha1(), key, keylen, data, datalen, *digest, digestlen);
|
||||
}
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
}
|
||||
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
|
||||
*digest = new unsigned char[*digestlen];
|
||||
if(is_sha256){
|
||||
HMAC(EVP_sha256(), key, keylen, data, datalen, *digest, digestlen);
|
||||
}else{
|
||||
HMAC(EVP_sha1(), key, keylen, data, datalen, *digest, digestlen);
|
||||
}
|
||||
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false);
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false);
|
||||
}
|
||||
|
||||
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true);
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true);
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@ -251,46 +252,46 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_md5_digest_length()
|
||||
{
|
||||
return MD5_DIGEST_LENGTH;
|
||||
return MD5_DIGEST_LENGTH;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
{
|
||||
MD5_CTX md5ctx;
|
||||
char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
MD5_CTX md5ctx;
|
||||
char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
}
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
|
||||
memset(buf, 0, 512);
|
||||
MD5_Init(&md5ctx);
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return NULL;
|
||||
}
|
||||
MD5_Update(&md5ctx, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
MD5_Init(&md5ctx);
|
||||
|
||||
result = new unsigned char[get_md5_digest_length()];
|
||||
MD5_Final(result, &md5ctx);
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return NULL;
|
||||
}
|
||||
MD5_Update(&md5ctx, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
|
||||
return result;
|
||||
result = new unsigned char[get_md5_digest_length()];
|
||||
MD5_Final(result, &md5ctx);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@ -298,71 +299,71 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_sha256_digest_length()
|
||||
{
|
||||
return SHA256_DIGEST_LENGTH;
|
||||
return SHA256_DIGEST_LENGTH;
|
||||
}
|
||||
|
||||
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
|
||||
*digest = new unsigned char[*digestlen];
|
||||
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
|
||||
*digest = new unsigned char[*digestlen];
|
||||
|
||||
const EVP_MD* md = EVP_get_digestbyname("sha256");
|
||||
EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
|
||||
EVP_DigestInit_ex(mdctx, md, NULL);
|
||||
EVP_DigestUpdate(mdctx, data, datalen);
|
||||
EVP_DigestFinal_ex(mdctx, *digest, digestlen);
|
||||
EVP_MD_CTX_destroy(mdctx);
|
||||
const EVP_MD* md = EVP_get_digestbyname("sha256");
|
||||
EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
|
||||
EVP_DigestInit_ex(mdctx, md, NULL);
|
||||
EVP_DigestUpdate(mdctx, data, datalen);
|
||||
EVP_DigestFinal_ex(mdctx, *digest, digestlen);
|
||||
EVP_MD_CTX_destroy(mdctx);
|
||||
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
{
|
||||
const EVP_MD* md = EVP_get_digestbyname("sha256");
|
||||
EVP_MD_CTX* sha256ctx;
|
||||
char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
const EVP_MD* md = EVP_get_digestbyname("sha256");
|
||||
EVP_MD_CTX* sha256ctx;
|
||||
char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
}
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
|
||||
sha256ctx = EVP_MD_CTX_create();
|
||||
EVP_DigestInit_ex(sha256ctx, md, NULL);
|
||||
sha256ctx = EVP_MD_CTX_create();
|
||||
EVP_DigestInit_ex(sha256ctx, md, NULL);
|
||||
|
||||
memset(buf, 0, 512);
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
EVP_MD_CTX_destroy(sha256ctx);
|
||||
return NULL;
|
||||
}
|
||||
EVP_DigestUpdate(sha256ctx, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
result = new unsigned char[get_sha256_digest_length()];
|
||||
EVP_DigestFinal_ex(sha256ctx, result, NULL);
|
||||
EVP_MD_CTX_destroy(sha256ctx);
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
EVP_MD_CTX_destroy(sha256ctx);
|
||||
return NULL;
|
||||
}
|
||||
EVP_DigestUpdate(sha256ctx, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
result = new unsigned char[get_sha256_digest_length()];
|
||||
EVP_DigestFinal_ex(sha256ctx, result, NULL);
|
||||
EVP_MD_CTX_destroy(sha256ctx);
|
||||
|
||||
return result;
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 2
|
||||
* c-basic-offset: 2
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=2 ts=2 fdm=marker
|
||||
* vim<600: expandtab sw=2 ts=2
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
@ -21,29 +21,33 @@
|
||||
#ifndef S3FS_SEMAPHORE_H_
|
||||
#define S3FS_SEMAPHORE_H_
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class Semaphore
|
||||
//-------------------------------------------------------------------
|
||||
// portability wrapper for sem_t since macOS does not implement it
|
||||
|
||||
#ifdef __APPLE__
|
||||
|
||||
#include <dispatch/dispatch.h>
|
||||
|
||||
class Semaphore
|
||||
{
|
||||
public:
|
||||
explicit Semaphore(int value) : value(value), sem(dispatch_semaphore_create(value)) {}
|
||||
~Semaphore() {
|
||||
// macOS cannot destroy a semaphore with posts less than the initializer
|
||||
for(int i = 0; i < get_value(); ++i){
|
||||
post();
|
||||
}
|
||||
dispatch_release(sem);
|
||||
}
|
||||
void wait() { dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER); }
|
||||
void post() { dispatch_semaphore_signal(sem); }
|
||||
int get_value() const { return value; }
|
||||
private:
|
||||
const int value;
|
||||
dispatch_semaphore_t sem;
|
||||
public:
|
||||
explicit Semaphore(int value) : value(value), sem(dispatch_semaphore_create(value)) {}
|
||||
~Semaphore()
|
||||
{
|
||||
// macOS cannot destroy a semaphore with posts less than the initializer
|
||||
for(int i = 0; i < get_value(); ++i){
|
||||
post();
|
||||
}
|
||||
dispatch_release(sem);
|
||||
}
|
||||
void wait() { dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER); }
|
||||
void post() { dispatch_semaphore_signal(sem); }
|
||||
int get_value() const { return value; }
|
||||
|
||||
private:
|
||||
const int value;
|
||||
dispatch_semaphore_t sem;
|
||||
};
|
||||
|
||||
#else
|
||||
@ -53,31 +57,33 @@ class Semaphore
|
||||
|
||||
class Semaphore
|
||||
{
|
||||
public:
|
||||
explicit Semaphore(int value) : value(value) { sem_init(&mutex, 0, value); }
|
||||
~Semaphore() { sem_destroy(&mutex); }
|
||||
void wait()
|
||||
{
|
||||
int r;
|
||||
do {
|
||||
r = sem_wait(&mutex);
|
||||
} while (r == -1 && errno == EINTR);
|
||||
}
|
||||
void post() { sem_post(&mutex); }
|
||||
int get_value() const { return value; }
|
||||
private:
|
||||
const int value;
|
||||
sem_t mutex;
|
||||
public:
|
||||
explicit Semaphore(int value) : value(value) { sem_init(&mutex, 0, value); }
|
||||
~Semaphore() { sem_destroy(&mutex); }
|
||||
void wait()
|
||||
{
|
||||
int r;
|
||||
do {
|
||||
r = sem_wait(&mutex);
|
||||
} while (r == -1 && errno == EINTR);
|
||||
}
|
||||
void post() { sem_post(&mutex); }
|
||||
int get_value() const { return value; }
|
||||
|
||||
private:
|
||||
const int value;
|
||||
sem_t mutex;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
#endif // S3FS_SEMAPHORE_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 2
|
||||
* c-basic-offset: 2
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=2 ts=2 fdm=marker
|
||||
* vim<600: expandtab sw=2 ts=2
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
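Both Semaphore variants above expose the same wait()/post()/get_value() interface; a small usage sketch follows (the include name and the count of 2 are assumptions for illustration, not taken from this diff):

// sketch only: the header file name is assumed, since the diff above does not show it
#include "psemaphore.h"
#include <cstdio>

int main()
{
    Semaphore sem(2);      // allow at most two concurrent holders

    sem.wait();            // acquire
    sem.wait();            // acquire; a third wait() would block until a post()
    // ... bounded work would go here ...
    sem.post();            // release
    sem.post();            // release

    printf("initial value: %d\n", sem.get_value());   // -> 2 (get_value() reports the constructor value)
    return 0;
}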
8687
src/s3fs.cpp
File diff suppressed because it is too large
29
src/s3fs.h
@ -17,22 +17,21 @@
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
#ifndef S3FS_S3_H_
|
||||
#define S3FS_S3_H_
|
||||
|
||||
#ifndef S3FS_S3FS_H_
|
||||
#define S3FS_S3FS_H_
|
||||
|
||||
#define FUSE_USE_VERSION 26
|
||||
|
||||
static const int64_t FIVE_GB = 5LL * 1024LL * 1024LL * 1024LL;
|
||||
|
||||
#include <fuse.h>
|
||||
|
||||
#define S3FS_FUSE_EXIT() \
|
||||
do{ \
|
||||
struct fuse_context* pcxt = fuse_get_context(); \
|
||||
if(pcxt){ \
|
||||
fuse_exit(pcxt->fuse); \
|
||||
} \
|
||||
}while(0)
|
||||
do{ \
|
||||
struct fuse_context* pcxt = fuse_get_context(); \
|
||||
if(pcxt){ \
|
||||
fuse_exit(pcxt->fuse); \
|
||||
} \
|
||||
}while(0)
|
||||
|
||||
// [NOTE]
|
||||
// s3fs use many small allocated chunk in heap area for stats
|
||||
@ -81,13 +80,13 @@ do{ \
|
||||
S3FS_MALLOCTRIM(0); \
|
||||
}while(0)
|
||||
|
||||
#endif // S3FS_S3_H_
|
||||
#endif // S3FS_S3FS_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 2
|
||||
* c-basic-offset: 2
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=2 ts=2 fdm=marker
|
||||
* vim<600: expandtab sw=2 ts=2
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
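The S3FS_FUSE_EXIT macro in the hunk above is wrapped in do{ ... }while(0); that is the standard C/C++ idiom for making a multi-statement macro behave like a single statement (generic illustration only, not code from this commit):

// Without do/while(0), a brace-wrapped macro breaks if/else chains:
//     if(cond) BRACED_MACRO(); else ...   // the ';' ends the if, so 'else' no longer matches
// With do/while(0) the expansion plus the trailing ';' forms exactly one statement:
#define SAFE_EXIT() do{ cleanup(); bail_out(); }while(0)

static void cleanup()  {}
static void bail_out() {}

int main(int argc, char**)
{
    if(argc > 1)
        SAFE_EXIT();   // expands safely inside an unbraced if
    else
        return 1;
    return 0;
}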
|
||||
|
@ -17,6 +17,7 @@
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_AUTH_H_
|
||||
#define S3FS_AUTH_H_
|
||||
|
||||
@ -53,9 +54,9 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size);
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 2
|
||||
* c-basic-offset: 2
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=2 ts=2 fdm=marker
|
||||
* vim<600: expandtab sw=2 ts=2
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
51
src/s3fs_global.cpp
Normal file
@ -0,0 +1,51 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <sys/types.h>
#include <string>

//-------------------------------------------------------------------
// Global variables
//-------------------------------------------------------------------
int64_t FIVE_GB = 5LL * 1024LL * 1024LL * 1024LL;
off_t MIN_MULTIPART_SIZE = 5 * 1024 * 1024;

bool foreground = false;
bool nomultipart = false;
bool pathrequeststyle = false;
bool complement_stat = false;
bool noxmlns = false;
std::string program_name;
std::string service_path = "/";
std::string s3host = "https://s3.amazonaws.com";
std::string bucket;
std::string endpoint = "us-east-1";
std::string cipher_suites;
std::string instance_name;
std::string aws_profile = "default";

/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/
524
src/s3fs_help.cpp
Normal file
@ -0,0 +1,524 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include <string>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_help.h"
|
||||
#include "s3fs_auth.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Contents
|
||||
//-------------------------------------------------------------------
|
||||
static const char help_string[] =
|
||||
"\n"
|
||||
"Mount an Amazon S3 bucket as a file system.\n"
|
||||
"\n"
|
||||
"Usage:\n"
|
||||
" mounting\n"
|
||||
" s3fs bucket[:/path] mountpoint [options]\n"
|
||||
" s3fs mountpoint [options (must specify bucket= option)]\n"
|
||||
"\n"
|
||||
" unmounting\n"
|
||||
" umount mountpoint\n"
|
||||
"\n"
|
||||
" General forms for s3fs and FUSE/mount options:\n"
|
||||
" -o opt[,opt...]\n"
|
||||
" -o opt [-o opt] ...\n"
|
||||
"\n"
|
||||
" utility mode (remove interrupted multipart uploading objects)\n"
|
||||
" s3fs --incomplete-mpu-list (-u) bucket\n"
|
||||
" s3fs --incomplete-mpu-abort[=all | =<date format>] bucket\n"
|
||||
"\n"
|
||||
"s3fs Options:\n"
|
||||
"\n"
|
||||
" Most s3fs options are given in the form where \"opt\" is:\n"
|
||||
"\n"
|
||||
" <option_name>=<option_value>\n"
|
||||
"\n"
|
||||
" bucket\n"
|
||||
" - if it is not specified bucket name (and path) in command line,\n"
|
||||
" must specify this option after -o option for bucket name.\n"
|
||||
"\n"
|
||||
" default_acl (default=\"private\")\n"
|
||||
" - the default canned acl to apply to all written s3 objects,\n"
|
||||
" e.g., private, public-read. see\n"
|
||||
" https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl\n"
|
||||
" for the full list of canned acls\n"
|
||||
"\n"
|
||||
" retries (default=\"5\")\n"
|
||||
" - number of times to retry a failed S3 transaction\n"
|
||||
"\n"
|
||||
" use_cache (default=\"\" which means disabled)\n"
|
||||
" - local folder to use for local file cache\n"
|
||||
"\n"
|
||||
" check_cache_dir_exist (default is disable)\n"
|
||||
" - if use_cache is set, check if the cache directory exists.\n"
|
||||
" If this option is not specified, it will be created at runtime\n"
|
||||
" when the cache directory does not exist.\n"
|
||||
"\n"
|
||||
" del_cache (delete local file cache)\n"
|
||||
" - delete local file cache when s3fs starts and exits.\n"
|
||||
"\n"
|
||||
" storage_class (default=\"standard\")\n"
|
||||
" - store object with specified storage class. Possible values:\n"
|
||||
" standard, standard_ia, onezone_ia, reduced_redundancy,\n"
|
||||
" intelligent_tiering, glacier, and deep_archive.\n"
|
||||
"\n"
|
||||
" use_rrs (default is disable)\n"
|
||||
" - use Amazon's Reduced Redundancy Storage.\n"
|
||||
" this option can not be specified with use_sse.\n"
|
||||
" (can specify use_rrs=1 for old version)\n"
|
||||
" this option has been replaced by new storage_class option.\n"
|
||||
"\n"
|
||||
" use_sse (default is disable)\n"
|
||||
" - Specify three type Amazon's Server-Site Encryption: SSE-S3,\n"
|
||||
" SSE-C or SSE-KMS. SSE-S3 uses Amazon S3-managed encryption\n"
|
||||
" keys, SSE-C uses customer-provided encryption keys, and\n"
|
||||
" SSE-KMS uses the master key which you manage in AWS KMS.\n"
|
||||
" You can specify \"use_sse\" or \"use_sse=1\" enables SSE-S3\n"
|
||||
" type (use_sse=1 is old type parameter).\n"
|
||||
" Case of setting SSE-C, you can specify \"use_sse=custom\",\n"
|
||||
" \"use_sse=custom:<custom key file path>\" or\n"
|
||||
" \"use_sse=<custom key file path>\" (only <custom key file path>\n"
|
||||
" specified is old type parameter). You can use \"c\" for\n"
|
||||
" short \"custom\".\n"
|
||||
" The custom key file must be 600 permission. The file can\n"
|
||||
" have some lines, each line is one SSE-C key. The first line\n"
|
||||
" in file is used as Customer-Provided Encryption Keys for\n"
|
||||
" uploading and changing headers etc. If there are some keys\n"
|
||||
" after first line, those are used downloading object which\n"
|
||||
" are encrypted by not first key. So that, you can keep all\n"
|
||||
" SSE-C keys in file, that is SSE-C key history.\n"
|
||||
" If you specify \"custom\" (\"c\") without file path, you\n"
|
||||
" need to set custom key by load_sse_c option or AWSSSECKEYS\n"
|
||||
" environment. (AWSSSECKEYS environment has some SSE-C keys\n"
|
||||
" with \":\" separator.) This option is used to decide the\n"
|
||||
" SSE type. So that if you do not want to encrypt a object\n"
|
||||
" object at uploading, but you need to decrypt encrypted\n"
|
||||
" object at downloading, you can use load_sse_c option instead\n"
|
||||
" of this option.\n"
|
||||
" For setting SSE-KMS, specify \"use_sse=kmsid\" or\n"
|
||||
" \"use_sse=kmsid:<kms id>\". You can use \"k\" for short \"kmsid\".\n"
|
||||
" If you san specify SSE-KMS type with your <kms id> in AWS\n"
|
||||
" KMS, you can set it after \"kmsid:\" (or \"k:\"). If you\n"
|
||||
" specify only \"kmsid\" (\"k\"), you need to set AWSSSEKMSID\n"
|
||||
" environment which value is <kms id>. You must be careful\n"
|
||||
" about that you can not use the KMS id which is not same EC2\n"
|
||||
" region.\n"
|
||||
"\n"
|
||||
" load_sse_c - specify SSE-C keys\n"
|
||||
" Specify the custom-provided encryption keys file path for decrypting\n"
|
||||
" at downloading.\n"
|
||||
" If you use the custom-provided encryption key at uploading, you\n"
|
||||
" specify with \"use_sse=custom\". The file has many lines, one line\n"
|
||||
" means one custom key. So that you can keep all SSE-C keys in file,\n"
|
||||
" that is SSE-C key history. AWSSSECKEYS environment is as same as this\n"
|
||||
" file contents.\n"
|
||||
"\n"
|
||||
" public_bucket (default=\"\" which means disabled)\n"
|
||||
" - anonymously mount a public bucket when set to 1, ignores the \n"
|
||||
" $HOME/.passwd-s3fs and /etc/passwd-s3fs files.\n"
|
||||
" S3 does not allow copy object api for anonymous users, then\n"
|
||||
" s3fs sets nocopyapi option automatically when public_bucket=1\n"
|
||||
" option is specified.\n"
|
||||
"\n"
|
||||
" passwd_file (default=\"\")\n"
|
||||
" - specify which s3fs password file to use\n"
|
||||
"\n"
|
||||
" ahbe_conf (default=\"\" which means disabled)\n"
|
||||
" - This option specifies the configuration file path which\n"
|
||||
" file is the additional HTTP header by file (object) extension.\n"
|
||||
" The configuration file format is below:\n"
|
||||
" -----------\n"
|
||||
" line = [file suffix or regex] HTTP-header [HTTP-values]\n"
|
||||
" file suffix = file (object) suffix, if this field is empty,\n"
|
||||
" it means \"reg:(.*)\".(=all object).\n"
|
||||
" regex = regular expression to match the file (object) path.\n"
|
||||
" this type starts with \"reg:\" prefix.\n"
|
||||
" HTTP-header = additional HTTP header name\n"
|
||||
" HTTP-values = additional HTTP header value\n"
|
||||
" -----------\n"
|
||||
" Sample:\n"
|
||||
" -----------\n"
|
||||
" .gz Content-Encoding gzip\n"
|
||||
" .Z Content-Encoding compress\n"
|
||||
" reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2\n"
|
||||
" -----------\n"
|
||||
" A sample configuration file is uploaded in \"test\" directory.\n"
|
||||
" If you specify this option for set \"Content-Encoding\" HTTP \n"
|
||||
" header, please take care for RFC 2616.\n"
|
||||
"\n"
|
||||
" profile (default=\"default\")\n"
|
||||
" - Choose a profile from ${HOME}/.aws/credentials to authenticate\n"
|
||||
" against S3. Note that this format matches the AWS CLI format and\n"
|
||||
" differs from the s3fs passwd format.\n"
|
||||
"\n"
|
||||
" connect_timeout (default=\"300\" seconds)\n"
|
||||
" - time to wait for connection before giving up\n"
|
||||
"\n"
|
||||
" readwrite_timeout (default=\"120\" seconds)\n"
|
||||
" - time to wait between read/write activity before giving up\n"
|
||||
"\n"
|
||||
" list_object_max_keys (default=\"1000\")\n"
|
||||
" - specify the maximum number of keys returned by S3 list object\n"
|
||||
" API. The default is 1000. you can set this value to 1000 or more.\n"
|
||||
"\n"
|
||||
" max_stat_cache_size (default=\"100,000\" entries (about 40MB))\n"
|
||||
" - maximum number of entries in the stat cache, and this maximum is\n"
|
||||
" also treated as the number of symbolic link cache.\n"
|
||||
"\n"
|
||||
" stat_cache_expire (default is 900))\n"
|
||||
" - specify expire time (seconds) for entries in the stat cache.\n"
|
||||
" This expire time indicates the time since stat cached. and this\n"
|
||||
" is also set to the expire time of the symbolic link cache.\n"
|
||||
"\n"
|
||||
" stat_cache_interval_expire (default is 900)\n"
|
||||
" - specify expire time (seconds) for entries in the stat cache(and\n"
|
||||
" symbolic link cache).\n"
|
||||
" This expire time is based on the time from the last access time\n"
|
||||
" of the stat cache. This option is exclusive with stat_cache_expire,\n"
|
||||
" and is left for compatibility with older versions.\n"
|
||||
"\n"
|
||||
" enable_noobj_cache (default is disable)\n"
|
||||
" - enable cache entries for the object which does not exist.\n"
|
||||
" s3fs always has to check whether file (or sub directory) exists \n"
|
||||
" under object (path) when s3fs does some command, since s3fs has \n"
|
||||
" recognized a directory which does not exist and has files or \n"
|
||||
" sub directories under itself. It increases ListBucket request \n"
|
||||
" and makes performance bad.\n"
|
||||
" You can specify this option for performance, s3fs memorizes \n"
|
||||
" in stat cache that the object (file or directory) does not exist.\n"
|
||||
"\n"
|
||||
" no_check_certificate\n"
|
||||
" - server certificate won't be checked against the available \n"
|
||||
" certificate authorities.\n"
|
||||
"\n"
|
||||
" ssl_verify_hostname (default=\"2\")\n"
|
||||
" - When 0, do not verify the SSL certificate against the hostname.\n"
|
||||
"\n"
|
||||
" nodnscache (disable DNS cache)\n"
|
||||
" - s3fs is always using DNS cache, this option make DNS cache disable.\n"
|
||||
"\n"
|
||||
" nosscache (disable SSL session cache)\n"
|
||||
" - s3fs is always using SSL session cache, this option make SSL \n"
|
||||
" session cache disable.\n"
|
||||
"\n"
|
||||
" multireq_max (default=\"20\")\n"
|
||||
" - maximum number of parallel request for listing objects.\n"
|
||||
"\n"
|
||||
" parallel_count (default=\"5\")\n"
|
||||
" - number of parallel request for uploading big objects.\n"
|
||||
" s3fs uploads large object (over 20MB) by multipart post request, \n"
|
||||
" and sends parallel requests.\n"
|
||||
" This option limits parallel request count which s3fs requests \n"
|
||||
" at once. It is necessary to set this value depending on a CPU \n"
|
||||
" and a network band.\n"
|
||||
"\n"
|
||||
" multipart_size (default=\"10\")\n"
|
||||
" - part size, in MB, for each multipart request.\n"
|
||||
" The minimum value is 5 MB and the maximum value is 5 GB.\n"
|
||||
"\n"
|
||||
" ensure_diskfree (default 0)\n"
|
||||
" - sets MB to ensure disk free space. This option means the\n"
|
||||
" threshold of free space size on disk which is used for the\n"
|
||||
" cache file by s3fs. s3fs makes file for\n"
|
||||
" downloading, uploading and caching files. If the disk free\n"
|
||||
" space is smaller than this value, s3fs do not use diskspace\n"
|
||||
" as possible in exchange for the performance.\n"
|
||||
"\n"
|
||||
" singlepart_copy_limit (default=\"512\")\n"
|
||||
" - maximum size, in MB, of a single-part copy before trying \n"
|
||||
" multipart copy.\n"
|
||||
"\n"
|
||||
" host (default=\"https://s3.amazonaws.com\")\n"
|
||||
" - Set a non-Amazon host, e.g., https://example.com.\n"
|
||||
"\n"
|
||||
" servicepath (default=\"/\")\n"
|
||||
" - Set a service path when the non-Amazon host requires a prefix.\n"
|
||||
"\n"
|
||||
" url (default=\"https://s3.amazonaws.com\")\n"
|
||||
" - sets the url to use to access Amazon S3. If you want to use HTTP,\n"
|
||||
" then you can set \"url=http://s3.amazonaws.com\".\n"
|
||||
" If you do not use https, please specify the URL with the url\n"
|
||||
" option.\n"
|
||||
"\n"
|
||||
" endpoint (default=\"us-east-1\")\n"
|
||||
" - sets the endpoint to use on signature version 4\n"
|
||||
" If this option is not specified, s3fs uses \"us-east-1\" region as\n"
|
||||
" the default. If the s3fs could not connect to the region specified\n"
|
||||
" by this option, s3fs could not run. But if you do not specify this\n"
|
||||
" option, and if you can not connect with the default region, s3fs\n"
|
||||
" will retry to automatically connect to the other region. So s3fs\n"
|
||||
" can know the correct region name, because s3fs can find it in an\n"
|
||||
" error from the S3 server.\n"
|
||||
"\n"
|
||||
" sigv2 (default is signature version 4)\n"
|
||||
" - sets signing AWS requests by using Signature Version 2\n"
|
||||
"\n"
|
||||
" mp_umask (default is \"0000\")\n"
|
||||
" - sets umask for the mount point directory.\n"
|
||||
" If allow_other option is not set, s3fs allows access to the mount\n"
|
||||
" point only to the owner. In the opposite case s3fs allows access\n"
|
||||
" to all users as the default. But if you set the allow_other with\n"
|
||||
" this option, you can control the permissions of the\n"
|
||||
" mount point by this option like umask.\n"
|
||||
"\n"
|
||||
" umask (default is \"0000\")\n"
|
||||
" - sets umask for files under the mountpoint. This can allow\n"
|
||||
" users other than the mounting user to read and write to files\n"
|
||||
" that they did not create.\n"
|
||||
"\n"
|
||||
" nomultipart (disable multipart uploads)\n"
|
||||
"\n"
|
||||
" enable_content_md5 (default is disable)\n"
|
||||
" Allow S3 server to check data integrity of uploads via the\n"
|
||||
" Content-MD5 header. This can add CPU overhead to transfers.\n"
|
||||
"\n"
|
||||
" ecs (default is disable)\n"
|
||||
" - This option instructs s3fs to query the ECS container credential\n"
|
||||
" metadata address instead of the instance metadata address.\n"
|
||||
"\n"
|
||||
" iam_role (default is no IAM role)\n"
|
||||
" - This option requires the IAM role name or \"auto\". If you specify\n"
|
||||
" \"auto\", s3fs will automatically use the IAM role names that are set\n"
|
||||
" to an instance. If you specify this option without any argument, it\n"
|
||||
" is the same as that you have specified the \"auto\".\n"
|
||||
"\n"
|
||||
" ibm_iam_auth (default is not using IBM IAM authentication)\n"
|
||||
" - This option instructs s3fs to use IBM IAM authentication.\n"
|
||||
" In this mode, the AWSAccessKey and AWSSecretKey will be used as\n"
|
||||
" IBM's Service-Instance-ID and APIKey, respectively.\n"
|
||||
"\n"
|
||||
" ibm_iam_endpoint (default is https://iam.bluemix.net)\n"
|
||||
" - sets the URL to use for IBM IAM authentication.\n"
|
||||
"\n"
|
||||
" use_xattr (default is not handling the extended attribute)\n"
|
||||
" Enable to handle the extended attribute (xattrs).\n"
|
||||
" If you set this option, you can use the extended attribute.\n"
|
||||
" For example, encfs and ecryptfs need to support the extended attribute.\n"
|
||||
" Notice: if s3fs handles the extended attribute, s3fs can not work to\n"
|
||||
" copy command with preserve=mode.\n"
|
||||
"\n"
|
||||
" noxmlns (disable registering xml name space)\n"
|
||||
" disable registering xml name space for response of \n"
|
||||
" ListBucketResult and ListVersionsResult etc. Default name \n"
|
||||
" space is looked up from \"http://s3.amazonaws.com/doc/2006-03-01\".\n"
|
||||
" This option should not be specified now, because s3fs looks up\n"
|
||||
" xmlns automatically after v1.66.\n"
|
||||
"\n"
|
||||
" nomixupload (disable copy in multipart uploads)\n"
|
||||
" Disable to use PUT (copy api) when multipart uploading large size objects.\n"
|
||||
" By default, when doing multipart upload, the range of unchanged data\n"
|
||||
" will use PUT (copy api) whenever possible.\n"
|
||||
" When nocopyapi or norenameapi is specified, use of PUT (copy api) is\n"
|
||||
" invalidated even if this option is not specified.\n"
|
||||
"\n"
|
||||
" nocopyapi (for other incomplete compatibility object storage)\n"
|
||||
" For a distributed object storage which is compatibility S3\n"
|
||||
" API without PUT (copy api).\n"
|
||||
" If you set this option, s3fs do not use PUT with \n"
|
||||
" \"x-amz-copy-source\" (copy api). Because traffic is increased\n"
|
||||
" 2-3 times by this option, we do not recommend this.\n"
|
||||
"\n"
|
||||
" norenameapi (for other incomplete compatibility object storage)\n"
|
||||
" For a distributed object storage which is compatibility S3\n"
|
||||
" API without PUT (copy api).\n"
|
||||
" This option is a subset of nocopyapi option. The nocopyapi\n"
|
||||
" option does not use copy-api for all command (ex. chmod, chown,\n"
|
||||
" touch, mv, etc), but this option does not use copy-api for\n"
|
||||
" only rename command (ex. mv). If this option is specified with\n"
|
||||
" nocopyapi, then s3fs ignores it.\n"
|
||||
"\n"
|
||||
" use_path_request_style (use legacy API calling style)\n"
|
||||
" Enable compatibility with S3-like APIs which do not support\n"
|
||||
" the virtual-host request style, by using the older path request\n"
|
||||
" style.\n"
|
||||
"\n"
|
||||
" noua (suppress User-Agent header)\n"
|
||||
" Usually s3fs outputs of the User-Agent in \"s3fs/<version> (commit\n"
|
||||
" hash <hash>; <using ssl library name>)\" format.\n"
|
||||
" If this option is specified, s3fs suppresses the output of the\n"
|
||||
" User-Agent.\n"
|
||||
"\n"
|
||||
" cipher_suites\n"
|
||||
" Customize the list of TLS cipher suites.\n"
|
||||
" Expects a colon separated list of cipher suite names.\n"
|
||||
" A list of available cipher suites, depending on your TLS engine,\n"
|
||||
" can be found on the CURL library documentation:\n"
|
||||
" https://curl.haxx.se/docs/ssl-ciphers.html\n"
|
||||
"\n"
|
||||
" instance_name - The instance name of the current s3fs mountpoint.\n"
|
||||
" This name will be added to logging messages and user agent headers sent by s3fs.\n"
|
||||
"\n"
|
||||
" complement_stat (complement lack of file/directory mode)\n"
|
||||
" s3fs complements lack of information about file/directory mode\n"
|
||||
" if a file or a directory object does not have x-amz-meta-mode\n"
|
||||
" header. As default, s3fs does not complements stat information\n"
|
||||
" for a object, then the object will not be able to be allowed to\n"
|
||||
" list/modify.\n"
|
||||
"\n"
|
||||
" notsup_compat_dir (not support compatibility directory types)\n"
|
||||
" As a default, s3fs supports objects of the directory type as\n"
|
||||
" much as possible and recognizes them as directories.\n"
|
||||
" Objects that can be recognized as directory objects are \"dir/\",\n"
|
||||
" \"dir\", \"dir_$folder$\", and there is a file object that does\n"
|
||||
" not have a directory object but contains that directory path.\n"
|
||||
" s3fs needs redundant communication to support all these\n"
|
||||
" directory types. The object as the directory created by s3fs\n"
|
||||
" is \"dir/\". By restricting s3fs to recognize only \"dir/\" as\n"
|
||||
" a directory, communication traffic can be reduced. This option\n"
|
||||
" is used to give this restriction to s3fs.\n"
|
||||
" However, if there is a directory object other than \"dir/\" in\n"
|
||||
" the bucket, specifying this option is not recommended. s3fs may\n"
|
||||
" not be able to recognize the object correctly if an object\n"
|
||||
" created by s3fs exists in the bucket.\n"
|
||||
" Please use this option when the directory in the bucket is\n"
|
||||
" only \"dir/\" object.\n"
|
||||
"\n"
|
||||
" use_wtf8 - support arbitrary file system encoding.\n"
|
||||
" S3 requires all object names to be valid UTF-8. But some\n"
|
||||
" clients, notably Windows NFS clients, use their own encoding.\n"
|
||||
" This option re-encodes invalid UTF-8 object names into valid\n"
|
||||
" UTF-8 by mapping offending codes into a 'private' codepage of the\n"
|
||||
" Unicode set.\n"
|
||||
" Useful on clients not using UTF-8 as their file system encoding.\n"
|
||||
"\n"
|
||||
" use_session_token - indicate that session token should be provided.\n"
|
||||
" If credentials are provided by environment variables this switch\n"
|
||||
" forces presence check of AWSSESSIONTOKEN variable.\n"
|
||||
" Otherwise an error is returned.\n"
|
||||
"\n"
|
||||
" requester_pays (default is disable)\n"
|
||||
" This option instructs s3fs to enable requests involving\n"
|
||||
" Requester Pays buckets.\n"
|
||||
" It includes the 'x-amz-request-payer=requester' entry in the\n"
|
||||
" request header.\n"
|
||||
"\n"
|
||||
" mime (default is \"/etc/mime.types\")\n"
|
||||
" Specify the path of the mime.types file.\n"
|
||||
" If this option is not specified, the existence of \"/etc/mime.types\"\n"
|
||||
" is checked, and that file is loaded as mime information.\n"
|
||||
" If this file does not exist on macOS, then \"/etc/apache2/mime.types\"\n"
|
||||
" is checked as well.\n"
|
||||
"\n"
|
||||
" dbglevel (default=\"crit\")\n"
|
||||
" Set the debug message level. set value as crit (critical), err\n"
|
||||
" (error), warn (warning), info (information) to debug level.\n"
|
||||
" default debug level is critical. If s3fs run with \"-d\" option,\n"
|
||||
" the debug level is set information. When s3fs catch the signal\n"
|
||||
" SIGUSR2, the debug level is bumpup.\n"
|
||||
"\n"
|
||||
" curldbg - put curl debug message\n"
|
||||
" Put the debug message from libcurl when this option is specified.\n"
|
||||
" Specify \"normal\" or \"body\" for the parameter.\n"
|
||||
" If the parameter is omitted, it is the same as \"normal\".\n"
|
||||
" If \"body\" is specified, some API communication body data will be\n"
|
||||
" output in addition to the debug message output as \"normal\".\n"
|
||||
"\n"
|
||||
" set_check_cache_sigusr1 (default is stdout)\n"
|
||||
" If the cache is enabled, you can check the integrity of the\n"
|
||||
" cache file and the cache file's stats info file.\n"
|
||||
" This option is specified and when sending the SIGUSR1 signal\n"
|
||||
" to the s3fs process checks the cache status at that time.\n"
|
||||
" This option can take a file path as parameter to output the\n"
|
||||
" check result to that file. The file path parameter can be omitted.\n"
|
||||
" If omitted, the result will be output to stdout or syslog.\n"
|
||||
"\n"
|
||||
"FUSE/mount Options:\n"
|
||||
"\n"
|
||||
" Most of the generic mount options described in 'man mount' are\n"
|
||||
" supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime,\n"
|
||||
" noatime, sync async, dirsync). Filesystems are mounted with\n"
|
||||
" '-onodev,nosuid' by default, which can only be overridden by a\n"
|
||||
" privileged user.\n"
|
||||
" \n"
|
||||
" There are many FUSE specific mount options that can be specified.\n"
|
||||
" e.g. allow_other See the FUSE's README for the full set.\n"
|
||||
"\n"
|
||||
"Utility mode Options:\n"
|
||||
"\n"
|
||||
" -u, --incomplete-mpu-list\n"
|
||||
" Lists multipart incomplete objects uploaded to the specified\n"
|
||||
" bucket.\n"
|
||||
" --incomplete-mpu-abort (=all or =<date format>)\n"
|
||||
" Delete the multipart incomplete object uploaded to the specified\n"
|
||||
" bucket.\n"
|
||||
" If \"all\" is specified for this option, all multipart incomplete\n"
|
||||
" objects will be deleted. If you specify no argument as an option,\n"
|
||||
" objects older than 24 hours (24H) will be deleted (This is the\n"
|
||||
" default value). You can specify an optional date format. It can\n"
|
||||
" be specified as year, month, day, hour, minute, second, and it is\n"
|
||||
" expressed as \"Y\", \"M\", \"D\", \"h\", \"m\", \"s\" respectively.\n"
|
||||
" For example, \"1Y6M10D12h30m30s\".\n"
|
||||
"\n"
|
||||
"Miscellaneous Options:\n"
|
||||
"\n"
|
||||
" -h, --help Output this help.\n"
|
||||
" --version Output version info.\n"
|
||||
" -d --debug Turn on DEBUG messages to syslog. Specifying -d\n"
|
||||
" twice turns on FUSE debug messages to STDOUT.\n"
|
||||
" -f FUSE foreground option - do not run as daemon.\n"
|
||||
" -s FUSE single-threaded option\n"
|
||||
" disable multi-threaded operation\n"
|
||||
"\n"
|
||||
"\n"
|
||||
"s3fs home page: <https://github.com/s3fs-fuse/s3fs-fuse>\n"
|
||||
;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
void show_usage()
|
||||
{
|
||||
printf("Usage: %s BUCKET:[PATH] MOUNTPOINT [OPTION]...\n", program_name.c_str());
|
||||
}
|
||||
|
||||
void show_help()
|
||||
{
|
||||
show_usage();
|
||||
printf(help_string);
|
||||
}
|
||||
|
||||
void show_version()
|
||||
{
|
||||
printf(
|
||||
"Amazon Simple Storage Service File System V%s (commit:%s) with %s\n"
|
||||
"Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>\n"
|
||||
"License GPL2: GNU GPL version 2 <https://gnu.org/licenses/gpl.html>\n"
|
||||
"This is free software: you are free to change and redistribute it.\n"
|
||||
"There is NO WARRANTY, to the extent permitted by law.\n",
|
||||
VERSION, COMMIT_HASH_VAL, s3fs_crypt_lib_name());
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
40 src/s3fs_help.h (new file)
@ -0,0 +1,40 @@
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_S3FS_HELP_H_
|
||||
#define S3FS_S3FS_HELP_H_
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
void show_usage(void);
|
||||
void show_help(void);
|
||||
void show_version(void);
|
||||
|
||||
#endif // S3FS_S3FS_HELP_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
36 src/s3fs_logger.cpp (new file)
@ -0,0 +1,36 @@
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include "s3fs_logger.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
//-------------------------------------------------------------------
|
||||
s3fs_log_level debug_level = S3FS_LOG_CRIT;
|
||||
const char* s3fs_log_nest[S3FS_LOG_NEST_MAX] = {"", " ", " ", " "};
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
154 src/s3fs_logger.h (new file)
@ -0,0 +1,154 @@
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_LOGGER_H_
|
||||
#define S3FS_LOGGER_H_
|
||||
|
||||
#include <syslog.h>
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Debug level
|
||||
//-------------------------------------------------------------------
|
||||
enum s3fs_log_level{
|
||||
S3FS_LOG_CRIT = 0, // LOG_CRIT
|
||||
S3FS_LOG_ERR = 1, // LOG_ERR
|
||||
S3FS_LOG_WARN = 3, // LOG_WARNING
|
||||
S3FS_LOG_INFO = 7, // LOG_INFO
|
||||
S3FS_LOG_DBG = 15 // LOG_DEBUG
|
||||
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Debug macros
|
||||
//-------------------------------------------------------------------
|
||||
#define IS_S3FS_LOG_CRIT() (S3FS_LOG_CRIT == debug_level)
|
||||
#define IS_S3FS_LOG_ERR() (S3FS_LOG_ERR == (debug_level & S3FS_LOG_DBG))
|
||||
#define IS_S3FS_LOG_WARN() (S3FS_LOG_WARN == (debug_level & S3FS_LOG_DBG))
|
||||
#define IS_S3FS_LOG_INFO() (S3FS_LOG_INFO == (debug_level & S3FS_LOG_DBG))
|
||||
#define IS_S3FS_LOG_DBG() (S3FS_LOG_DBG == (debug_level & S3FS_LOG_DBG))
|
||||
|
||||
#define S3FS_LOG_LEVEL_TO_SYSLOG(level) \
|
||||
( S3FS_LOG_DBG == (level & S3FS_LOG_DBG) ? LOG_DEBUG : \
|
||||
S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? LOG_INFO : \
|
||||
S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? LOG_WARNING : \
|
||||
S3FS_LOG_ERR == (level & S3FS_LOG_DBG) ? LOG_ERR : LOG_CRIT )
|
||||
|
||||
#define S3FS_LOG_LEVEL_STRING(level) \
|
||||
( S3FS_LOG_DBG == (level & S3FS_LOG_DBG) ? "[DBG] " : \
|
||||
S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? "[INF] " : \
|
||||
S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? "[WAN] " : \
|
||||
S3FS_LOG_ERR == (level & S3FS_LOG_DBG) ? "[ERR] " : "[CRT] " )
|
||||
|
||||
#define S3FS_LOG_NEST_MAX 4
|
||||
#define S3FS_LOG_NEST(nest) (nest < S3FS_LOG_NEST_MAX ? s3fs_log_nest[nest] : s3fs_log_nest[S3FS_LOG_NEST_MAX - 1])
|
||||
|
||||
#define S3FS_LOW_LOGPRN(level, fmt, ...) \
|
||||
do{ \
|
||||
if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \
|
||||
if(foreground){ \
|
||||
fprintf(stdout, "%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), __FILE__, __func__, __LINE__, __VA_ARGS__); \
|
||||
}else{ \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s%s:%s(%d): " fmt "%s", instance_name.c_str(), __FILE__, __func__, __LINE__, __VA_ARGS__); \
|
||||
} \
|
||||
} \
|
||||
}while(0)
|
||||
|
||||
#define S3FS_LOW_LOGPRN2(level, nest, fmt, ...) \
|
||||
do{ \
|
||||
if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \
|
||||
if(foreground){ \
|
||||
fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), S3FS_LOG_NEST(nest), __FILE__, __func__, __LINE__, __VA_ARGS__); \
|
||||
}else{ \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s%s" fmt "%s", instance_name.c_str(), S3FS_LOG_NEST(nest), __VA_ARGS__); \
|
||||
} \
|
||||
} \
|
||||
}while(0)
|
||||
|
||||
#define S3FS_LOW_CURLDBG(fmt, ...) \
|
||||
do{ \
|
||||
if(foreground){ \
|
||||
fprintf(stdout, "[CURL DBG] " fmt "%s\n", __VA_ARGS__); \
|
||||
}else{ \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "%s" fmt "%s", instance_name.c_str(), __VA_ARGS__); \
|
||||
} \
|
||||
}while(0)
|
||||
|
||||
#define S3FS_LOW_LOGPRN_EXIT(fmt, ...) \
|
||||
do{ \
|
||||
if(foreground){ \
|
||||
fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \
|
||||
}else{ \
|
||||
fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "%ss3fs: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \
|
||||
} \
|
||||
}while(0)
|
||||
|
||||
// Special macro for init message
|
||||
#define S3FS_PRN_INIT_INFO(fmt, ...) \
|
||||
do{ \
|
||||
if(foreground){ \
|
||||
fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(S3FS_LOG_INFO), S3FS_LOG_NEST(0), __FILE__, __func__, __LINE__, __VA_ARGS__, ""); \
|
||||
}else{ \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_INFO), "%s%s" fmt "%s", instance_name.c_str(), S3FS_LOG_NEST(0), __VA_ARGS__, ""); \
|
||||
} \
|
||||
}while(0)
|
||||
|
||||
// Special macro for checking cache files
|
||||
#define S3FS_LOW_CACHE(fp, fmt, ...) \
|
||||
do{ \
|
||||
if(foreground){ \
|
||||
fprintf(fp, fmt "%s\n", __VA_ARGS__); \
|
||||
}else{ \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_INFO), "%s: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \
|
||||
} \
|
||||
}while(0)
|
||||
|
||||
// [NOTE]
|
||||
// small trick for VA_ARGS
|
||||
//
|
||||
#define S3FS_PRN_EXIT(fmt, ...) S3FS_LOW_LOGPRN_EXIT(fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_CRIT(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_CRIT, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_ERR(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_ERR, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_WARN(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_WARN, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_DBG(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_DBG, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_INFO(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 0, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_INFO0(fmt, ...) S3FS_LOG_INFO(fmt, __VA_ARGS__)
|
||||
#define S3FS_PRN_INFO1(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 1, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_INFO2(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 2, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_INFO3(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 3, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_CURL(fmt, ...) S3FS_LOW_CURLDBG(fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_CACHE(fp, ...) S3FS_LOW_CACHE(fp, ##__VA_ARGS__, "")
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
//-------------------------------------------------------------------
|
||||
// TODO: namespace these
|
||||
extern s3fs_log_level debug_level;
|
||||
extern const char* s3fs_log_nest[S3FS_LOG_NEST_MAX];
|
||||
|
||||
#endif // S3FS_LOGGER_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
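
Not part of this commit: a minimal usage sketch of the macros above, assuming the extern globals they reference (foreground, instance_name, debug_level) are linked in as elsewhere in s3fs. Depending on the foreground flag the same call goes to stdout or to syslog.

    // Illustration only: how callers are expected to use the S3FS_PRN_* macros.
    static void sample_logging(const char* path)
    {
        if(!path){
            S3FS_PRN_ERR("path is NULL");                   // shown at dbglevel err or more verbose
            return;
        }
        S3FS_PRN_INFO("checking object(%s)", path);         // nest level 0, dbglevel info/dbg
        S3FS_PRN_INFO2("checking stat cache(%s)", path);    // nest level 2, indented output
        S3FS_PRN_DBG("finished(%s)", path);                  // only at dbglevel dbg
    }
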
1692 src/s3fs_util.cpp (diff suppressed because it is too large)

119 src/s3fs_util.h
@ -17,100 +17,15 @@
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_S3FS_UTIL_H_
|
||||
#define S3FS_S3FS_UTIL_H_
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Typedef
|
||||
//-------------------------------------------------------------------
|
||||
//
|
||||
// Struct
|
||||
//
|
||||
struct s3obj_entry{
|
||||
std::string normalname; // normalized name: if empty, object is normalized name.
|
||||
std::string orgname; // original name: if empty, object is original name.
|
||||
std::string etag;
|
||||
bool is_dir;
|
||||
|
||||
s3obj_entry() : is_dir(false) {}
|
||||
};
|
||||
|
||||
typedef std::map<std::string, struct s3obj_entry> s3obj_t;
|
||||
typedef std::list<std::string> s3obj_list_t;
|
||||
|
||||
//
|
||||
// Class
|
||||
//
|
||||
class S3ObjList
|
||||
{
|
||||
private:
|
||||
s3obj_t objects;
|
||||
|
||||
private:
|
||||
bool insert_normalized(const char* name, const char* normalized, bool is_dir);
|
||||
const s3obj_entry* GetS3Obj(const char* name) const;
|
||||
|
||||
s3obj_t::const_iterator begin(void) const {
|
||||
return objects.begin();
|
||||
}
|
||||
s3obj_t::const_iterator end(void) const {
|
||||
return objects.end();
|
||||
}
|
||||
|
||||
public:
|
||||
S3ObjList() {}
|
||||
~S3ObjList() {}
|
||||
|
||||
bool IsEmpty(void) const {
|
||||
return objects.empty();
|
||||
}
|
||||
bool insert(const char* name, const char* etag = NULL, bool is_dir = false);
|
||||
std::string GetOrgName(const char* name) const;
|
||||
std::string GetNormalizedName(const char* name) const;
|
||||
std::string GetETag(const char* name) const;
|
||||
bool IsDir(const char* name) const;
|
||||
bool GetNameList(s3obj_list_t& list, bool OnlyNormalized = true, bool CutSlash = true) const;
|
||||
bool GetLastName(std::string& lastname) const;
|
||||
|
||||
static bool MakeHierarchizedList(s3obj_list_t& list, bool haveSlash);
|
||||
};
|
||||
|
||||
typedef struct mvnode {
|
||||
char *old_path;
|
||||
char *new_path;
|
||||
bool is_dir;
|
||||
bool is_normdir;
|
||||
struct mvnode *prev;
|
||||
struct mvnode *next;
|
||||
} MVNODE;
|
||||
|
||||
class AutoLock
|
||||
{
|
||||
public:
|
||||
enum Type {
|
||||
NO_WAIT = 1,
|
||||
ALREADY_LOCKED = 2,
|
||||
NONE = 0
|
||||
};
|
||||
explicit AutoLock(pthread_mutex_t* pmutex, Type type = NONE);
|
||||
bool isLockAcquired() const;
|
||||
~AutoLock();
|
||||
|
||||
private:
|
||||
AutoLock(const AutoLock&);
|
||||
pthread_mutex_t* const auto_mutex;
|
||||
bool is_lock_acquired;
|
||||
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
std::string get_realpath(const char *path);
|
||||
|
||||
MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, bool normdir = false);
|
||||
MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const char *new_path, bool is_dir, bool normdir = false);
|
||||
void free_mvnodes(MVNODE *head);
|
||||
|
||||
void init_sysconf_vars();
|
||||
std::string get_username(uid_t uid);
|
||||
int is_uid_include_group(uid_t uid, gid_t gid);
|
||||
@ -119,6 +34,7 @@ std::string mydirname(const char* path);
|
||||
std::string mydirname(const std::string& path);
|
||||
std::string mybasename(const char* path);
|
||||
std::string mybasename(const std::string& path);
|
||||
|
||||
int mkdirp(const std::string& path, mode_t mode);
|
||||
std::string get_exist_directory_path(const std::string& path);
|
||||
bool check_exist_dir_permission(const char* dirpath);
|
||||
@ -126,36 +42,13 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own);
|
||||
|
||||
bool compare_sysname(const char* target);
|
||||
|
||||
time_t get_mtime(const char *s);
|
||||
time_t get_mtime(const headers_t& meta, bool overcheck = true);
|
||||
time_t get_ctime(const headers_t& meta, bool overcheck = true);
|
||||
off_t get_size(const char *s);
|
||||
off_t get_size(const headers_t& meta);
|
||||
mode_t get_mode(const char *s, int base = 0);
|
||||
mode_t get_mode(const headers_t& meta, const char* path = NULL, bool checkdir = false, bool forcedir = false);
|
||||
uid_t get_uid(const char *s);
|
||||
uid_t get_uid(const headers_t& meta);
|
||||
gid_t get_gid(const char *s);
|
||||
gid_t get_gid(const headers_t& meta);
|
||||
blkcnt_t get_blocks(off_t size);
|
||||
time_t cvtIAMExpireStringToTime(const char* s);
|
||||
time_t get_lastmodified(const char* s);
|
||||
time_t get_lastmodified(const headers_t& meta);
|
||||
bool is_need_check_obj_detail(const headers_t& meta);
|
||||
bool merge_headers(headers_t& base, const headers_t& additional, bool add_noexist);
|
||||
bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value);
|
||||
|
||||
void show_usage(void);
|
||||
void show_help(void);
|
||||
void show_version(void);
|
||||
|
||||
#endif // S3FS_S3FS_UTIL_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 2
|
||||
* c-basic-offset: 2
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=2 ts=2 fdm=marker
|
||||
* vim<600: expandtab sw=2 ts=2
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
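
Not part of this commit: a sketch of the AutoLock RAII guard declared above, assuming (as the isLockAcquired() accessor suggests) that the NO_WAIT type performs a non-blocking trylock. The mutex and function names are illustrative.

    // Illustration only; assumes <pthread.h> is available as elsewhere in s3fs.
    static pthread_mutex_t sample_lock = PTHREAD_MUTEX_INITIALIZER;

    static void sample_critical_section()
    {
        AutoLock lock(&sample_lock, AutoLock::NO_WAIT);   // try to take the lock without blocking
        if(!lock.isLockAcquired()){
            return;                                       // another thread holds it, give up
        }
        // ... work protected by sample_lock; the destructor unlocks on return ...
    }
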
499 src/s3fs_xml.cpp (new file)
@ -0,0 +1,499 @@
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_xml.h"
|
||||
#include "s3fs_util.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Variables
|
||||
//-------------------------------------------------------------------
|
||||
static const char* c_strErrorObjectName = "FILE or SUBDIR in DIR";
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
static bool GetXmlNsUrl(xmlDocPtr doc, string& nsurl)
|
||||
{
|
||||
static time_t tmLast = 0; // cache for 60 sec.
|
||||
static string strNs;
|
||||
bool result = false;
|
||||
|
||||
if(!doc){
|
||||
return false;
|
||||
}
|
||||
if((tmLast + 60) < time(NULL)){
|
||||
// refresh
|
||||
tmLast = time(NULL);
|
||||
strNs = "";
|
||||
xmlNodePtr pRootNode = xmlDocGetRootElement(doc);
|
||||
if(pRootNode){
|
||||
xmlNsPtr* nslist = xmlGetNsList(doc, pRootNode);
|
||||
if(nslist){
|
||||
if(nslist[0] && nslist[0]->href){
|
||||
strNs = (const char*)(nslist[0]->href);
|
||||
}
|
||||
S3FS_XMLFREE(nslist);
|
||||
}
|
||||
}
|
||||
}
|
||||
if(!strNs.empty()){
|
||||
nsurl = strNs;
|
||||
result = true;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
static xmlChar* get_base_exp(xmlDocPtr doc, const char* exp)
|
||||
{
|
||||
xmlXPathObjectPtr marker_xp;
|
||||
string xmlnsurl;
|
||||
string exp_string;
|
||||
|
||||
if(!doc){
|
||||
return NULL;
|
||||
}
|
||||
xmlXPathContextPtr ctx = xmlXPathNewContext(doc);
|
||||
|
||||
if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){
|
||||
xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str());
|
||||
exp_string = "/s3:ListBucketResult/s3:";
|
||||
} else {
|
||||
exp_string = "/ListBucketResult/";
|
||||
}
|
||||
|
||||
exp_string += exp;
|
||||
|
||||
if(NULL == (marker_xp = xmlXPathEvalExpression((xmlChar *)exp_string.c_str(), ctx))){
|
||||
xmlXPathFreeContext(ctx);
|
||||
return NULL;
|
||||
}
|
||||
if(xmlXPathNodeSetIsEmpty(marker_xp->nodesetval)){
|
||||
S3FS_PRN_ERR("marker_xp->nodesetval is empty.");
|
||||
xmlXPathFreeObject(marker_xp);
|
||||
xmlXPathFreeContext(ctx);
|
||||
return NULL;
|
||||
}
|
||||
xmlNodeSetPtr nodes = marker_xp->nodesetval;
|
||||
xmlChar* result = xmlNodeListGetString(doc, nodes->nodeTab[0]->xmlChildrenNode, 1);
|
||||
|
||||
xmlXPathFreeObject(marker_xp);
|
||||
xmlXPathFreeContext(ctx);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static xmlChar* get_prefix(xmlDocPtr doc)
|
||||
{
|
||||
return get_base_exp(doc, "Prefix");
|
||||
}
|
||||
|
||||
xmlChar* get_next_marker(xmlDocPtr doc)
|
||||
{
|
||||
return get_base_exp(doc, "NextMarker");
|
||||
}
|
||||
|
||||
// return: the pointer to object name on allocated memory.
|
||||
// the pointer to "c_strErrorObjectName".(not allocated)
|
||||
// NULL (if an error occurred)
|
||||
static char* get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path)
|
||||
{
|
||||
// Get full path
|
||||
xmlChar* fullpath = xmlNodeListGetString(doc, node, 1);
|
||||
if(!fullpath){
|
||||
S3FS_PRN_ERR("could not get object full path name..");
|
||||
return NULL;
|
||||
}
|
||||
// basepath(path) is as same as fullpath.
|
||||
if(0 == strcmp((char*)fullpath, path)){
|
||||
xmlFree(fullpath);
|
||||
return (char*)c_strErrorObjectName;
|
||||
}
|
||||
|
||||
// Make dir path and filename
|
||||
string strdirpath = mydirname(string((char*)fullpath));
|
||||
string strmybpath = mybasename(string((char*)fullpath));
|
||||
const char* dirpath = strdirpath.c_str();
|
||||
const char* mybname = strmybpath.c_str();
|
||||
const char* basepath= (path && '/' == path[0]) ? &path[1] : path;
|
||||
xmlFree(fullpath);
|
||||
|
||||
if(!mybname || '\0' == mybname[0]){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// check subdir & file in subdir
|
||||
if(dirpath && 0 < strlen(dirpath)){
|
||||
// case of "/"
|
||||
if(0 == strcmp(mybname, "/") && 0 == strcmp(dirpath, "/")){
|
||||
return (char*)c_strErrorObjectName;
|
||||
}
|
||||
// case of "."
|
||||
if(0 == strcmp(mybname, ".") && 0 == strcmp(dirpath, ".")){
|
||||
return (char*)c_strErrorObjectName;
|
||||
}
|
||||
// case of ".."
|
||||
if(0 == strcmp(mybname, "..") && 0 == strcmp(dirpath, ".")){
|
||||
return (char*)c_strErrorObjectName;
|
||||
}
|
||||
// case of "name"
|
||||
if(0 == strcmp(dirpath, ".")){
|
||||
// OK
|
||||
return strdup(mybname);
|
||||
}else{
|
||||
if(basepath && 0 == strcmp(dirpath, basepath)){
|
||||
// OK
|
||||
return strdup(mybname);
|
||||
}else if(basepath && 0 < strlen(basepath) && '/' == basepath[strlen(basepath) - 1] && 0 == strncmp(dirpath, basepath, strlen(basepath) - 1)){
|
||||
string withdirname;
|
||||
if(strlen(dirpath) > strlen(basepath)){
|
||||
withdirname = &dirpath[strlen(basepath)];
|
||||
}
|
||||
if(0 < withdirname.length() && '/' != withdirname[withdirname.length() - 1]){
|
||||
withdirname += "/";
|
||||
}
|
||||
withdirname += mybname;
|
||||
return strdup(withdirname.c_str());
|
||||
}
|
||||
}
|
||||
}
|
||||
// case of something wrong
|
||||
return (char*)c_strErrorObjectName;
|
||||
}
|
||||
|
||||
static xmlChar* get_exp_value_xml(xmlDocPtr doc, xmlXPathContextPtr ctx, const char* exp_key)
|
||||
{
|
||||
if(!doc || !ctx || !exp_key){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
xmlXPathObjectPtr exp;
|
||||
xmlNodeSetPtr exp_nodes;
|
||||
xmlChar* exp_value;
|
||||
|
||||
// search exp_key tag
|
||||
if(NULL == (exp = xmlXPathEvalExpression((xmlChar*)exp_key, ctx))){
|
||||
S3FS_PRN_ERR("Could not find key(%s).", exp_key);
|
||||
return NULL;
|
||||
}
|
||||
if(xmlXPathNodeSetIsEmpty(exp->nodesetval)){
|
||||
S3FS_PRN_ERR("Key(%s) node is empty.", exp_key);
|
||||
S3FS_XMLXPATHFREEOBJECT(exp);
|
||||
return NULL;
|
||||
}
|
||||
// get exp_key value & set in struct
|
||||
exp_nodes = exp->nodesetval;
|
||||
if(NULL == (exp_value = xmlNodeListGetString(doc, exp_nodes->nodeTab[0]->xmlChildrenNode, 1))){
|
||||
S3FS_PRN_ERR("Key(%s) value is empty.", exp_key);
|
||||
S3FS_XMLXPATHFREEOBJECT(exp);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
S3FS_XMLXPATHFREEOBJECT(exp);
|
||||
return exp_value;
|
||||
}
|
||||
|
||||
bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list)
|
||||
{
|
||||
if(!doc){
|
||||
return false;
|
||||
}
|
||||
|
||||
xmlXPathContextPtr ctx = xmlXPathNewContext(doc);
|
||||
|
||||
string xmlnsurl;
|
||||
string ex_upload = "//";
|
||||
string ex_key;
|
||||
string ex_id;
|
||||
string ex_date;
|
||||
|
||||
if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){
|
||||
xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str());
|
||||
ex_upload += "s3:";
|
||||
ex_key += "s3:";
|
||||
ex_id += "s3:";
|
||||
ex_date += "s3:";
|
||||
}
|
||||
ex_upload += "Upload";
|
||||
ex_key += "Key";
|
||||
ex_id += "UploadId";
|
||||
ex_date += "Initiated";
|
||||
|
||||
// get "Upload" Tags
|
||||
xmlXPathObjectPtr upload_xp;
|
||||
if(NULL == (upload_xp = xmlXPathEvalExpression((xmlChar*)ex_upload.c_str(), ctx))){
|
||||
S3FS_PRN_ERR("xmlXPathEvalExpression returns null.");
|
||||
return false;
|
||||
}
|
||||
if(xmlXPathNodeSetIsEmpty(upload_xp->nodesetval)){
|
||||
S3FS_PRN_INFO("upload_xp->nodesetval is empty.");
|
||||
S3FS_XMLXPATHFREEOBJECT(upload_xp);
|
||||
S3FS_XMLXPATHFREECONTEXT(ctx);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Make list
|
||||
int cnt;
|
||||
xmlNodeSetPtr upload_nodes;
|
||||
list.clear();
|
||||
for(cnt = 0, upload_nodes = upload_xp->nodesetval; cnt < upload_nodes->nodeNr; cnt++){
|
||||
ctx->node = upload_nodes->nodeTab[cnt];
|
||||
|
||||
INCOMP_MPU_INFO part;
|
||||
xmlChar* ex_value;
|
||||
|
||||
// search "Key" tag
|
||||
if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_key.c_str()))){
|
||||
continue;
|
||||
}
|
||||
if('/' != *((char*)ex_value)){
|
||||
part.key = "/";
|
||||
}else{
|
||||
part.key = "";
|
||||
}
|
||||
part.key += (char*)ex_value;
|
||||
S3FS_XMLFREE(ex_value);
|
||||
|
||||
// search "UploadId" tag
|
||||
if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_id.c_str()))){
|
||||
continue;
|
||||
}
|
||||
part.id = (char*)ex_value;
|
||||
S3FS_XMLFREE(ex_value);
|
||||
|
||||
// search "Initiated" tag
|
||||
if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_date.c_str()))){
|
||||
continue;
|
||||
}
|
||||
part.date = (char*)ex_value;
|
||||
S3FS_XMLFREE(ex_value);
|
||||
|
||||
list.push_back(part);
|
||||
}
|
||||
|
||||
S3FS_XMLXPATHFREEOBJECT(upload_xp);
|
||||
S3FS_XMLXPATHFREECONTEXT(ctx);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool is_truncated(xmlDocPtr doc)
|
||||
{
|
||||
bool result = false;
|
||||
|
||||
xmlChar* strTruncate = get_base_exp(doc, "IsTruncated");
|
||||
if(!strTruncate){
|
||||
return false;
|
||||
}
|
||||
if(0 == strcasecmp((const char*)strTruncate, "true")){
|
||||
result = true;
|
||||
}
|
||||
xmlFree(strTruncate);
|
||||
return result;
|
||||
}
|
||||
|
||||
int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head)
|
||||
{
|
||||
xmlXPathObjectPtr contents_xp;
|
||||
xmlNodeSetPtr content_nodes;
|
||||
|
||||
if(NULL == (contents_xp = xmlXPathEvalExpression((xmlChar*)ex_contents, ctx))){
|
||||
S3FS_PRN_ERR("xmlXPathEvalExpression returns null.");
|
||||
return -1;
|
||||
}
|
||||
if(xmlXPathNodeSetIsEmpty(contents_xp->nodesetval)){
|
||||
S3FS_PRN_DBG("contents_xp->nodesetval is empty.");
|
||||
S3FS_XMLXPATHFREEOBJECT(contents_xp);
|
||||
return 0;
|
||||
}
|
||||
content_nodes = contents_xp->nodesetval;
|
||||
|
||||
bool is_dir;
|
||||
string stretag;
|
||||
int i;
|
||||
for(i = 0; i < content_nodes->nodeNr; i++){
|
||||
ctx->node = content_nodes->nodeTab[i];
|
||||
|
||||
// object name
|
||||
xmlXPathObjectPtr key;
|
||||
if(NULL == (key = xmlXPathEvalExpression((xmlChar*)ex_key, ctx))){
|
||||
S3FS_PRN_WARN("key is null. but continue.");
|
||||
continue;
|
||||
}
|
||||
if(xmlXPathNodeSetIsEmpty(key->nodesetval)){
|
||||
S3FS_PRN_WARN("node is empty. but continue.");
|
||||
xmlXPathFreeObject(key);
|
||||
continue;
|
||||
}
|
||||
xmlNodeSetPtr key_nodes = key->nodesetval;
|
||||
char* name = get_object_name(doc, key_nodes->nodeTab[0]->xmlChildrenNode, path);
|
||||
|
||||
if(!name){
|
||||
S3FS_PRN_WARN("name is something wrong. but continue.");
|
||||
|
||||
}else if((const char*)name != c_strErrorObjectName){
|
||||
is_dir = isCPrefix ? true : false;
|
||||
stretag = "";
|
||||
|
||||
if(!isCPrefix && ex_etag){
|
||||
// Get ETag
|
||||
xmlXPathObjectPtr ETag;
|
||||
if(NULL != (ETag = xmlXPathEvalExpression((xmlChar*)ex_etag, ctx))){
|
||||
if(xmlXPathNodeSetIsEmpty(ETag->nodesetval)){
|
||||
S3FS_PRN_INFO("ETag->nodesetval is empty.");
|
||||
}else{
|
||||
xmlNodeSetPtr etag_nodes = ETag->nodesetval;
|
||||
xmlChar* petag = xmlNodeListGetString(doc, etag_nodes->nodeTab[0]->xmlChildrenNode, 1);
|
||||
if(petag){
|
||||
stretag = (char*)petag;
|
||||
xmlFree(petag);
|
||||
}
|
||||
}
|
||||
xmlXPathFreeObject(ETag);
|
||||
}
|
||||
}
|
||||
if(!head.insert(name, (0 < stretag.length() ? stretag.c_str() : NULL), is_dir)){
|
||||
S3FS_PRN_ERR("insert_object returns with error.");
|
||||
xmlXPathFreeObject(key);
|
||||
xmlXPathFreeObject(contents_xp);
|
||||
free(name);
|
||||
S3FS_MALLOCTRIM(0);
|
||||
return -1;
|
||||
}
|
||||
free(name);
|
||||
}else{
|
||||
S3FS_PRN_DBG("name is file or subdir in dir. but continue.");
|
||||
}
|
||||
xmlXPathFreeObject(key);
|
||||
}
|
||||
S3FS_XMLXPATHFREEOBJECT(contents_xp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head)
|
||||
{
|
||||
string xmlnsurl;
|
||||
string ex_contents = "//";
|
||||
string ex_key;
|
||||
string ex_cprefix = "//";
|
||||
string ex_prefix;
|
||||
string ex_etag;
|
||||
|
||||
if(!doc){
|
||||
return -1;
|
||||
}
|
||||
|
||||
// If there is not <Prefix>, use path instead of it.
|
||||
xmlChar* pprefix = get_prefix(doc);
|
||||
string prefix = (pprefix ? (char*)pprefix : path ? path : "");
|
||||
if(pprefix){
|
||||
xmlFree(pprefix);
|
||||
}
|
||||
|
||||
xmlXPathContextPtr ctx = xmlXPathNewContext(doc);
|
||||
|
||||
if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){
|
||||
xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str());
|
||||
ex_contents+= "s3:";
|
||||
ex_key += "s3:";
|
||||
ex_cprefix += "s3:";
|
||||
ex_prefix += "s3:";
|
||||
ex_etag += "s3:";
|
||||
}
|
||||
ex_contents+= "Contents";
|
||||
ex_key += "Key";
|
||||
ex_cprefix += "CommonPrefixes";
|
||||
ex_prefix += "Prefix";
|
||||
ex_etag += "ETag";
|
||||
|
||||
if(-1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_contents.c_str(), ex_key.c_str(), ex_etag.c_str(), 0, head) ||
|
||||
-1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_cprefix.c_str(), ex_prefix.c_str(), NULL, 1, head) )
|
||||
{
|
||||
S3FS_PRN_ERR("append_objects_from_xml_ex returns with error.");
|
||||
S3FS_XMLXPATHFREECONTEXT(ctx);
|
||||
return -1;
|
||||
}
|
||||
S3FS_XMLXPATHFREECONTEXT(ctx);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
// Utility functions
//-------------------------------------------------------------------
bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value)
{
    bool result = false;

    if(!data || !key){
        return false;
    }
    value.clear();

    xmlDocPtr doc;
    if(NULL == (doc = xmlReadMemory(data, len, "", NULL, 0))){
        return false;
    }

    if(NULL == doc->children){
        S3FS_XMLFREEDOC(doc);
        return false;
    }
    for(xmlNodePtr cur_node = doc->children->children; NULL != cur_node; cur_node = cur_node->next){
        // For DEBUG
        // string cur_node_name(reinterpret_cast<const char *>(cur_node->name));
        // printf("cur_node_name: %s\n", cur_node_name.c_str());

        if(XML_ELEMENT_NODE == cur_node->type){
            string elementName = reinterpret_cast<const char*>(cur_node->name);
            // For DEBUG
            // printf("elementName: %s\n", elementName.c_str());

            if(cur_node->children){
                if(XML_TEXT_NODE == cur_node->children->type){
                    if(elementName == key) {
                        value  = reinterpret_cast<const char *>(cur_node->children->content);
                        result = true;
                        break;
                    }
                }
            }
        }
    }
    S3FS_XMLFREEDOC(doc);

    return result;
}

/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/
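As a reading aid, here is a hypothetical stand-alone caller of simple_parse_xml() defined above; the XML body and the key name are invented for the example and are not part of this commit:

#include <cstdio>
#include <cstring>
#include <string>
#include "s3fs_xml.h"

static void example_simple_parse()
{
    const char* body = "<InitiateMultipartUploadResult>"
                       "<UploadId>EXAMPLEID</UploadId>"
                       "</InitiateMultipartUploadResult>";
    std::string upload_id;
    if(simple_parse_xml(body, strlen(body), "UploadId", upload_id)){
        printf("UploadId = %s\n", upload_id.c_str());   // prints EXAMPLEID
    }
}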
53 src/s3fs_xml.h (new file)
@@ -0,0 +1,53 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef S3FS_S3FS_XML_H_
#define S3FS_S3FS_XML_H_

#include <libxml/xpath.h>
#include <libxml/xpathInternals.h>
#include <libxml/tree.h>

#include <string>

#include "s3objlist.h"
#include "mpu_util.h"

//-------------------------------------------------------------------
// Functions
//-------------------------------------------------------------------
bool is_truncated(xmlDocPtr doc);
int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head);
int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head);
xmlChar* get_next_marker(xmlDocPtr doc);
bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list);

bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value);

#endif // S3FS_S3FS_XML_H_

/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/
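A sketch of how the declarations above fit together when parsing a bucket listing; the ListBucketResult body is hypothetical and error handling is reduced to a bare minimum (not part of this commit):

#include <libxml/parser.h>
#include <cstring>
#include <string>
#include "s3fs_xml.h"
#include "s3objlist.h"

static bool example_list_parse(const char* body)
{
    xmlDocPtr doc = xmlReadMemory(body, strlen(body), "", NULL, 0);
    if(!doc){
        return false;
    }
    S3ObjList head;
    bool result = (0 == append_objects_from_xml("/mydir/", doc, head));
    xmlFreeDoc(doc);

    s3obj_list_t names;
    head.GetNameList(names);        // normalized names, trailing "/" cut off
    return result;
}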
286 src/s3objlist.cpp (new file)
@@ -0,0 +1,286 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <cstdio>
#include <cstdlib>

#include "common.h"
#include "s3fs.h"
#include "s3objlist.h"

using namespace std;

//-------------------------------------------------------------------
// Class S3ObjList
//-------------------------------------------------------------------
// New class S3ObjList is base on old s3_object struct.
// This class is for S3 compatible clients.
//
// If name is terminated by "/", it is forced dir type.
// If name is terminated by "_$folder$", it is forced dir type.
// If is_dir is true and name is not terminated by "/", the name is added "/".
//
bool S3ObjList::insert(const char* name, const char* etag, bool is_dir)
{
    if(!name || '\0' == name[0]){
        return false;
    }

    s3obj_t::iterator iter;
    string newname;
    string orgname = name;

    // Normalization
    string::size_type pos = orgname.find("_$folder$");
    if(string::npos != pos){
        newname = orgname.substr(0, pos);
        is_dir  = true;
    }else{
        newname = orgname;
    }
    if(is_dir){
        if('/' != newname[newname.length() - 1]){
            newname += "/";
        }
    }else{
        if('/' == newname[newname.length() - 1]){
            is_dir = true;
        }
    }

    // Check derived name object.
    if(is_dir){
        string chkname = newname.substr(0, newname.length() - 1);
        if(objects.end() != (iter = objects.find(chkname))){
            // found "dir" object --> remove it.
            objects.erase(iter);
        }
    }else{
        string chkname = newname + "/";
        if(objects.end() != (iter = objects.find(chkname))){
            // found "dir/" object --> not add new object.
            // and add normalization
            return insert_normalized(orgname.c_str(), chkname.c_str(), true);
        }
    }

    // Add object
    if(objects.end() != (iter = objects.find(newname))){
        // Found same object --> update information.
        (*iter).second.normalname.erase();
        (*iter).second.orgname = orgname;
        (*iter).second.is_dir  = is_dir;
        if(etag){
            (*iter).second.etag = string(etag);  // over write
        }
    }else{
        // add new object
        s3obj_entry newobject;
        newobject.orgname = orgname;
        newobject.is_dir  = is_dir;
        if(etag){
            newobject.etag = etag;
        }
        objects[newname] = newobject;
    }

    // add normalization
    return insert_normalized(orgname.c_str(), newname.c_str(), is_dir);
}
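// ----------------------------------------------------------------------
// Illustration only (not part of this commit): how the normalization rules
// in the comment above play out for a few hypothetical object names.
// ----------------------------------------------------------------------
static void example_insert_rules()
{
    S3ObjList list;
    list.insert("photos_$folder$");                 // forced dir -> stored as "photos/"
    list.insert("photos/cat.jpg", "\"etag1\"");     // plain file entry
    list.insert("logs", NULL, true);                // is_dir=true -> stored as "logs/"

    // "photos_$folder$" is kept as an alias whose normalized name is "photos/".
    std::string n = list.GetNormalizedName("photos_$folder$");   // -> "photos/"
    bool dir      = list.IsDir("logs/");                         // -> true
    (void)n; (void)dir;
}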
|
||||
|
||||
bool S3ObjList::insert_normalized(const char* name, const char* normalized, bool is_dir)
|
||||
{
|
||||
if(!name || '\0' == name[0] || !normalized || '\0' == normalized[0]){
|
||||
return false;
|
||||
}
|
||||
if(0 == strcmp(name, normalized)){
|
||||
return true;
|
||||
}
|
||||
|
||||
s3obj_t::iterator iter;
|
||||
if(objects.end() != (iter = objects.find(name))){
|
||||
// found name --> over write
|
||||
iter->second.orgname.erase();
|
||||
iter->second.etag.erase();
|
||||
iter->second.normalname = normalized;
|
||||
iter->second.is_dir = is_dir;
|
||||
}else{
|
||||
// not found --> add new object
|
||||
s3obj_entry newobject;
|
||||
newobject.normalname = normalized;
|
||||
newobject.is_dir = is_dir;
|
||||
objects[name] = newobject;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
const s3obj_entry* S3ObjList::GetS3Obj(const char* name) const
|
||||
{
|
||||
s3obj_t::const_iterator iter;
|
||||
|
||||
if(!name || '\0' == name[0]){
|
||||
return NULL;
|
||||
}
|
||||
if(objects.end() == (iter = objects.find(name))){
|
||||
return NULL;
|
||||
}
|
||||
return &((*iter).second);
|
||||
}
|
||||
|
||||
string S3ObjList::GetOrgName(const char* name) const
|
||||
{
|
||||
const s3obj_entry* ps3obj;
|
||||
|
||||
if(!name || '\0' == name[0]){
|
||||
return string("");
|
||||
}
|
||||
if(NULL == (ps3obj = GetS3Obj(name))){
|
||||
return string("");
|
||||
}
|
||||
return ps3obj->orgname;
|
||||
}
|
||||
|
||||
string S3ObjList::GetNormalizedName(const char* name) const
|
||||
{
|
||||
const s3obj_entry* ps3obj;
|
||||
|
||||
if(!name || '\0' == name[0]){
|
||||
return string("");
|
||||
}
|
||||
if(NULL == (ps3obj = GetS3Obj(name))){
|
||||
return string("");
|
||||
}
|
||||
if(0 == (ps3obj->normalname).length()){
|
||||
return string(name);
|
||||
}
|
||||
return ps3obj->normalname;
|
||||
}
|
||||
|
||||
string S3ObjList::GetETag(const char* name) const
|
||||
{
|
||||
const s3obj_entry* ps3obj;
|
||||
|
||||
if(!name || '\0' == name[0]){
|
||||
return string("");
|
||||
}
|
||||
if(NULL == (ps3obj = GetS3Obj(name))){
|
||||
return string("");
|
||||
}
|
||||
return ps3obj->etag;
|
||||
}
|
||||
|
||||
bool S3ObjList::IsDir(const char* name) const
|
||||
{
|
||||
const s3obj_entry* ps3obj;
|
||||
|
||||
if(NULL == (ps3obj = GetS3Obj(name))){
|
||||
return false;
|
||||
}
|
||||
return ps3obj->is_dir;
|
||||
}
|
||||
|
||||
bool S3ObjList::GetLastName(std::string& lastname) const
|
||||
{
|
||||
bool result = false;
|
||||
lastname = "";
|
||||
for(s3obj_t::const_iterator iter = objects.begin(); iter != objects.end(); ++iter){
|
||||
if((*iter).second.orgname.length()){
|
||||
if(0 > strcmp(lastname.c_str(), (*iter).second.orgname.c_str())){
|
||||
lastname = (*iter).second.orgname;
|
||||
result = true;
|
||||
}
|
||||
}else{
|
||||
if(0 > strcmp(lastname.c_str(), (*iter).second.normalname.c_str())){
|
||||
lastname = (*iter).second.normalname;
|
||||
result = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
bool S3ObjList::GetNameList(s3obj_list_t& list, bool OnlyNormalized, bool CutSlash) const
|
||||
{
|
||||
s3obj_t::const_iterator iter;
|
||||
|
||||
for(iter = objects.begin(); objects.end() != iter; ++iter){
|
||||
if(OnlyNormalized && 0 != (*iter).second.normalname.length()){
|
||||
continue;
|
||||
}
|
||||
string name = (*iter).first;
|
||||
if(CutSlash && 1 < name.length() && '/' == name[name.length() - 1]){
|
||||
// only "/" string is skipped this.
|
||||
name = name.substr(0, name.length() - 1);
|
||||
}
|
||||
list.push_back(name);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
typedef std::map<std::string, bool> s3obj_h_t;

bool S3ObjList::MakeHierarchizedList(s3obj_list_t& list, bool haveSlash)
{
    s3obj_h_t                    h_map;
    s3obj_h_t::iterator          hiter;
    s3obj_list_t::const_iterator liter;

    for(liter = list.begin(); list.end() != liter; ++liter){
        string strtmp = (*liter);
        if(1 < strtmp.length() && '/' == strtmp[strtmp.length() - 1]){
            strtmp = strtmp.substr(0, strtmp.length() - 1);
        }
        h_map[strtmp] = true;

        // check hierarchized directory
        for(string::size_type pos = strtmp.find_last_of('/'); string::npos != pos; pos = strtmp.find_last_of('/')){
            strtmp = strtmp.substr(0, pos);
            if(0 == strtmp.length() || "/" == strtmp){
                break;
            }
            if(h_map.end() == h_map.find(strtmp)){
                // not found
                h_map[strtmp] = false;
            }
        }
    }

    // check map and add lost hierarchized directory.
    for(hiter = h_map.begin(); hiter != h_map.end(); ++hiter){
        if(false == (*hiter).second){
            // add hierarchized directory.
            string strtmp = (*hiter).first;
            if(haveSlash){
                strtmp += "/";
            }
            list.push_back(strtmp);
        }
    }
    return true;
}

/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/
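A hypothetical illustration (not part of the commit) of what MakeHierarchizedList() adds for a flat key listing:

#include <list>
#include <string>
#include "s3objlist.h"

static void example_hierarchize()
{
    s3obj_list_t list;
    list.push_back("a/b/c.txt");
    list.push_back("a/d/");

    // Fills in the parent directories the listing did not contain explicitly;
    // with haveSlash=true the list additionally gets "a/" and "a/b/".
    S3ObjList::MakeHierarchizedList(list, true);
}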
79 src/s3objlist.h (new file)
@@ -0,0 +1,79 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_S3OBJLIST_H_
|
||||
#define S3FS_S3OBJLIST_H_
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Structure / Typedef
|
||||
//-------------------------------------------------------------------
|
||||
struct s3obj_entry{
|
||||
std::string normalname; // normalized name: if empty, object is normalized name.
|
||||
std::string orgname; // original name: if empty, object is original name.
|
||||
std::string etag;
|
||||
bool is_dir;
|
||||
|
||||
s3obj_entry() : is_dir(false) {}
|
||||
};
|
||||
|
||||
typedef std::map<std::string, struct s3obj_entry> s3obj_t;
|
||||
typedef std::list<std::string> s3obj_list_t;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class S3ObjList
|
||||
//-------------------------------------------------------------------
|
||||
class S3ObjList
|
||||
{
|
||||
private:
|
||||
s3obj_t objects;
|
||||
|
||||
private:
|
||||
bool insert_normalized(const char* name, const char* normalized, bool is_dir);
|
||||
const s3obj_entry* GetS3Obj(const char* name) const;
|
||||
|
||||
s3obj_t::const_iterator begin(void) const { return objects.begin(); }
|
||||
s3obj_t::const_iterator end(void) const { return objects.end(); }
|
||||
|
||||
public:
|
||||
S3ObjList() {}
|
||||
~S3ObjList() {}
|
||||
|
||||
bool IsEmpty(void) const { return objects.empty(); }
|
||||
bool insert(const char* name, const char* etag = NULL, bool is_dir = false);
|
||||
std::string GetOrgName(const char* name) const;
|
||||
std::string GetNormalizedName(const char* name) const;
|
||||
std::string GetETag(const char* name) const;
|
||||
bool IsDir(const char* name) const;
|
||||
bool GetNameList(s3obj_list_t& list, bool OnlyNormalized = true, bool CutSlash = true) const;
|
||||
bool GetLastName(std::string& lastname) const;
|
||||
|
||||
static bool MakeHierarchizedList(s3obj_list_t& list, bool haveSlash);
|
||||
};
|
||||
|
||||
#endif // S3FS_S3OBJLIST_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
@@ -20,33 +20,16 @@
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <cerrno>
|
||||
#include <syslog.h>
|
||||
#include <pthread.h>
|
||||
#include <curl/curl.h>
|
||||
#include <csignal>
|
||||
|
||||
#include <algorithm>
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include <list>
|
||||
#include <vector>
|
||||
#include <pthread.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "sighandlers.h"
|
||||
#include "curl.h"
|
||||
#include "fdcache.h"
|
||||
#include "psemaphore.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
//-------------------------------------------------------------------
|
||||
s3fs_log_level debug_level = S3FS_LOG_CRIT;
|
||||
const char* s3fs_log_nest[S3FS_LOG_NEST_MAX] = {"", " ", " ", " "};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class S3fsSignals
|
||||
//-------------------------------------------------------------------
|
||||
@@ -58,133 +41,133 @@ bool S3fsSignals::enableUsr1 = false;
|
||||
//-------------------------------------------------------------------
|
||||
bool S3fsSignals::Initialize()
|
||||
{
|
||||
if(!S3fsSignals::pSingleton){
|
||||
S3fsSignals::pSingleton = new S3fsSignals;
|
||||
}
|
||||
return true;
|
||||
if(!S3fsSignals::pSingleton){
|
||||
S3fsSignals::pSingleton = new S3fsSignals;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool S3fsSignals::Destroy()
|
||||
{
|
||||
if(S3fsSignals::pSingleton){
|
||||
delete S3fsSignals::pSingleton;
|
||||
}
|
||||
return true;
|
||||
if(S3fsSignals::pSingleton){
|
||||
delete S3fsSignals::pSingleton;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void S3fsSignals::HandlerUSR1(int sig)
|
||||
{
|
||||
if(SIGUSR1 != sig){
|
||||
S3FS_PRN_ERR("The handler for SIGUSR1 received signal(%d)", sig);
|
||||
return;
|
||||
}
|
||||
if(SIGUSR1 != sig){
|
||||
S3FS_PRN_ERR("The handler for SIGUSR1 received signal(%d)", sig);
|
||||
return;
|
||||
}
|
||||
|
||||
S3fsSignals* pSigobj = S3fsSignals::get();
|
||||
if(!pSigobj){
|
||||
S3FS_PRN_ERR("S3fsSignals object is not initialized.");
|
||||
return;
|
||||
}
|
||||
S3fsSignals* pSigobj = S3fsSignals::get();
|
||||
if(!pSigobj){
|
||||
S3FS_PRN_ERR("S3fsSignals object is not initialized.");
|
||||
return;
|
||||
}
|
||||
|
||||
if(!pSigobj->WakeupUsr1Thread()){
|
||||
S3FS_PRN_ERR("Failed to wakeup the thread for SIGUSR1.");
|
||||
return;
|
||||
}
|
||||
if(!pSigobj->WakeupUsr1Thread()){
|
||||
S3FS_PRN_ERR("Failed to wakeup the thread for SIGUSR1.");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
bool S3fsSignals::SetUsr1Handler(const char* path)
|
||||
{
|
||||
if(!FdManager::HaveLseekHole()){
|
||||
S3FS_PRN_ERR("Could not set SIGUSR1 for checking cache, because this system does not support SEEK_DATA/SEEK_HOLE in lseek function.");
|
||||
return false;
|
||||
}
|
||||
if(!FdManager::HaveLseekHole()){
|
||||
S3FS_PRN_ERR("Could not set SIGUSR1 for checking cache, because this system does not support SEEK_DATA/SEEK_HOLE in lseek function.");
|
||||
return false;
|
||||
}
|
||||
|
||||
// set output file
|
||||
if(!FdManager::SetCacheCheckOutput(path)){
|
||||
S3FS_PRN_ERR("Could not set output file(%s) for checking cache.", path ? path : "null(stdout)");
|
||||
return false;
|
||||
}
|
||||
// set output file
|
||||
if(!FdManager::SetCacheCheckOutput(path)){
|
||||
S3FS_PRN_ERR("Could not set output file(%s) for checking cache.", path ? path : "null(stdout)");
|
||||
return false;
|
||||
}
|
||||
|
||||
S3fsSignals::enableUsr1 = true;
|
||||
S3fsSignals::enableUsr1 = true;
|
||||
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
void* S3fsSignals::CheckCacheWorker(void* arg)
|
||||
{
|
||||
Semaphore* pSem = static_cast<Semaphore*>(arg);
|
||||
if(!pSem){
|
||||
pthread_exit(NULL);
|
||||
}
|
||||
if(!S3fsSignals::enableUsr1){
|
||||
pthread_exit(NULL);
|
||||
}
|
||||
|
||||
// wait and loop
|
||||
while(S3fsSignals::enableUsr1){
|
||||
// wait
|
||||
pSem->wait();
|
||||
Semaphore* pSem = static_cast<Semaphore*>(arg);
|
||||
if(!pSem){
|
||||
pthread_exit(NULL);
|
||||
}
|
||||
if(!S3fsSignals::enableUsr1){
|
||||
break; // assap
|
||||
pthread_exit(NULL);
|
||||
}
|
||||
|
||||
// check all cache
|
||||
if(!FdManager::get()->CheckAllCache()){
|
||||
S3FS_PRN_ERR("Processing failed due to some problem.");
|
||||
}
|
||||
// wait and loop
|
||||
while(S3fsSignals::enableUsr1){
|
||||
// wait
|
||||
pSem->wait();
|
||||
if(!S3fsSignals::enableUsr1){
|
||||
break; // assap
|
||||
}
|
||||
|
||||
// do not allow request queuing
|
||||
for(int value = pSem->get_value(); 0 < value; value = pSem->get_value()){
|
||||
pSem->wait();
|
||||
// check all cache
|
||||
if(!FdManager::get()->CheckAllCache()){
|
||||
S3FS_PRN_ERR("Processing failed due to some problem.");
|
||||
}
|
||||
|
||||
// do not allow request queuing
|
||||
for(int value = pSem->get_value(); 0 < value; value = pSem->get_value()){
|
||||
pSem->wait();
|
||||
}
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void S3fsSignals::HandlerUSR2(int sig)
|
||||
{
|
||||
if(SIGUSR2 == sig){
|
||||
S3fsSignals::BumpupLogLevel();
|
||||
}else{
|
||||
S3FS_PRN_ERR("The handler for SIGUSR2 received signal(%d)", sig);
|
||||
}
|
||||
if(SIGUSR2 == sig){
|
||||
S3fsSignals::BumpupLogLevel();
|
||||
}else{
|
||||
S3FS_PRN_ERR("The handler for SIGUSR2 received signal(%d)", sig);
|
||||
}
|
||||
}
|
||||
|
||||
bool S3fsSignals::InitUsr2Handler()
|
||||
{
|
||||
struct sigaction sa;
|
||||
struct sigaction sa;
|
||||
|
||||
memset(&sa, 0, sizeof(struct sigaction));
|
||||
sa.sa_handler = S3fsSignals::HandlerUSR2;
|
||||
sa.sa_flags = SA_RESTART;
|
||||
if(0 != sigaction(SIGUSR2, &sa, NULL)){
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
memset(&sa, 0, sizeof(struct sigaction));
|
||||
sa.sa_handler = S3fsSignals::HandlerUSR2;
|
||||
sa.sa_flags = SA_RESTART;
|
||||
if(0 != sigaction(SIGUSR2, &sa, NULL)){
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
s3fs_log_level S3fsSignals::SetLogLevel(s3fs_log_level level)
|
||||
{
|
||||
if(level == debug_level){
|
||||
return debug_level;
|
||||
}
|
||||
s3fs_log_level old = debug_level;
|
||||
debug_level = level;
|
||||
setlogmask(LOG_UPTO(S3FS_LOG_LEVEL_TO_SYSLOG(debug_level)));
|
||||
S3FS_PRN_CRIT("change debug level from %sto %s", S3FS_LOG_LEVEL_STRING(old), S3FS_LOG_LEVEL_STRING(debug_level));
|
||||
return old;
|
||||
if(level == debug_level){
|
||||
return debug_level;
|
||||
}
|
||||
s3fs_log_level old = debug_level;
|
||||
debug_level = level;
|
||||
setlogmask(LOG_UPTO(S3FS_LOG_LEVEL_TO_SYSLOG(debug_level)));
|
||||
S3FS_PRN_CRIT("change debug level from %sto %s", S3FS_LOG_LEVEL_STRING(old), S3FS_LOG_LEVEL_STRING(debug_level));
|
||||
return old;
|
||||
}
|
||||
|
||||
s3fs_log_level S3fsSignals::BumpupLogLevel()
|
||||
{
|
||||
s3fs_log_level old = debug_level;
|
||||
debug_level = ( S3FS_LOG_CRIT == debug_level ? S3FS_LOG_ERR :
|
||||
S3FS_LOG_ERR == debug_level ? S3FS_LOG_WARN :
|
||||
S3FS_LOG_WARN == debug_level ? S3FS_LOG_INFO :
|
||||
S3FS_LOG_INFO == debug_level ? S3FS_LOG_DBG :
|
||||
S3FS_LOG_CRIT );
|
||||
setlogmask(LOG_UPTO(S3FS_LOG_LEVEL_TO_SYSLOG(debug_level)));
|
||||
S3FS_PRN_CRIT("change debug level from %sto %s", S3FS_LOG_LEVEL_STRING(old), S3FS_LOG_LEVEL_STRING(debug_level));
|
||||
return old;
|
||||
s3fs_log_level old = debug_level;
|
||||
debug_level = ( S3FS_LOG_CRIT == debug_level ? S3FS_LOG_ERR :
|
||||
S3FS_LOG_ERR == debug_level ? S3FS_LOG_WARN :
|
||||
S3FS_LOG_WARN == debug_level ? S3FS_LOG_INFO :
|
||||
S3FS_LOG_INFO == debug_level ? S3FS_LOG_DBG :
|
||||
S3FS_LOG_CRIT );
|
||||
setlogmask(LOG_UPTO(S3FS_LOG_LEVEL_TO_SYSLOG(debug_level)));
|
||||
S3FS_PRN_CRIT("change debug level from %sto %s", S3FS_LOG_LEVEL_STRING(old), S3FS_LOG_LEVEL_STRING(debug_level));
|
||||
return old;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@@ -192,100 +175,100 @@ s3fs_log_level S3fsSignals::BumpupLogLevel()
|
||||
//-------------------------------------------------------------------
|
||||
S3fsSignals::S3fsSignals() : pThreadUsr1(NULL), pSemUsr1(NULL)
|
||||
{
|
||||
if(S3fsSignals::enableUsr1){
|
||||
if(!InitUsr1Handler()){
|
||||
S3FS_PRN_ERR("failed creating thread for SIGUSR1 handler, but continue...");
|
||||
if(S3fsSignals::enableUsr1){
|
||||
if(!InitUsr1Handler()){
|
||||
S3FS_PRN_ERR("failed creating thread for SIGUSR1 handler, but continue...");
|
||||
}
|
||||
}
|
||||
if(!S3fsSignals::InitUsr2Handler()){
|
||||
S3FS_PRN_ERR("failed to initialize SIGUSR2 handler for bumping log level, but continue...");
|
||||
}
|
||||
}
|
||||
if(!S3fsSignals::InitUsr2Handler()){
|
||||
S3FS_PRN_ERR("failed to initialize SIGUSR2 handler for bumping log level, but continue...");
|
||||
}
|
||||
}
|
||||
|
||||
S3fsSignals::~S3fsSignals()
|
||||
{
|
||||
if(S3fsSignals::enableUsr1){
|
||||
if(!DestroyUsr1Handler()){
|
||||
S3FS_PRN_ERR("failed stopping thread for SIGUSR1 handler, but continue...");
|
||||
if(S3fsSignals::enableUsr1){
|
||||
if(!DestroyUsr1Handler()){
|
||||
S3FS_PRN_ERR("failed stopping thread for SIGUSR1 handler, but continue...");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool S3fsSignals::InitUsr1Handler()
|
||||
{
|
||||
if(pThreadUsr1 || pSemUsr1){
|
||||
S3FS_PRN_ERR("Already run thread for SIGUSR1");
|
||||
return false;
|
||||
}
|
||||
if(pThreadUsr1 || pSemUsr1){
|
||||
S3FS_PRN_ERR("Already run thread for SIGUSR1");
|
||||
return false;
|
||||
}
|
||||
|
||||
// create thread
|
||||
int result;
|
||||
pSemUsr1 = new Semaphore(0);
|
||||
pThreadUsr1 = new pthread_t;
|
||||
if(0 != (result = pthread_create(pThreadUsr1, NULL, S3fsSignals::CheckCacheWorker, static_cast<void*>(pSemUsr1)))){
|
||||
S3FS_PRN_ERR("Could not create thread for SIGUSR1 by %d", result);
|
||||
delete pSemUsr1;
|
||||
delete pThreadUsr1;
|
||||
pSemUsr1 = NULL;
|
||||
pThreadUsr1 = NULL;
|
||||
return false;
|
||||
}
|
||||
// create thread
|
||||
int result;
|
||||
pSemUsr1 = new Semaphore(0);
|
||||
pThreadUsr1 = new pthread_t;
|
||||
if(0 != (result = pthread_create(pThreadUsr1, NULL, S3fsSignals::CheckCacheWorker, static_cast<void*>(pSemUsr1)))){
|
||||
S3FS_PRN_ERR("Could not create thread for SIGUSR1 by %d", result);
|
||||
delete pSemUsr1;
|
||||
delete pThreadUsr1;
|
||||
pSemUsr1 = NULL;
|
||||
pThreadUsr1 = NULL;
|
||||
return false;
|
||||
}
|
||||
|
||||
// set handler
|
||||
struct sigaction sa;
|
||||
memset(&sa, 0, sizeof(struct sigaction));
|
||||
sa.sa_handler = S3fsSignals::HandlerUSR1;
|
||||
sa.sa_flags = SA_RESTART;
|
||||
if(0 != sigaction(SIGUSR1, &sa, NULL)){
|
||||
S3FS_PRN_ERR("Could not set signal handler for SIGUSR1");
|
||||
DestroyUsr1Handler();
|
||||
return false;
|
||||
}
|
||||
// set handler
|
||||
struct sigaction sa;
|
||||
memset(&sa, 0, sizeof(struct sigaction));
|
||||
sa.sa_handler = S3fsSignals::HandlerUSR1;
|
||||
sa.sa_flags = SA_RESTART;
|
||||
if(0 != sigaction(SIGUSR1, &sa, NULL)){
|
||||
S3FS_PRN_ERR("Could not set signal handler for SIGUSR1");
|
||||
DestroyUsr1Handler();
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool S3fsSignals::DestroyUsr1Handler()
|
||||
{
|
||||
if(!pThreadUsr1 || !pSemUsr1){
|
||||
return false;
|
||||
}
|
||||
// for thread exit
|
||||
S3fsSignals::enableUsr1 = false;
|
||||
if(!pThreadUsr1 || !pSemUsr1){
|
||||
return false;
|
||||
}
|
||||
// for thread exit
|
||||
S3fsSignals::enableUsr1 = false;
|
||||
|
||||
// wakeup thread
|
||||
pSemUsr1->post();
|
||||
// wakeup thread
|
||||
pSemUsr1->post();
|
||||
|
||||
// wait for thread exiting
|
||||
void* retval = NULL;
|
||||
int result;
|
||||
if(0 != (result = pthread_join(*pThreadUsr1, &retval))){
|
||||
S3FS_PRN_ERR("Could not stop thread for SIGUSR1 by %d", result);
|
||||
return false;
|
||||
}
|
||||
delete pSemUsr1;
|
||||
delete pThreadUsr1;
|
||||
pSemUsr1 = NULL;
|
||||
pThreadUsr1 = NULL;
|
||||
// wait for thread exiting
|
||||
void* retval = NULL;
|
||||
int result;
|
||||
if(0 != (result = pthread_join(*pThreadUsr1, &retval))){
|
||||
S3FS_PRN_ERR("Could not stop thread for SIGUSR1 by %d", result);
|
||||
return false;
|
||||
}
|
||||
delete pSemUsr1;
|
||||
delete pThreadUsr1;
|
||||
pSemUsr1 = NULL;
|
||||
pThreadUsr1 = NULL;
|
||||
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool S3fsSignals::WakeupUsr1Thread()
|
||||
{
|
||||
if(!pThreadUsr1 || !pSemUsr1){
|
||||
S3FS_PRN_ERR("The thread for SIGUSR1 is not setup.");
|
||||
return false;
|
||||
}
|
||||
pSemUsr1->post();
|
||||
return true;
|
||||
if(!pThreadUsr1 || !pSemUsr1){
|
||||
S3FS_PRN_ERR("The thread for SIGUSR1 is not setup.");
|
||||
return false;
|
||||
}
|
||||
pSemUsr1->post();
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 2
|
||||
* c-basic-offset: 2
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=2 ts=2 fdm=marker
|
||||
* vim<600: expandtab sw=2 ts=2
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
@@ -28,46 +28,46 @@
|
||||
//----------------------------------------------
|
||||
class S3fsSignals
|
||||
{
|
||||
private:
|
||||
static S3fsSignals* pSingleton;
|
||||
static bool enableUsr1;
|
||||
private:
|
||||
static S3fsSignals* pSingleton;
|
||||
static bool enableUsr1;
|
||||
|
||||
pthread_t* pThreadUsr1;
|
||||
Semaphore* pSemUsr1;
|
||||
pthread_t* pThreadUsr1;
|
||||
Semaphore* pSemUsr1;
|
||||
|
||||
protected:
|
||||
static S3fsSignals* get(void) { return pSingleton; }
|
||||
protected:
|
||||
static S3fsSignals* get(void) { return pSingleton; }
|
||||
|
||||
static void HandlerUSR1(int sig);
|
||||
static void* CheckCacheWorker(void* arg);
|
||||
static void HandlerUSR1(int sig);
|
||||
static void* CheckCacheWorker(void* arg);
|
||||
|
||||
static void HandlerUSR2(int sig);
|
||||
static bool InitUsr2Handler(void);
|
||||
static void HandlerUSR2(int sig);
|
||||
static bool InitUsr2Handler(void);
|
||||
|
||||
S3fsSignals();
|
||||
~S3fsSignals();
|
||||
S3fsSignals();
|
||||
~S3fsSignals();
|
||||
|
||||
bool InitUsr1Handler(void);
|
||||
bool DestroyUsr1Handler(void);
|
||||
bool WakeupUsr1Thread(void);
|
||||
bool InitUsr1Handler(void);
|
||||
bool DestroyUsr1Handler(void);
|
||||
bool WakeupUsr1Thread(void);
|
||||
|
||||
public:
|
||||
static bool Initialize(void);
|
||||
static bool Destroy(void);
|
||||
public:
|
||||
static bool Initialize(void);
|
||||
static bool Destroy(void);
|
||||
|
||||
static bool SetUsr1Handler(const char* path);
|
||||
static bool SetUsr1Handler(const char* path);
|
||||
|
||||
static s3fs_log_level SetLogLevel(s3fs_log_level level);
|
||||
static s3fs_log_level BumpupLogLevel(void);
|
||||
static s3fs_log_level SetLogLevel(s3fs_log_level level);
|
||||
static s3fs_log_level BumpupLogLevel(void);
|
||||
};
|
||||
|
||||
#endif // S3FS_SIGHANDLERS_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 2
|
||||
* c-basic-offset: 2
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=2 ts=2 fdm=marker
|
||||
* vim<600: expandtab sw=2 ts=2
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
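A minimal, self-contained sketch of the wakeup pattern the SIGUSR1 handling above relies on: the signal handler only posts a semaphore, while a worker thread does the real work and then drains any queued posts. The plain sem_t and the printf stand in for the project's Semaphore wrapper and FdManager::CheckAllCache(); this is illustrative only, not code from this commit.

#include <csignal>
#include <cstdio>
#include <pthread.h>
#include <semaphore.h>

static sem_t gSem;
static volatile sig_atomic_t gRun = 1;

static void on_usr1(int){ sem_post(&gSem); }             // async-signal-safe

static void* worker(void*)
{
    while(gRun){
        sem_wait(&gSem);                                  // sleep until a signal arrives
        if(!gRun){
            break;
        }
        printf("checking cache...\n");                    // stands in for CheckAllCache()
        int value;
        while(0 == sem_getvalue(&gSem, &value) && 0 < value){
            sem_wait(&gSem);                              // do not allow request queuing
        }
    }
    return NULL;
}

int main()
{
    sem_init(&gSem, 0, 0);
    pthread_t th;
    pthread_create(&th, NULL, worker, NULL);
    signal(SIGUSR1, on_usr1);
    raise(SIGUSR1);                                       // same effect as "kill -USR1 <pid>"
    gRun = 0;
    sem_post(&gSem);                                      // let the worker exit
    pthread_join(th, NULL);
    sem_destroy(&gSem);
    return 0;
}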
@@ -17,28 +17,35 @@
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
#include <cerrno>
|
||||
#include <climits>
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <syslog.h>
|
||||
#include <ctime>
|
||||
#include <cerrno>
|
||||
#include <climits>
|
||||
|
||||
#include <stdexcept>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <map>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "string_util.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
template <class T> std::string str(T value) {
|
||||
std::ostringstream s;
|
||||
s << value;
|
||||
return s.str();
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
//-------------------------------------------------------------------
|
||||
const std::string SPACES = " \t\r\n";
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Templates
|
||||
//-------------------------------------------------------------------
|
||||
template <class T> std::string str(T value)
|
||||
{
|
||||
std::ostringstream s;
|
||||
s << value;
|
||||
return s.str();
|
||||
}
|
||||
|
||||
template std::string str(short value);
|
||||
@@ -50,22 +57,25 @@ template std::string str(unsigned long value);
|
||||
template std::string str(long long value);
|
||||
template std::string str(unsigned long long value);
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
static const char hexAlphabet[] = "0123456789ABCDEF";
|
||||
|
||||
// replacement for C++11 std::stoll
|
||||
off_t s3fs_strtoofft(const char* str, int base)
|
||||
{
|
||||
errno = 0;
|
||||
char *temp;
|
||||
long long result = strtoll(str, &temp, base);
|
||||
errno = 0;
|
||||
char *temp;
|
||||
long long result = strtoll(str, &temp, base);
|
||||
|
||||
if(temp == str || *temp != '\0'){
|
||||
throw std::invalid_argument("s3fs_strtoofft");
|
||||
}
|
||||
if((result == LLONG_MIN || result == LLONG_MAX) && errno == ERANGE){
|
||||
throw std::out_of_range("s3fs_strtoofft");
|
||||
}
|
||||
return result;
|
||||
if(temp == str || *temp != '\0'){
|
||||
throw std::invalid_argument("s3fs_strtoofft");
|
||||
}
|
||||
if((result == LLONG_MIN || result == LLONG_MAX) && errno == ERANGE){
|
||||
throw std::out_of_range("s3fs_strtoofft");
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
// wrapped s3fs_strtoofft()
|
||||
@@ -74,18 +84,18 @@ off_t s3fs_strtoofft(const char* str, int base)
|
||||
//
|
||||
bool try_strtoofft(const char* str, off_t& value, int base)
|
||||
{
|
||||
if(str){
|
||||
try{
|
||||
value = s3fs_strtoofft(str, base);
|
||||
}catch(std::exception &e){
|
||||
S3FS_PRN_WARN("something error is occurred in convert string(%s) to off_t.", str);
|
||||
return false;
|
||||
if(str){
|
||||
try{
|
||||
value = s3fs_strtoofft(str, base);
|
||||
}catch(std::exception &e){
|
||||
S3FS_PRN_WARN("something error is occurred in convert string(%s) to off_t.", str);
|
||||
return false;
|
||||
}
|
||||
}else{
|
||||
S3FS_PRN_WARN("parameter string is null.");
|
||||
return false;
|
||||
}
|
||||
}else{
|
||||
S3FS_PRN_WARN("parameter string is null.");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
// wrapped try_strtoofft -> s3fs_strtoofft()
|
||||
@@ -95,144 +105,146 @@ bool try_strtoofft(const char* str, off_t& value, int base)
|
||||
//
|
||||
off_t cvt_strtoofft(const char* str, int base)
|
||||
{
|
||||
off_t result = 0;
|
||||
if(!try_strtoofft(str, result, base)){
|
||||
S3FS_PRN_WARN("something error is occurred in convert string(%s) to off_t, thus return 0 as default.", (str ? str : "null"));
|
||||
return 0;
|
||||
}
|
||||
return result;
|
||||
off_t result = 0;
|
||||
if(!try_strtoofft(str, result, base)){
|
||||
S3FS_PRN_WARN("something error is occurred in convert string(%s) to off_t, thus return 0 as default.", (str ? str : "null"));
|
||||
return 0;
|
||||
}
|
||||
return result;
|
||||
}
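// Illustration only (not part of this commit): hypothetical inputs showing
// how cvt_strtoofft() above behaves on good and bad strings.
static void example_strtoofft()
{
    off_t ok  = cvt_strtoofft("1048576", 10);   // -> 1048576
    off_t hex = cvt_strtoofft("0x100", 16);     // strtoll accepts the 0x prefix -> 256
    off_t bad = cvt_strtoofft("12abc", 10);     // invalid -> warning is logged, 0 returned
    (void)ok; (void)hex; (void)bad;
}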
|
||||
|
||||
string lower(string s)
|
||||
{
|
||||
// change each character of the string to lower case
|
||||
for(size_t i = 0; i < s.length(); i++){
|
||||
s[i] = tolower(s[i]);
|
||||
}
|
||||
return s;
|
||||
// change each character of the string to lower case
|
||||
for(size_t i = 0; i < s.length(); i++){
|
||||
s[i] = tolower(s[i]);
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
string trim_left(const string &s, const string &t /* = SPACES */)
|
||||
{
|
||||
string d(s);
|
||||
return d.erase(0, s.find_first_not_of(t));
|
||||
string d(s);
|
||||
return d.erase(0, s.find_first_not_of(t));
|
||||
}
|
||||
|
||||
string trim_right(const string &s, const string &t /* = SPACES */)
|
||||
{
|
||||
string d(s);
|
||||
string::size_type i(d.find_last_not_of(t));
|
||||
if(i == string::npos){
|
||||
return "";
|
||||
}else{
|
||||
return d.erase(d.find_last_not_of(t) + 1);
|
||||
}
|
||||
string d(s);
|
||||
string::size_type i(d.find_last_not_of(t));
|
||||
if(i == string::npos){
|
||||
return "";
|
||||
}else{
|
||||
return d.erase(d.find_last_not_of(t) + 1);
|
||||
}
|
||||
}
|
||||
|
||||
string trim(const string &s, const string &t /* = SPACES */)
|
||||
{
|
||||
return trim_left(trim_right(s, t), t);
|
||||
return trim_left(trim_right(s, t), t);
|
||||
}
|
||||
|
||||
/**
|
||||
* urlEncode a fuse path,
|
||||
* taking into special consideration "/",
|
||||
* otherwise regular urlEncode.
|
||||
*/
|
||||
//
|
||||
// urlEncode a fuse path,
|
||||
// taking into special consideration "/",
|
||||
// otherwise regular urlEncode.
|
||||
//
|
||||
string urlEncode(const string &s)
|
||||
{
|
||||
string result;
|
||||
for (size_t i = 0; i < s.length(); ++i) {
|
||||
char c = s[i];
|
||||
if (c == '/' // Note- special case for fuse paths...
|
||||
|| c == '.'
|
||||
|| c == '-'
|
||||
|| c == '_'
|
||||
|| c == '~'
|
||||
|| (c >= 'a' && c <= 'z')
|
||||
|| (c >= 'A' && c <= 'Z')
|
||||
|| (c >= '0' && c <= '9')) {
|
||||
result += c;
|
||||
} else {
|
||||
result += "%";
|
||||
result += hexAlphabet[static_cast<unsigned char>(c) / 16];
|
||||
result += hexAlphabet[static_cast<unsigned char>(c) % 16];
|
||||
string result;
|
||||
for (size_t i = 0; i < s.length(); ++i) {
|
||||
char c = s[i];
|
||||
if (c == '/' // Note- special case for fuse paths...
|
||||
|| c == '.'
|
||||
|| c == '-'
|
||||
|| c == '_'
|
||||
|| c == '~'
|
||||
|| (c >= 'a' && c <= 'z')
|
||||
|| (c >= 'A' && c <= 'Z')
|
||||
|| (c >= '0' && c <= '9'))
|
||||
{
|
||||
result += c;
|
||||
}else{
|
||||
result += "%";
|
||||
result += hexAlphabet[static_cast<unsigned char>(c) / 16];
|
||||
result += hexAlphabet[static_cast<unsigned char>(c) % 16];
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* urlEncode a fuse path,
|
||||
* taking into special consideration "/",
|
||||
* otherwise regular urlEncode.
|
||||
*/
|
||||
//
|
||||
// urlEncode a fuse path,
|
||||
// taking into special consideration "/",
|
||||
// otherwise regular urlEncode.
|
||||
//
|
||||
string urlEncode2(const string &s)
|
||||
{
|
||||
string result;
|
||||
for (size_t i = 0; i < s.length(); ++i) {
|
||||
char c = s[i];
|
||||
if (c == '=' // Note- special case for fuse paths...
|
||||
|| c == '&' // Note- special case for s3...
|
||||
|| c == '%'
|
||||
|| c == '.'
|
||||
|| c == '-'
|
||||
|| c == '_'
|
||||
|| c == '~'
|
||||
|| (c >= 'a' && c <= 'z')
|
||||
|| (c >= 'A' && c <= 'Z')
|
||||
|| (c >= '0' && c <= '9')) {
|
||||
result += c;
|
||||
} else {
|
||||
result += "%";
|
||||
result += hexAlphabet[static_cast<unsigned char>(c) / 16];
|
||||
result += hexAlphabet[static_cast<unsigned char>(c) % 16];
|
||||
string result;
|
||||
for (size_t i = 0; i < s.length(); ++i) {
|
||||
char c = s[i];
|
||||
if (c == '=' // Note- special case for fuse paths...
|
||||
|| c == '&' // Note- special case for s3...
|
||||
|| c == '%'
|
||||
|| c == '.'
|
||||
|| c == '-'
|
||||
|| c == '_'
|
||||
|| c == '~'
|
||||
|| (c >= 'a' && c <= 'z')
|
||||
|| (c >= 'A' && c <= 'Z')
|
||||
|| (c >= '0' && c <= '9'))
|
||||
{
|
||||
result += c;
|
||||
}else{
|
||||
result += "%";
|
||||
result += hexAlphabet[static_cast<unsigned char>(c) / 16];
|
||||
result += hexAlphabet[static_cast<unsigned char>(c) % 16];
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
return result;
|
||||
}
|
||||
|
||||
string urlDecode(const string& s)
|
||||
{
|
||||
string result;
|
||||
for(size_t i = 0; i < s.length(); ++i){
|
||||
if(s[i] != '%'){
|
||||
result += s[i];
|
||||
}else{
|
||||
int ch = 0;
|
||||
if(s.length() <= ++i){
|
||||
break; // wrong format.
|
||||
}
|
||||
ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00;
|
||||
if(s.length() <= ++i){
|
||||
break; // wrong format.
|
||||
}
|
||||
ch *= 16;
|
||||
ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00;
|
||||
result += static_cast<char>(ch);
|
||||
string result;
|
||||
for(size_t i = 0; i < s.length(); ++i){
|
||||
if(s[i] != '%'){
|
||||
result += s[i];
|
||||
}else{
|
||||
int ch = 0;
|
||||
if(s.length() <= ++i){
|
||||
break; // wrong format.
|
||||
}
|
||||
ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00;
|
||||
if(s.length() <= ++i){
|
||||
break; // wrong format.
|
||||
}
|
||||
ch *= 16;
|
||||
ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00;
|
||||
result += static_cast<char>(ch);
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
return result;
|
||||
}
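// Illustration only (not part of this commit): a hypothetical round trip
// through urlEncode()/urlDecode() with an invented path.
static void example_urlencode()
{
    string path    = "/bucket dir/file name.txt";
    string encoded = urlEncode(path);            // '/' is kept, each space becomes "%20"
    string decoded = urlDecode(encoded);         // decoded == path again
    (void)decoded;
}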
|
||||
|
||||
bool takeout_str_dquart(string& str)
|
||||
{
|
||||
size_t pos;
|
||||
size_t pos;
|
||||
|
||||
// '"' for start
|
||||
if(string::npos != (pos = str.find_first_of('\"'))){
|
||||
str = str.substr(pos + 1);
|
||||
// '"' for start
|
||||
if(string::npos != (pos = str.find_first_of('\"'))){
|
||||
str = str.substr(pos + 1);
|
||||
|
||||
// '"' for end
|
||||
if(string::npos == (pos = str.find_last_of('\"'))){
|
||||
return false;
|
||||
// '"' for end
|
||||
if(string::npos == (pos = str.find_last_of('\"'))){
|
||||
return false;
|
||||
}
|
||||
str = str.substr(0, pos);
|
||||
if(string::npos != str.find_first_of('\"')){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
str = str.substr(0, pos);
|
||||
if(string::npos != str.find_first_of('\"')){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
@@ -240,77 +252,77 @@ bool takeout_str_dquart(string& str)
|
||||
//
|
||||
bool get_keyword_value(string& target, const char* keyword, string& value)
|
||||
{
|
||||
if(!keyword){
|
||||
return false;
|
||||
}
|
||||
size_t spos;
|
||||
size_t epos;
|
||||
if(string::npos == (spos = target.find(keyword))){
|
||||
return false;
|
||||
}
|
||||
spos += strlen(keyword);
|
||||
if('=' != target.at(spos)){
|
||||
return false;
|
||||
}
|
||||
spos++;
|
||||
if(string::npos == (epos = target.find('&', spos))){
|
||||
value = target.substr(spos);
|
||||
}else{
|
||||
value = target.substr(spos, (epos - spos));
|
||||
}
|
||||
return true;
|
||||
if(!keyword){
|
||||
return false;
|
||||
}
|
||||
size_t spos;
|
||||
size_t epos;
|
||||
if(string::npos == (spos = target.find(keyword))){
|
||||
return false;
|
||||
}
|
||||
spos += strlen(keyword);
|
||||
if('=' != target.at(spos)){
|
||||
return false;
|
||||
}
|
||||
spos++;
|
||||
if(string::npos == (epos = target.find('&', spos))){
|
||||
value = target.substr(spos);
|
||||
}else{
|
||||
value = target.substr(spos, (epos - spos));
|
||||
}
|
||||
return true;
|
||||
}
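// Illustration only (not part of this commit): get_keyword_value() picks the
// value out of a "key=value&..." style string; the target below is invented.
static void example_keyword_value()
{
    string target = "uploadId=abc123&partNumber=2";
    string value;
    if(get_keyword_value(target, "uploadId", value)){
        // value == "abc123" (cut at the following '&')
    }
}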
|
||||
|
||||
/**
|
||||
* Returns the current date
|
||||
* in a format suitable for a HTTP request header.
|
||||
*/
|
||||
//
|
||||
// Returns the current date
|
||||
// in a format suitable for a HTTP request header.
|
||||
//
|
||||
string get_date_rfc850()
|
||||
{
|
||||
char buf[100];
|
||||
time_t t = time(NULL);
|
||||
struct tm res;
|
||||
strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S GMT", gmtime_r(&t, &res));
|
||||
return buf;
|
||||
char buf[100];
|
||||
time_t t = time(NULL);
|
||||
struct tm res;
|
||||
strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S GMT", gmtime_r(&t, &res));
|
||||
return buf;
|
||||
}
|
||||
|
||||
void get_date_sigv3(string& date, string& date8601)
|
||||
{
|
||||
time_t tm = time(NULL);
|
||||
date = get_date_string(tm);
|
||||
date8601 = get_date_iso8601(tm);
|
||||
time_t tm = time(NULL);
|
||||
date = get_date_string(tm);
|
||||
date8601 = get_date_iso8601(tm);
|
||||
}
|
||||
|
||||
string get_date_string(time_t tm)
|
||||
{
|
||||
char buf[100];
|
||||
struct tm res;
|
||||
strftime(buf, sizeof(buf), "%Y%m%d", gmtime_r(&tm, &res));
|
||||
return buf;
|
||||
char buf[100];
|
||||
struct tm res;
|
||||
strftime(buf, sizeof(buf), "%Y%m%d", gmtime_r(&tm, &res));
|
||||
return buf;
|
||||
}
|
||||
|
||||
string get_date_iso8601(time_t tm)
|
||||
{
|
||||
char buf[100];
|
||||
struct tm res;
|
||||
strftime(buf, sizeof(buf), "%Y%m%dT%H%M%SZ", gmtime_r(&tm, &res));
|
||||
return buf;
|
||||
char buf[100];
|
||||
struct tm res;
|
||||
strftime(buf, sizeof(buf), "%Y%m%dT%H%M%SZ", gmtime_r(&tm, &res));
|
||||
return buf;
|
||||
}
|
||||
|
||||
bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime)
|
||||
{
|
||||
if(!pdate){
|
||||
return false;
|
||||
}
|
||||
if(!pdate){
|
||||
return false;
|
||||
}
|
||||
|
||||
struct tm tm;
|
||||
char* prest = strptime(pdate, "%Y-%m-%dT%T", &tm);
|
||||
if(prest == pdate){
|
||||
// wrong format
|
||||
return false;
|
||||
}
|
||||
unixtime = mktime(&tm);
|
||||
return true;
|
||||
struct tm tm;
|
||||
char* prest = strptime(pdate, "%Y-%m-%dT%T", &tm);
|
||||
if(prest == pdate){
|
||||
// wrong format
|
||||
return false;
|
||||
}
|
||||
unixtime = mktime(&tm);
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
@@ -319,155 +331,155 @@ bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime)
|
||||
//
|
||||
bool convert_unixtime_from_option_arg(const char* argv, time_t& unixtime)
|
||||
{
|
||||
if(!argv){
|
||||
return false;
|
||||
}
|
||||
unixtime = 0;
|
||||
const char* ptmp;
|
||||
int last_unit_type = 0; // unit flag.
|
||||
bool is_last_number;
|
||||
time_t tmptime;
|
||||
for(ptmp = argv, is_last_number = true, tmptime = 0; ptmp && *ptmp; ++ptmp){
|
||||
if('0' <= *ptmp && *ptmp <= '9'){
|
||||
tmptime *= 10;
|
||||
tmptime += static_cast<time_t>(*ptmp - '0');
|
||||
is_last_number = true;
|
||||
}else if(is_last_number){
|
||||
if('Y' == *ptmp && 1 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60 * 24 * 365)); // average 365 day / year
|
||||
last_unit_type = 1;
|
||||
}else if('M' == *ptmp && 2 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60 * 24 * 30)); // average 30 day / month
|
||||
last_unit_type = 2;
|
||||
}else if('D' == *ptmp && 3 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60 * 24));
|
||||
last_unit_type = 3;
|
||||
}else if('h' == *ptmp && 4 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60));
|
||||
last_unit_type = 4;
|
||||
}else if('m' == *ptmp && 5 > last_unit_type){
|
||||
unixtime += (tmptime * 60);
|
||||
last_unit_type = 5;
|
||||
}else if('s' == *ptmp && 6 > last_unit_type){
|
||||
unixtime += tmptime;
|
||||
last_unit_type = 6;
|
||||
}else{
|
||||
return false;
|
||||
}
|
||||
tmptime = 0;
|
||||
is_last_number = false;
|
||||
}else{
|
||||
if(!argv){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if(is_last_number){
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
unixtime = 0;
|
||||
const char* ptmp;
|
||||
int last_unit_type = 0; // unit flag.
|
||||
bool is_last_number;
|
||||
time_t tmptime;
|
||||
for(ptmp = argv, is_last_number = true, tmptime = 0; ptmp && *ptmp; ++ptmp){
|
||||
if('0' <= *ptmp && *ptmp <= '9'){
|
||||
tmptime *= 10;
|
||||
tmptime += static_cast<time_t>(*ptmp - '0');
|
||||
is_last_number = true;
|
||||
}else if(is_last_number){
|
||||
if('Y' == *ptmp && 1 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60 * 24 * 365)); // average 365 day / year
|
||||
last_unit_type = 1;
|
||||
}else if('M' == *ptmp && 2 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60 * 24 * 30)); // average 30 day / month
|
||||
last_unit_type = 2;
|
||||
}else if('D' == *ptmp && 3 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60 * 24));
|
||||
last_unit_type = 3;
|
||||
}else if('h' == *ptmp && 4 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60));
|
||||
last_unit_type = 4;
|
||||
}else if('m' == *ptmp && 5 > last_unit_type){
|
||||
unixtime += (tmptime * 60);
|
||||
last_unit_type = 5;
|
||||
}else if('s' == *ptmp && 6 > last_unit_type){
|
||||
unixtime += tmptime;
|
||||
last_unit_type = 6;
|
||||
}else{
|
||||
return false;
|
||||
}
|
||||
tmptime = 0;
|
||||
is_last_number = false;
|
||||
}else{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if(is_last_number){
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
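// Illustration only (not part of this commit): hypothetical arguments for
// convert_unixtime_from_option_arg() above, which turns a compact "1Y2M3D..."
// style relative time into seconds.
static void example_option_time()
{
    time_t sec = 0;
    convert_unixtime_from_option_arg("1D12h", sec);   // 1 day + 12 hours -> 129600
    convert_unixtime_from_option_arg("90m",   sec);   // 90 minutes       -> 5400
    convert_unixtime_from_option_arg("10",    sec);   // unit missing     -> returns false
}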
|
||||
|
||||
std::string s3fs_hex(const unsigned char* input, size_t length)
|
||||
{
|
||||
std::string hex;
|
||||
for(size_t pos = 0; pos < length; ++pos){
|
||||
char hexbuf[3];
|
||||
snprintf(hexbuf, 3, "%02x", input[pos]);
|
||||
hex += hexbuf;
|
||||
}
|
||||
return hex;
|
||||
std::string hex;
|
||||
for(size_t pos = 0; pos < length; ++pos){
|
||||
char hexbuf[3];
|
||||
snprintf(hexbuf, 3, "%02x", input[pos]);
|
||||
hex += hexbuf;
|
||||
}
|
||||
return hex;
|
||||
}
|
||||
|
||||
char* s3fs_base64(const unsigned char* input, size_t length)
|
||||
{
|
||||
static const char* base = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
|
||||
char* result;
|
||||
static const char* base = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
|
||||
char* result;
|
||||
|
||||
if(!input || 0 == length){
|
||||
return NULL;
|
||||
}
|
||||
result = new char[((length / 3) + 1) * 4 + 1];
|
||||
if(!input || 0 == length){
|
||||
return NULL;
|
||||
}
|
||||
result = new char[((length / 3) + 1) * 4 + 1];
|
||||
|
||||
unsigned char parts[4];
|
||||
size_t rpos;
|
||||
size_t wpos;
|
||||
for(rpos = 0, wpos = 0; rpos < length; rpos += 3){
|
||||
parts[0] = (input[rpos] & 0xfc) >> 2;
|
||||
parts[1] = ((input[rpos] & 0x03) << 4) | ((((rpos + 1) < length ? input[rpos + 1] : 0x00) & 0xf0) >> 4);
|
||||
parts[2] = (rpos + 1) < length ? (((input[rpos + 1] & 0x0f) << 2) | ((((rpos + 2) < length ? input[rpos + 2] : 0x00) & 0xc0) >> 6)) : 0x40;
|
||||
parts[3] = (rpos + 2) < length ? (input[rpos + 2] & 0x3f) : 0x40;
|
||||
unsigned char parts[4];
|
||||
size_t rpos;
|
||||
size_t wpos;
|
||||
for(rpos = 0, wpos = 0; rpos < length; rpos += 3){
|
||||
parts[0] = (input[rpos] & 0xfc) >> 2;
|
||||
parts[1] = ((input[rpos] & 0x03) << 4) | ((((rpos + 1) < length ? input[rpos + 1] : 0x00) & 0xf0) >> 4);
|
||||
parts[2] = (rpos + 1) < length ? (((input[rpos + 1] & 0x0f) << 2) | ((((rpos + 2) < length ? input[rpos + 2] : 0x00) & 0xc0) >> 6)) : 0x40;
|
||||
parts[3] = (rpos + 2) < length ? (input[rpos + 2] & 0x3f) : 0x40;
|
||||
|
||||
result[wpos++] = base[parts[0]];
|
||||
result[wpos++] = base[parts[1]];
|
||||
result[wpos++] = base[parts[2]];
|
||||
result[wpos++] = base[parts[3]];
|
||||
}
|
||||
result[wpos] = '\0';
|
||||
result[wpos++] = base[parts[0]];
|
||||
result[wpos++] = base[parts[1]];
|
||||
result[wpos++] = base[parts[2]];
|
||||
result[wpos++] = base[parts[3]];
|
||||
}
|
||||
result[wpos] = '\0';
|
||||
|
||||
return result;
|
||||
return result;
|
||||
}
|
||||
|
||||
inline unsigned char char_decode64(const char ch)
|
||||
{
|
||||
unsigned char by;
|
||||
if('A' <= ch && ch <= 'Z'){ // A - Z
|
||||
by = static_cast<unsigned char>(ch - 'A');
|
||||
}else if('a' <= ch && ch <= 'z'){ // a - z
|
||||
by = static_cast<unsigned char>(ch - 'a' + 26);
|
||||
}else if('0' <= ch && ch <= '9'){ // 0 - 9
|
||||
by = static_cast<unsigned char>(ch - '0' + 52);
|
||||
}else if('+' == ch){ // +
|
||||
by = 62;
|
||||
}else if('/' == ch){ // /
|
||||
by = 63;
|
||||
}else if('=' == ch){ // =
|
||||
by = 64;
|
||||
}else{ // something wrong
|
||||
by = UCHAR_MAX;
|
||||
}
|
||||
return by;
|
||||
unsigned char by;
|
||||
if('A' <= ch && ch <= 'Z'){ // A - Z
|
||||
by = static_cast<unsigned char>(ch - 'A');
|
||||
}else if('a' <= ch && ch <= 'z'){ // a - z
|
||||
by = static_cast<unsigned char>(ch - 'a' + 26);
|
||||
}else if('0' <= ch && ch <= '9'){ // 0 - 9
|
||||
by = static_cast<unsigned char>(ch - '0' + 52);
|
||||
}else if('+' == ch){ // +
|
||||
by = 62;
|
||||
}else if('/' == ch){ // /
|
||||
by = 63;
|
||||
}else if('=' == ch){ // =
|
||||
by = 64;
|
||||
}else{ // something wrong
|
||||
by = UCHAR_MAX;
|
||||
}
|
||||
return by;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_decode64(const char* input, size_t* plength)
|
||||
{
|
||||
unsigned char* result;
|
||||
if(!input || 0 == strlen(input) || !plength){
|
||||
return NULL;
|
||||
}
|
||||
result = new unsigned char[strlen(input) + 1];
|
||||
|
||||
unsigned char parts[4];
|
||||
size_t input_len = strlen(input);
|
||||
size_t rpos;
|
||||
size_t wpos;
|
||||
for(rpos = 0, wpos = 0; rpos < input_len; rpos += 4){
|
||||
parts[0] = char_decode64(input[rpos]);
|
||||
parts[1] = (rpos + 1) < input_len ? char_decode64(input[rpos + 1]) : 64;
|
||||
parts[2] = (rpos + 2) < input_len ? char_decode64(input[rpos + 2]) : 64;
|
||||
parts[3] = (rpos + 3) < input_len ? char_decode64(input[rpos + 3]) : 64;
|
||||
|
||||
result[wpos++] = ((parts[0] << 2) & 0xfc) | ((parts[1] >> 4) & 0x03);
|
||||
if(64 == parts[2]){
|
||||
break;
|
||||
unsigned char* result;
|
||||
if(!input || 0 == strlen(input) || !plength){
|
||||
return NULL;
|
||||
}
|
||||
result[wpos++] = ((parts[1] << 4) & 0xf0) | ((parts[2] >> 2) & 0x0f);
|
||||
if(64 == parts[3]){
|
||||
break;
|
||||
result = new unsigned char[strlen(input) + 1];
|
||||
|
||||
unsigned char parts[4];
|
||||
size_t input_len = strlen(input);
|
||||
size_t rpos;
|
||||
size_t wpos;
|
||||
for(rpos = 0, wpos = 0; rpos < input_len; rpos += 4){
|
||||
parts[0] = char_decode64(input[rpos]);
|
||||
parts[1] = (rpos + 1) < input_len ? char_decode64(input[rpos + 1]) : 64;
|
||||
parts[2] = (rpos + 2) < input_len ? char_decode64(input[rpos + 2]) : 64;
|
||||
parts[3] = (rpos + 3) < input_len ? char_decode64(input[rpos + 3]) : 64;
|
||||
|
||||
result[wpos++] = ((parts[0] << 2) & 0xfc) | ((parts[1] >> 4) & 0x03);
|
||||
if(64 == parts[2]){
|
||||
break;
|
||||
}
|
||||
result[wpos++] = ((parts[1] << 4) & 0xf0) | ((parts[2] >> 2) & 0x0f);
|
||||
if(64 == parts[3]){
|
||||
break;
|
||||
}
|
||||
result[wpos++] = ((parts[2] << 6) & 0xc0) | (parts[3] & 0x3f);
|
||||
}
|
||||
result[wpos++] = ((parts[2] << 6) & 0xc0) | (parts[3] & 0x3f);
|
||||
}
|
||||
result[wpos] = '\0';
|
||||
*plength = wpos;
|
||||
return result;
|
||||
result[wpos] = '\0';
|
||||
*plength = wpos;
|
||||
return result;
|
||||
}
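// Illustration only (not part of this commit): a hypothetical round trip
// through s3fs_base64()/s3fs_decode64(); both return new[]-allocated buffers
// that the caller has to delete[].
static void example_base64()
{
    const unsigned char raw[] = {0xde, 0xad, 0xbe, 0xef};
    char* encoded = s3fs_base64(raw, sizeof(raw));              // "3q2+7w=="
    size_t declen = 0;
    unsigned char* decoded = s3fs_decode64(encoded, &declen);   // declen == 4, bytes match raw
    delete[] decoded;
    delete[] encoded;
}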
|
||||
|
||||
/*
|
||||
* detect and rewrite invalid utf8. We take invalid bytes
|
||||
* and encode them into a private region of the unicode
|
||||
* space. This is sometimes known as wtf8, wobbly transformation format.
|
||||
* it is necessary because S3 validates the utf8 used for identifiers for
|
||||
* correctness, while some clients may provide invalid utf, notably
|
||||
* windows using cp1252.
|
||||
*/
|
||||
//
|
||||
// detect and rewrite invalid utf8. We take invalid bytes
|
||||
// and encode them into a private region of the unicode
|
||||
// space. This is sometimes known as wtf8, wobbly transformation format.
|
||||
// it is necessary because S3 validates the utf8 used for identifiers for
|
||||
// correctness, while some clients may provide invalid utf, notably
|
||||
// windows using cp1252.
|
||||
//
|
||||
|
||||
// Base location for transform. The range 0xE000 - 0xF8ff
|
||||
// is a private range, so we use the start of this range.
|
||||
@@ -477,123 +489,122 @@ static unsigned int escape_base = 0xe000;
|
||||
// 'result' can be null. returns true if transform was needed.
|
||||
bool s3fs_wtf8_encode(const char *s, string *result)
|
||||
{
|
||||
bool invalid = false;
|
||||
bool invalid = false;
|
||||
|
||||
// Pass valid utf8 code through
|
||||
for (; *s; s++) {
|
||||
const unsigned char c = *s;
|
||||
// Pass valid utf8 code through
|
||||
for (; *s; s++) {
|
||||
const unsigned char c = *s;
|
||||
|
||||
// single byte encoding
|
||||
if (c <= 0x7f) {
|
||||
if (result) {
|
||||
*result += c;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
// single byte encoding
|
||||
if (c <= 0x7f) {
|
||||
if (result) {
|
||||
*result += c;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// otherwise, it must be one of the valid start bytes
|
||||
if ( c >= 0xc2 && c <= 0xf5 ) {
|
||||
|
||||
// two byte encoding
|
||||
// don't need bounds check, string is zero terminated
|
||||
if ((c & 0xe0) == 0xc0 && (s[1] & 0xc0) == 0x80) {
|
||||
// all two byte encodings starting higher than c1 are valid
|
||||
// otherwise, it must be one of the valid start bytes
|
||||
if ( c >= 0xc2 && c <= 0xf5 ) {
|
||||
// two byte encoding
|
||||
// don't need bounds check, string is zero terminated
|
||||
if ((c & 0xe0) == 0xc0 && (s[1] & 0xc0) == 0x80) {
|
||||
// all two byte encodings starting higher than c1 are valid
|
||||
if (result) {
|
||||
*result += c;
|
||||
*result += *(++s);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
// three byte encoding
|
||||
if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) {
|
||||
const unsigned code = ((c & 0x0f) << 12) | ((s[1] & 0x3f) << 6) | (s[2] & 0x3f);
|
||||
if (code >= 0x800 && ! (code >= 0xd800 && code <= 0xd8ff)) {
|
||||
// not overlong and not a surrogate pair
|
||||
if (result) {
|
||||
*result += c;
|
||||
*result += *(++s);
|
||||
*result += *(++s);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
// four byte encoding
|
||||
if ((c & 0xf8) == 0xf0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80 && (s[3] & 0xc0) == 0x80) {
|
||||
const unsigned code = ((c & 0x07) << 18) | ((s[1] & 0x3f) << 12) | ((s[2] & 0x3f) << 6) | (s[3] & 0x3f);
|
||||
if (code >= 0x10000 && code <= 0x10ffff) {
|
||||
// not overlong and in defined unicode space
|
||||
if (result) {
|
||||
*result += c;
|
||||
*result += *(++s);
|
||||
*result += *(++s);
|
||||
*result += *(++s);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
// printf("invalid %02x at %d\n", c, i);
|
||||
// Invalid utf8 code. Convert it to a private two byte area of unicode
|
||||
// e.g. the e000 - f8ff area. This will be a three byte encoding
|
||||
invalid = true;
|
||||
if (result) {
|
||||
*result += c;
|
||||
*result += *(++s);
|
||||
unsigned escape = escape_base + c;
|
||||
*result += static_cast<char>(0xe0 | ((escape >> 12) & 0x0f));
|
||||
*result += static_cast<char>(0x80 | ((escape >> 06) & 0x3f));
|
||||
*result += static_cast<char>(0x80 | ((escape >> 00) & 0x3f));
|
||||
}
|
||||
continue;
|
||||
}
|
||||
// three byte encoding
|
||||
if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) {
|
||||
const unsigned code = ((c & 0x0f) << 12) | ((s[1] & 0x3f) << 6) | (s[2] & 0x3f);
|
||||
if (code >= 0x800 && ! (code >= 0xd800 && code <= 0xd8ff)) {
|
||||
// not overlong and not a surrogate pair
|
||||
if (result) {
|
||||
*result += c;
|
||||
*result += *(++s);
|
||||
*result += *(++s);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
// four byte encoding
|
||||
if ((c & 0xf8) == 0xf0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80 && (s[3] & 0xc0) == 0x80) {
|
||||
const unsigned code = ((c & 0x07) << 18) | ((s[1] & 0x3f) << 12) | ((s[2] & 0x3f) << 6) | (s[3] & 0x3f);
|
||||
if (code >= 0x10000 && code <= 0x10ffff) {
|
||||
// not overlong and in defined unicode space
|
||||
if (result) {
|
||||
*result += c;
|
||||
*result += *(++s);
|
||||
*result += *(++s);
|
||||
*result += *(++s);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
// printf("invalid %02x at %d\n", c, i);
|
||||
// Invalid utf8 code. Convert it to a private two byte area of unicode
|
||||
// e.g. the e000 - f8ff area. This will be a three byte encoding
|
||||
invalid = true;
|
||||
if (result) {
|
||||
unsigned escape = escape_base + c;
|
||||
*result += static_cast<char>(0xe0 | ((escape >> 12) & 0x0f));
|
||||
*result += static_cast<char>(0x80 | ((escape >> 06) & 0x3f));
|
||||
*result += static_cast<char>(0x80 | ((escape >> 00) & 0x3f));
|
||||
}
|
||||
}
|
||||
return invalid;
|
||||
return invalid;
|
||||
}
|
||||
|
||||
string s3fs_wtf8_encode(const string &s)
|
||||
{
|
||||
string result;
|
||||
s3fs_wtf8_encode(s.c_str(), &result);
|
||||
return result;
|
||||
string result;
|
||||
s3fs_wtf8_encode(s.c_str(), &result);
|
||||
return result;
|
||||
}
|
||||
|
||||
// The reverse operation, turn encoded bytes back into their original values
|
||||
// The code assumes that we map to a three-byte code point.
|
||||
bool s3fs_wtf8_decode(const char *s, string *result)
|
||||
{
|
||||
bool encoded = false;
|
||||
for (; *s; s++) {
|
||||
unsigned char c = *s;
|
||||
// look for a three byte tuple matching our encoding code
|
||||
if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) {
|
||||
unsigned code = (c & 0x0f) << 12;
|
||||
code |= (s[1] & 0x3f) << 6;
|
||||
code |= (s[2] & 0x3f) << 0;
|
||||
if (code >= escape_base && code <= escape_base + 0xff) {
|
||||
// convert back
|
||||
encoded = true;
|
||||
if(result){
|
||||
*result += static_cast<char>(code - escape_base);
|
||||
bool encoded = false;
|
||||
for (; *s; s++) {
|
||||
unsigned char c = *s;
|
||||
// look for a three byte tuple matching our encoding code
|
||||
if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) {
|
||||
unsigned code = (c & 0x0f) << 12;
|
||||
code |= (s[1] & 0x3f) << 6;
|
||||
code |= (s[2] & 0x3f) << 0;
|
||||
if (code >= escape_base && code <= escape_base + 0xff) {
|
||||
// convert back
|
||||
encoded = true;
|
||||
if(result){
|
||||
*result += static_cast<char>(code - escape_base);
|
||||
}
|
||||
s+=2;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (result) {
|
||||
*result += c;
|
||||
}
|
||||
s+=2;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (result) {
|
||||
*result += c;
|
||||
}
|
||||
}
|
||||
return encoded;
|
||||
return encoded;
|
||||
}
|
||||
|
||||
string s3fs_wtf8_decode(const string &s)
|
||||
{
|
||||
string result;
|
||||
s3fs_wtf8_decode(s.c_str(), &result);
|
||||
return result;
|
||||
string result;
|
||||
s3fs_wtf8_decode(s.c_str(), &result);
|
||||
return result;
|
||||
}
|
||||
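A minimal usage sketch of the two std::string overloads above (not part of the commit); the cp1252 byte 0xe4 stands in for any client-supplied name that is not valid UTF-8.

#include <cassert>
#include <string>

#include "string_util.h"    // declares s3fs_wtf8_encode() / s3fs_wtf8_decode()

int main()
{
    // "ä" in cp1252 is the single byte 0xe4, which is not valid UTF-8 by itself.
    const std::string raw("k\xe4se");

    // Encoding rewrites the bad byte into the U+E000 private area ...
    std::string safe = s3fs_wtf8_encode(raw);
    assert(safe != raw);

    // ... and decoding restores the original byte sequence.
    assert(s3fs_wtf8_decode(safe) == raw);
    return 0;
}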
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 2
|
||||
* c-basic-offset: 2
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=2 ts=2 fdm=marker
|
||||
* vim<600: expandtab sw=2 ts=2
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
@ -17,49 +17,88 @@
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_STRING_UTIL_H_
|
||||
#define S3FS_STRING_UTIL_H_
|
||||
|
||||
/*
|
||||
* A collection of string utilities for manipulating URLs and HTTP responses.
|
||||
*/
|
||||
#include <string.h>
|
||||
#include <syslog.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
#include <string>
|
||||
|
||||
static const std::string SPACES = " \t\r\n";
|
||||
//
|
||||
// A collection of string utilities for manipulating URLs and HTTP responses.
|
||||
//
|
||||
//-------------------------------------------------------------------
// Gloval variables
//-------------------------------------------------------------------
extern const std::string SPACES;

//-------------------------------------------------------------------
// Inline functions
//-------------------------------------------------------------------
static inline int STR2NCMP(const char *str1, const char *str2) { return strncmp(str1, str2, strlen(str2)); }
static inline const char* SAFESTRPTR(const char *strptr) { return strptr ? strptr : ""; }
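STR2NCMP() compares only the first strlen(str2) bytes, so it is effectively a "starts with" test, and SAFESTRPTR() guards printf-style callers against NULL. A small illustrative snippet (the header name is just an example, not taken from the commit):

#include <cstdio>

#include "string_util.h"    // defines STR2NCMP() and SAFESTRPTR()

int main()
{
    // STR2NCMP() compares only the first strlen(str2) bytes: a prefix test.
    if(0 == STR2NCMP("x-amz-meta-mtime", "x-amz-meta-")){
        printf("header carries the x-amz-meta- prefix\n");
    }
    // SAFESTRPTR() turns a NULL pointer into an empty string.
    printf("[%s]\n", SAFESTRPTR(NULL));
    return 0;
}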
|
||||
//-------------------------------------------------------------------
// Templates
//-------------------------------------------------------------------
template <class T> std::string str(T value);

//-------------------------------------------------------------------
// Macros(WTF8)
//-------------------------------------------------------------------
#define WTF8_ENCODE(ARG) \
    std::string ARG##_buf; \
    const char * ARG = _##ARG; \
    if (use_wtf8 && s3fs_wtf8_encode( _##ARG, 0 )) { \
        s3fs_wtf8_encode( _##ARG, &ARG##_buf); \
        ARG = ARG##_buf.c_str(); \
    }
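The macro shadows a _-prefixed argument with a possibly re-encoded local. A hedged sketch of the intended call pattern (the handler name and body are illustrative only; use_wtf8 is the s3fs option flag, defined here as a local stand-in so the sketch stands alone):

#include <sys/stat.h>
#include <string>

#include "string_util.h"    // WTF8_ENCODE(), s3fs_wtf8_encode()

// Stand-in for the real s3fs option flag so this sketch compiles and links on its own.
static bool use_wtf8 = true;

// Illustrative only: a FUSE-style handler receives the raw argument as _path,
// and WTF8_ENCODE(path) introduces a local "path" pointing either at _path
// (already valid UTF-8) or at a re-encoded copy held in path_buf.
static int example_getattr(const char* _path, struct stat* stbuf)
{
    WTF8_ENCODE(path)
    (void)stbuf;
    return '\0' == *path ? -1 : 0;    // only "path" is used past this point
}

int main()
{
    struct stat st;
    return example_getattr("/dir/caf\xe9", &st);
}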
|
||||
//-------------------------------------------------------------------
// Utilities
//-------------------------------------------------------------------
//
// Convert string to off_t. Throws std::invalid_argument and std::out_of_range on bad input.
//
off_t s3fs_strtoofft(const char* str, int base = 0);
bool try_strtoofft(const char* str, off_t& value, int base = 0);
off_t cvt_strtoofft(const char* str, int base = 0);
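The entry points above differ in how they report bad input. A small sketch, assuming try_strtoofft() signals failure through its bool return value rather than by throwing:

#include <cstdio>
#include <stdexcept>

#include "string_util.h"    // s3fs_strtoofft(), try_strtoofft()

int main()
{
    // Throwing form: bad input raises std::invalid_argument / std::out_of_range.
    try{
        off_t size = s3fs_strtoofft("deadbeef", /*base=*/ 16);
        printf("parsed %lld\n", static_cast<long long>(size));
    }catch(const std::exception& e){
        printf("parse failed: %s\n", e.what());
    }

    // Non-throwing form (assumption: returns false on bad input and leaves value untouched).
    off_t value = 0;
    if(!try_strtoofft("not-a-number", value)){
        printf("try_strtoofft rejected the input\n");
    }
    return 0;
}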
|
||||
//
|
||||
// String Manipulation
|
||||
//
|
||||
std::string trim_left(const std::string &s, const std::string &t = SPACES);
|
||||
std::string trim_right(const std::string &s, const std::string &t = SPACES);
|
||||
std::string trim(const std::string &s, const std::string &t = SPACES);
|
||||
std::string lower(std::string s);
|
||||
|
||||
//
|
||||
// Date string
|
||||
//
|
||||
std::string get_date_rfc850(void);
|
||||
void get_date_sigv3(std::string& date, std::string& date8601);
|
||||
std::string get_date_string(time_t tm);
|
||||
std::string get_date_iso8601(time_t tm);
|
||||
bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime);
|
||||
bool convert_unixtime_from_option_arg(const char* argv, time_t& unixtime);
|
||||
|
||||
//
|
||||
// For encoding
|
||||
//
|
||||
std::string urlEncode(const std::string &s);
|
||||
std::string urlEncode2(const std::string &s);
|
||||
std::string urlDecode(const std::string& s);
|
||||
|
||||
bool takeout_str_dquart(std::string& str);
|
||||
bool get_keyword_value(std::string& target, const char* keyword, std::string& value);
|
||||
|
||||
//
|
||||
// For binary string
|
||||
//
|
||||
std::string s3fs_hex(const unsigned char* input, size_t length);
|
||||
char* s3fs_base64(const unsigned char* input, size_t length);
|
||||
unsigned char* s3fs_decode64(const char* input, size_t* plength);
|
||||
|
||||
//
|
||||
// WTF8
|
||||
//
|
||||
bool s3fs_wtf8_encode(const char *s, std::string *result);
|
||||
std::string s3fs_wtf8_encode(const std::string &s);
|
||||
bool s3fs_wtf8_decode(const char *s, std::string *result);
|
||||
@ -69,9 +108,9 @@ std::string s3fs_wtf8_decode(const std::string &s);
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 2
|
||||
* c-basic-offset: 2
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=2 ts=2 fdm=marker
|
||||
* vim<600: expandtab sw=2 ts=2
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
@ -18,6 +18,7 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdlib>
|
||||
#include <limits>
|
||||
#include <stdint.h>
|
||||
#include <strings.h>
|
||||
@ -25,6 +26,7 @@
|
||||
#include <map>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "string_util.h"
|
||||
#include "test_util.h"
|
||||
|
||||
@ -37,109 +39,110 @@ std::string instance_name;
|
||||
|
||||
void test_trim()
|
||||
{
|
||||
ASSERT_EQUALS(std::string("1234"), trim(" 1234 "));
|
||||
ASSERT_EQUALS(std::string("1234"), trim("1234 "));
|
||||
ASSERT_EQUALS(std::string("1234"), trim(" 1234"));
|
||||
ASSERT_EQUALS(std::string("1234"), trim("1234"));
|
||||
ASSERT_EQUALS(std::string("1234"), trim(" 1234 "));
|
||||
ASSERT_EQUALS(std::string("1234"), trim("1234 "));
|
||||
ASSERT_EQUALS(std::string("1234"), trim(" 1234"));
|
||||
ASSERT_EQUALS(std::string("1234"), trim("1234"));
|
||||
|
||||
ASSERT_EQUALS(std::string("1234 "), trim_left(" 1234 "));
|
||||
ASSERT_EQUALS(std::string("1234 "), trim_left("1234 "));
|
||||
ASSERT_EQUALS(std::string("1234"), trim_left(" 1234"));
|
||||
ASSERT_EQUALS(std::string("1234"), trim_left("1234"));
|
||||
ASSERT_EQUALS(std::string("1234 "), trim_left(" 1234 "));
|
||||
ASSERT_EQUALS(std::string("1234 "), trim_left("1234 "));
|
||||
ASSERT_EQUALS(std::string("1234"), trim_left(" 1234"));
|
||||
ASSERT_EQUALS(std::string("1234"), trim_left("1234"));
|
||||
|
||||
ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234 "));
|
||||
ASSERT_EQUALS(std::string("1234"), trim_right("1234 "));
|
||||
ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234"));
|
||||
ASSERT_EQUALS(std::string("1234"), trim_right("1234"));
|
||||
ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234 "));
|
||||
ASSERT_EQUALS(std::string("1234"), trim_right("1234 "));
|
||||
ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234"));
|
||||
ASSERT_EQUALS(std::string("1234"), trim_right("1234"));
|
||||
|
||||
ASSERT_EQUALS(std::string("0"), str(0));
|
||||
ASSERT_EQUALS(std::string("1"), str(1));
|
||||
ASSERT_EQUALS(std::string("-1"), str(-1));
|
||||
ASSERT_EQUALS(std::string("9223372036854775807"), str(std::numeric_limits<int64_t>::max()));
|
||||
ASSERT_EQUALS(std::string("-9223372036854775808"), str(std::numeric_limits<int64_t>::min()));
|
||||
ASSERT_EQUALS(std::string("0"), str(std::numeric_limits<uint64_t>::min()));
|
||||
ASSERT_EQUALS(std::string("18446744073709551615"), str(std::numeric_limits<uint64_t>::max()));
|
||||
ASSERT_EQUALS(std::string("0"), str(0));
|
||||
ASSERT_EQUALS(std::string("1"), str(1));
|
||||
ASSERT_EQUALS(std::string("-1"), str(-1));
|
||||
ASSERT_EQUALS(std::string("9223372036854775807"), str(std::numeric_limits<int64_t>::max()));
|
||||
ASSERT_EQUALS(std::string("-9223372036854775808"), str(std::numeric_limits<int64_t>::min()));
|
||||
ASSERT_EQUALS(std::string("0"), str(std::numeric_limits<uint64_t>::min()));
|
||||
ASSERT_EQUALS(std::string("18446744073709551615"), str(std::numeric_limits<uint64_t>::max()));
|
||||
}
|
||||
|
||||
void test_base64()
|
||||
{
|
||||
size_t len;
|
||||
ASSERT_STREQUALS(s3fs_base64(NULL, 0), NULL);
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64(NULL, &len)), NULL);
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>(""), 0), NULL);
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("", &len)), NULL);
|
||||
size_t len;
|
||||
ASSERT_STREQUALS(s3fs_base64(NULL, 0), NULL);
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64(NULL, &len)), NULL);
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>(""), 0), NULL);
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("", &len)), NULL);
|
||||
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1"), 1), "MQ==");
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MQ==", &len)), "1");
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(1));
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("12"), 2), "MTI=");
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTI=", &len)), "12");
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(2));
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("123"), 3), "MTIz");
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTIz", &len)), "123");
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(3));
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1234"), 4), "MTIzNA==");
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTIzNA==", &len)), "1234");
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(4));
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1"), 1), "MQ==");
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MQ==", &len)), "1");
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(1));
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("12"), 2), "MTI=");
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTI=", &len)), "12");
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(2));
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("123"), 3), "MTIz");
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTIz", &len)), "123");
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(3));
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1234"), 4), "MTIzNA==");
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTIzNA==", &len)), "1234");
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(4));
|
||||
|
||||
// TODO: invalid input
|
||||
// TODO: invalid input
|
||||
}
|
||||
|
||||
void test_strtoofft()
|
||||
{
|
||||
ASSERT_EQUALS(s3fs_strtoofft("0"), static_cast<off_t>(0L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("9"), static_cast<off_t>(9L));
|
||||
try{
|
||||
s3fs_strtoofft("A");
|
||||
abort();
|
||||
}catch(std::exception &e){
|
||||
// expected
|
||||
}
|
||||
ASSERT_EQUALS(s3fs_strtoofft("A", /*base=*/ 16), static_cast<off_t>(10L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("F", /*base=*/ 16), static_cast<off_t>(15L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("a", /*base=*/ 16), static_cast<off_t>(10L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("f", /*base=*/ 16), static_cast<off_t>(15L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("deadbeef", /*base=*/ 16), static_cast<off_t>(3735928559L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("0"), static_cast<off_t>(0L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("9"), static_cast<off_t>(9L));
|
||||
try{
|
||||
s3fs_strtoofft("A");
|
||||
abort();
|
||||
}catch(std::exception &e){
|
||||
// expected
|
||||
}
|
||||
ASSERT_EQUALS(s3fs_strtoofft("A", /*base=*/ 16), static_cast<off_t>(10L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("F", /*base=*/ 16), static_cast<off_t>(15L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("a", /*base=*/ 16), static_cast<off_t>(10L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("f", /*base=*/ 16), static_cast<off_t>(15L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("deadbeef", /*base=*/ 16), static_cast<off_t>(3735928559L));
|
||||
}
|
||||
|
||||
void test_wtf8_encoding()
|
||||
{
|
||||
std::string ascii("normal string");
|
||||
std::string utf8("Hyld\xc3\xbdpi \xc3\xbej\xc3\xb3\xc3\xb0""f\xc3\xa9lagsins vex \xc3\xbar k\xc3\xa6rkomnu b\xc3\xb6li \xc3\xad \xc3\xa1st");
|
||||
std::string cp1252("Hyld\xfdpi \xfej\xf3\xf0""f\xe9lagsins vex \xfar k\xe6rkomnu b\xf6li \xed \xe1st");
|
||||
std::string broken = utf8;
|
||||
broken[14] = 0x97;
|
||||
std::string mixed = ascii + utf8 + cp1252;
|
||||
std::string ascii("normal string");
|
||||
std::string utf8("Hyld\xc3\xbdpi \xc3\xbej\xc3\xb3\xc3\xb0""f\xc3\xa9lagsins vex \xc3\xbar k\xc3\xa6rkomnu b\xc3\xb6li \xc3\xad \xc3\xa1st");
|
||||
std::string cp1252("Hyld\xfdpi \xfej\xf3\xf0""f\xe9lagsins vex \xfar k\xe6rkomnu b\xf6li \xed \xe1st");
|
||||
std::string broken = utf8;
|
||||
broken[14] = 0x97;
|
||||
std::string mixed = ascii + utf8 + cp1252;
|
||||
|
||||
ASSERT_EQUALS(s3fs_wtf8_encode(ascii), ascii);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(ascii), ascii);
|
||||
ASSERT_EQUALS(s3fs_wtf8_encode(utf8), utf8);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(utf8), utf8);
|
||||
ASSERT_EQUALS(s3fs_wtf8_encode(ascii), ascii);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(ascii), ascii);
|
||||
ASSERT_EQUALS(s3fs_wtf8_encode(utf8), utf8);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(utf8), utf8);
|
||||
|
||||
ASSERT_NEQUALS(s3fs_wtf8_encode(cp1252), cp1252);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(cp1252)), cp1252);
|
||||
ASSERT_NEQUALS(s3fs_wtf8_encode(cp1252), cp1252);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(cp1252)), cp1252);
|
||||
|
||||
ASSERT_NEQUALS(s3fs_wtf8_encode(broken), broken);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(broken)), broken);
|
||||
ASSERT_NEQUALS(s3fs_wtf8_encode(broken), broken);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(broken)), broken);
|
||||
|
||||
ASSERT_NEQUALS(s3fs_wtf8_encode(mixed), mixed);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(mixed)), mixed);
|
||||
ASSERT_NEQUALS(s3fs_wtf8_encode(mixed), mixed);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(mixed)), mixed);
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
test_trim();
|
||||
test_base64();
|
||||
test_strtoofft();
|
||||
test_wtf8_encoding();
|
||||
return 0;
|
||||
test_trim();
|
||||
test_base64();
|
||||
test_strtoofft();
|
||||
test_wtf8_encoding();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 2
|
||||
* c-basic-offset: 2
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=2 ts=2 fdm=marker
|
||||
* vim<600: expandtab sw=2 ts=2
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
@ -18,81 +18,85 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_TEST_UTIL_H_
|
||||
#define S3FS_TEST_UTIL_H_
|
||||
|
||||
#include <cstdlib>
|
||||
#include <iostream>
|
||||
#include <stdio.h>
|
||||
|
||||
template <typename T> void assert_equals(const T &x, const T &y, const char *file, int line)
|
||||
{
|
||||
if (x != y) {
|
||||
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
|
||||
std::cerr << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
if (x != y) {
|
||||
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
|
||||
std::cerr << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
template <> void assert_equals(const std::string &x, const std::string &y, const char *file, int line)
|
||||
{
|
||||
if (x != y) {
|
||||
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
|
||||
for (unsigned i=0; i<x.length(); i++)
|
||||
fprintf(stderr, "%02x ", (unsigned char)x[i]);
|
||||
std::cerr << std::endl;
|
||||
for (unsigned i=0; i<y.length(); i++)
|
||||
fprintf(stderr, "%02x ", (unsigned char)y[i]);
|
||||
std::cerr << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
if (x != y) {
|
||||
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
|
||||
for (unsigned i=0; i<x.length(); i++) {
|
||||
fprintf(stderr, "%02x ", (unsigned char)x[i]);
|
||||
}
|
||||
std::cerr << std::endl;
|
||||
for (unsigned i=0; i<y.length(); i++) {
|
||||
fprintf(stderr, "%02x ", (unsigned char)y[i]);
|
||||
}
|
||||
std::cerr << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template <typename T> void assert_nequals(const T &x, const T &y, const char *file, int line)
|
||||
{
|
||||
if (x == y) {
|
||||
std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
if (x == y) {
|
||||
std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
template <> void assert_nequals(const std::string &x, const std::string &y, const char *file, int line)
|
||||
{
|
||||
if (x == y) {
|
||||
std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl;
|
||||
for (unsigned i=0; i<x.length(); i++)
|
||||
fprintf(stderr, "%02x ", (unsigned char)x[i]);
|
||||
std::cerr << std::endl;
|
||||
for (unsigned i=0; i<y.length(); i++)
|
||||
fprintf(stderr, "%02x ", (unsigned char)y[i]);
|
||||
std::cerr << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
if (x == y) {
|
||||
std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl;
|
||||
for (unsigned i=0; i<x.length(); i++) {
|
||||
fprintf(stderr, "%02x ", (unsigned char)x[i]);
|
||||
}
|
||||
std::cerr << std::endl;
|
||||
for (unsigned i=0; i<y.length(); i++) {
|
||||
fprintf(stderr, "%02x ", (unsigned char)y[i]);
|
||||
}
|
||||
std::cerr << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
void assert_strequals(const char *x, const char *y, const char *file, int line)
|
||||
{
|
||||
if(x == NULL && y == NULL){
|
||||
return;
|
||||
return;
|
||||
// cppcheck-suppress nullPointerRedundantCheck
|
||||
} else if(x == NULL || y == NULL || strcmp(x, y) != 0){
|
||||
std::cerr << (x ? x : "null") << " != " << (y ? y : "null") << " at " << file << ":" << line << std::endl;
|
||||
std::exit(1);
|
||||
std::cerr << (x ? x : "null") << " != " << (y ? y : "null") << " at " << file << ":" << line << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
#define ASSERT_EQUALS(x, y) \
|
||||
assert_equals((x), (y), __FILE__, __LINE__)
|
||||
#define ASSERT_EQUALS(x, y) assert_equals((x), (y), __FILE__, __LINE__)
|
||||
#define ASSERT_NEQUALS(x, y) assert_nequals((x), (y), __FILE__, __LINE__)
|
||||
#define ASSERT_STREQUALS(x, y) assert_strequals((x), (y), __FILE__, __LINE__)
|
||||
|
||||
#define ASSERT_NEQUALS(x, y) \
|
||||
assert_nequals((x), (y), __FILE__, __LINE__)
|
||||
|
||||
#define ASSERT_STREQUALS(x, y) \
|
||||
assert_strequals((x), (y), __FILE__, __LINE__)
|
||||
#endif // S3FS_TEST_UTIL_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 2
|
||||
* c-basic-offset: 2
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=2 ts=2 fdm=marker
|
||||
* vim<600: expandtab sw=2 ts=2
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
397 src/types.h
@ -21,167 +21,292 @@
|
||||
#ifndef S3FS_TYPES_H_
|
||||
#define S3FS_TYPES_H_
|
||||
|
||||
#include <cstring>
|
||||
#include <string>
|
||||
#include <map>
|
||||
#include <list>
|
||||
#include <vector>
|
||||
|
||||
//
|
||||
// For extended attribute
|
||||
// (HAVE_XXX symbols are defined in config.h)
|
||||
//
|
||||
#ifdef HAVE_SYS_EXTATTR_H
|
||||
#include <sys/extattr.h>
|
||||
#elif HAVE_ATTR_XATTR_H
|
||||
#include <attr/xattr.h>
|
||||
#elif HAVE_SYS_XATTR_H
|
||||
#include <sys/xattr.h>
|
||||
#endif
|
||||
|
||||
#if __cplusplus < 201103L
|
||||
#define OPERATOR_EXPLICIT
|
||||
#else
|
||||
#define OPERATOR_EXPLICIT explicit
|
||||
#endif
|
||||
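OPERATOR_EXPLICIT exists because explicit conversion operators are C++11-only; the wrapper classes below declare a private, never-defined operator bool() with it so that accidental truthiness tests fail to compile. A minimal sketch of the same pattern, reusing the macro defined just above (the class name is illustrative, not from the commit):

// Illustrative only: the same private, never-defined conversion operator the
// wrapper classes below use to keep their values from acting as booleans.
class wrapped_flag
{
    public:
        enum Value{ OFF, ON };

        // cppcheck-suppress noExplicitConstructor
        wrapped_flag(Value value) : value_(value) {}
        operator Value() const { return value_; }

    private:
        OPERATOR_EXPLICIT operator bool();    // declared, never defined
        Value value_;
};

// wrapped_flag f(wrapped_flag::ON);
// if(f){ ... }                 // rejected: resolution picks the private operator bool()
// if(wrapped_flag::ON == f){ } // fine: goes through operator Value()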
|
||||
//-------------------------------------------------------------------
// xattrs_t
//-------------------------------------------------------------------
//
// Header "x-amz-meta-xattr" is for extended attributes.
// This header is url encoded string which is json formatted.
// x-amz-meta-xattr:urlencode({"xattr-1":"base64(value-1)","xattr-2":"base64(value-2)","xattr-3":"base64(value-3)"})
//
typedef struct xattr_value
{
    unsigned char* pvalue;
    size_t length;

    explicit xattr_value(unsigned char* pval = NULL, size_t len = 0) : pvalue(pval), length(len) {}
    ~xattr_value()
    {
        delete[] pvalue;
    }
}XATTRVAL, *PXATTRVAL;

typedef std::map<std::string, PXATTRVAL> xattrs_t;
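The comment above fixes the wire format: a JSON object of base64 values, URL-encoded as a whole. A sketch of what one such header value looks like, using urlEncode() declared in string_util.h (illustrative only; the real serialization code lives elsewhere in the tree):

#include <string>

#include "string_util.h"    // urlEncode()

int main()
{
    // One attribute, following the format documented above:
    // "dmFsdWUtMQ==" is base64("value-1"); the whole JSON object is then URL encoded.
    std::string json = "{\"xattr-1\":\"dmFsdWUtMQ==\"}";
    std::string header_value = urlEncode(json);

    // Sent on the request as:  x-amz-meta-xattr: <header_value>
    return header_value.empty() ? 1 : 0;
}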
|
||||
//-------------------------------------------------------------------
|
||||
// storage_class_t
|
||||
//-------------------------------------------------------------------
|
||||
class storage_class_t{
|
||||
public:
|
||||
enum Value{
|
||||
STANDARD,
|
||||
STANDARD_IA,
|
||||
ONEZONE_IA,
|
||||
REDUCED_REDUNDANCY,
|
||||
INTELLIGENT_TIERING,
|
||||
GLACIER,
|
||||
DEEP_ARCHIVE,
|
||||
UNKNOWN
|
||||
};
|
||||
public:
|
||||
enum Value{
|
||||
STANDARD,
|
||||
STANDARD_IA,
|
||||
ONEZONE_IA,
|
||||
REDUCED_REDUNDANCY,
|
||||
INTELLIGENT_TIERING,
|
||||
GLACIER,
|
||||
DEEP_ARCHIVE,
|
||||
UNKNOWN
|
||||
};
|
||||
|
||||
// cppcheck-suppress noExplicitConstructor
|
||||
storage_class_t(Value value) : value_(value) {}
|
||||
// cppcheck-suppress noExplicitConstructor
|
||||
storage_class_t(Value value) : value_(value) {}
|
||||
|
||||
operator Value() const { return value_; }
|
||||
operator Value() const { return value_; }
|
||||
|
||||
const char* str() const {
|
||||
switch(value_){
|
||||
case STANDARD:
|
||||
return "STANDARD";
|
||||
case STANDARD_IA:
|
||||
return "STANDARD_IA";
|
||||
case ONEZONE_IA:
|
||||
return "ONEZONE_IA";
|
||||
case REDUCED_REDUNDANCY:
|
||||
return "REDUCED_REDUNDANCY";
|
||||
case INTELLIGENT_TIERING:
|
||||
return "INTELLIGENT_TIERING";
|
||||
case GLACIER:
|
||||
return "GLACIER";
|
||||
case DEEP_ARCHIVE:
|
||||
return "DEEP_ARCHIVE";
|
||||
case UNKNOWN:
|
||||
return NULL;
|
||||
}
|
||||
abort();
|
||||
}
|
||||
const char* str() const
|
||||
{
|
||||
switch(value_){
|
||||
case STANDARD:
|
||||
return "STANDARD";
|
||||
case STANDARD_IA:
|
||||
return "STANDARD_IA";
|
||||
case ONEZONE_IA:
|
||||
return "ONEZONE_IA";
|
||||
case REDUCED_REDUNDANCY:
|
||||
return "REDUCED_REDUNDANCY";
|
||||
case INTELLIGENT_TIERING:
|
||||
return "INTELLIGENT_TIERING";
|
||||
case GLACIER:
|
||||
return "GLACIER";
|
||||
case DEEP_ARCHIVE:
|
||||
return "DEEP_ARCHIVE";
|
||||
case UNKNOWN:
|
||||
return NULL;
|
||||
}
|
||||
abort();
|
||||
}
|
||||
|
||||
static storage_class_t from_str(const char* str) {
|
||||
if(0 == strcmp(str, "standard")){
|
||||
return STANDARD;
|
||||
}else if(0 == strcmp(str, "standard_ia")){
|
||||
return STANDARD_IA;
|
||||
}else if(0 == strcmp(str, "onezone_ia")){
|
||||
return ONEZONE_IA;
|
||||
}else if(0 == strcmp(str, "reduced_redundancy")){
|
||||
return REDUCED_REDUNDANCY;
|
||||
}else if(0 == strcmp(str, "intelligent_tiering")){
|
||||
return INTELLIGENT_TIERING;
|
||||
}else if(0 == strcmp(str, "glacier")){
|
||||
return GLACIER;
|
||||
}else if(0 == strcmp(str, "deep_archive")){
|
||||
return DEEP_ARCHIVE;
|
||||
}else{
|
||||
return UNKNOWN;
|
||||
}
|
||||
}
|
||||
static storage_class_t from_str(const char* str)
|
||||
{
|
||||
if(0 == strcmp(str, "standard")){
|
||||
return STANDARD;
|
||||
}else if(0 == strcmp(str, "standard_ia")){
|
||||
return STANDARD_IA;
|
||||
}else if(0 == strcmp(str, "onezone_ia")){
|
||||
return ONEZONE_IA;
|
||||
}else if(0 == strcmp(str, "reduced_redundancy")){
|
||||
return REDUCED_REDUNDANCY;
|
||||
}else if(0 == strcmp(str, "intelligent_tiering")){
|
||||
return INTELLIGENT_TIERING;
|
||||
}else if(0 == strcmp(str, "glacier")){
|
||||
return GLACIER;
|
||||
}else if(0 == strcmp(str, "deep_archive")){
|
||||
return DEEP_ARCHIVE;
|
||||
}else{
|
||||
return UNKNOWN;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
explicit operator bool();
|
||||
Value value_;
|
||||
private:
|
||||
OPERATOR_EXPLICIT operator bool();
|
||||
Value value_;
|
||||
};
|
||||
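from_str() and str() round-trip the known storage classes, with UNKNOWN as the catch-all. A minimal usage sketch (not taken from the option-parsing code itself):

#include <cstdio>

#include "types.h"

int main()
{
    // Option-parsing style: map the user-supplied string onto the enum wrapper.
    storage_class_t sc = storage_class_t::from_str("intelligent_tiering");
    if(storage_class_t::UNKNOWN == sc){
        printf("unsupported storage class\n");
        return 1;
    }
    // str() gives back the value sent to S3 ("INTELLIGENT_TIERING" here).
    printf("x-amz-storage-class: %s\n", sc.str());
    return 0;
}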
|
||||
//-------------------------------------------------------------------
|
||||
// acl_t
|
||||
//-------------------------------------------------------------------
|
||||
class acl_t{
|
||||
public:
|
||||
enum Value{
|
||||
PRIVATE,
|
||||
PUBLIC_READ,
|
||||
PUBLIC_READ_WRITE,
|
||||
AWS_EXEC_READ,
|
||||
AUTHENTICATED_READ,
|
||||
BUCKET_OWNER_READ,
|
||||
BUCKET_OWNER_FULL_CONTROL,
|
||||
LOG_DELIVERY_WRITE,
|
||||
UNKNOWN
|
||||
};
|
||||
public:
|
||||
enum Value{
|
||||
PRIVATE,
|
||||
PUBLIC_READ,
|
||||
PUBLIC_READ_WRITE,
|
||||
AWS_EXEC_READ,
|
||||
AUTHENTICATED_READ,
|
||||
BUCKET_OWNER_READ,
|
||||
BUCKET_OWNER_FULL_CONTROL,
|
||||
LOG_DELIVERY_WRITE,
|
||||
UNKNOWN
|
||||
};
|
||||
|
||||
// cppcheck-suppress noExplicitConstructor
|
||||
acl_t(Value value) : value_(value) {}
|
||||
// cppcheck-suppress noExplicitConstructor
|
||||
acl_t(Value value) : value_(value) {}
|
||||
|
||||
operator Value() const { return value_; }
|
||||
operator Value() const { return value_; }
|
||||
|
||||
const char* str() const {
|
||||
switch(value_){
|
||||
case PRIVATE:
|
||||
return "private";
|
||||
case PUBLIC_READ:
|
||||
return "public-read";
|
||||
case PUBLIC_READ_WRITE:
|
||||
return "public-read-write";
|
||||
case AWS_EXEC_READ:
|
||||
return "aws-exec-read";
|
||||
case AUTHENTICATED_READ:
|
||||
return "authenticated-read";
|
||||
case BUCKET_OWNER_READ:
|
||||
return "bucket-owner-read";
|
||||
case BUCKET_OWNER_FULL_CONTROL:
|
||||
return "bucket-owner-full-control";
|
||||
case LOG_DELIVERY_WRITE:
|
||||
return "log-delivery-write";
|
||||
case UNKNOWN:
|
||||
return NULL;
|
||||
}
|
||||
abort();
|
||||
}
|
||||
const char* str() const
|
||||
{
|
||||
switch(value_){
|
||||
case PRIVATE:
|
||||
return "private";
|
||||
case PUBLIC_READ:
|
||||
return "public-read";
|
||||
case PUBLIC_READ_WRITE:
|
||||
return "public-read-write";
|
||||
case AWS_EXEC_READ:
|
||||
return "aws-exec-read";
|
||||
case AUTHENTICATED_READ:
|
||||
return "authenticated-read";
|
||||
case BUCKET_OWNER_READ:
|
||||
return "bucket-owner-read";
|
||||
case BUCKET_OWNER_FULL_CONTROL:
|
||||
return "bucket-owner-full-control";
|
||||
case LOG_DELIVERY_WRITE:
|
||||
return "log-delivery-write";
|
||||
case UNKNOWN:
|
||||
return NULL;
|
||||
}
|
||||
abort();
|
||||
}
|
||||
|
||||
static acl_t from_str(const char *acl) {
|
||||
if(0 == strcmp(acl, "private")){
|
||||
return PRIVATE;
|
||||
}else if(0 == strcmp(acl, "public-read")){
|
||||
return PUBLIC_READ;
|
||||
}else if(0 == strcmp(acl, "public-read-write")){
|
||||
return PUBLIC_READ_WRITE;
|
||||
}else if(0 == strcmp(acl, "aws-exec-read")){
|
||||
return AWS_EXEC_READ;
|
||||
}else if(0 == strcmp(acl, "authenticated-read")){
|
||||
return AUTHENTICATED_READ;
|
||||
}else if(0 == strcmp(acl, "bucket-owner-read")){
|
||||
return BUCKET_OWNER_READ;
|
||||
}else if(0 == strcmp(acl, "bucket-owner-full-control")){
|
||||
return BUCKET_OWNER_FULL_CONTROL;
|
||||
}else if(0 == strcmp(acl, "log-delivery-write")){
|
||||
return LOG_DELIVERY_WRITE;
|
||||
}else{
|
||||
return UNKNOWN;
|
||||
}
|
||||
}
|
||||
static acl_t from_str(const char *acl)
|
||||
{
|
||||
if(0 == strcmp(acl, "private")){
|
||||
return PRIVATE;
|
||||
}else if(0 == strcmp(acl, "public-read")){
|
||||
return PUBLIC_READ;
|
||||
}else if(0 == strcmp(acl, "public-read-write")){
|
||||
return PUBLIC_READ_WRITE;
|
||||
}else if(0 == strcmp(acl, "aws-exec-read")){
|
||||
return AWS_EXEC_READ;
|
||||
}else if(0 == strcmp(acl, "authenticated-read")){
|
||||
return AUTHENTICATED_READ;
|
||||
}else if(0 == strcmp(acl, "bucket-owner-read")){
|
||||
return BUCKET_OWNER_READ;
|
||||
}else if(0 == strcmp(acl, "bucket-owner-full-control")){
|
||||
return BUCKET_OWNER_FULL_CONTROL;
|
||||
}else if(0 == strcmp(acl, "log-delivery-write")){
|
||||
return LOG_DELIVERY_WRITE;
|
||||
}else{
|
||||
return UNKNOWN;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
explicit operator bool();
|
||||
Value value_;
|
||||
private:
|
||||
OPERATOR_EXPLICIT operator bool();
|
||||
Value value_;
|
||||
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// sse_type_t
|
||||
//-------------------------------------------------------------------
|
||||
class sse_type_t{
|
||||
public:
|
||||
enum Value{
|
||||
SSE_DISABLE = 0, // not use server side encrypting
|
||||
SSE_S3, // server side encrypting by S3 key
|
||||
SSE_C, // server side encrypting by custom key
|
||||
SSE_KMS // server side encrypting by kms id
|
||||
};
|
||||
public:
|
||||
enum Value{
|
||||
SSE_DISABLE = 0, // not use server side encrypting
|
||||
SSE_S3, // server side encrypting by S3 key
|
||||
SSE_C, // server side encrypting by custom key
|
||||
SSE_KMS // server side encrypting by kms id
|
||||
};
|
||||
|
||||
// cppcheck-suppress noExplicitConstructor
|
||||
sse_type_t(Value value) : value_(value) {}
|
||||
// cppcheck-suppress noExplicitConstructor
|
||||
sse_type_t(Value value) : value_(value) {}
|
||||
|
||||
operator Value() const { return value_; }
|
||||
operator Value() const { return value_; }
|
||||
|
||||
private:
|
||||
explicit operator bool();
|
||||
Value value_;
|
||||
private:
|
||||
//OPERATOR_EXPLICIT operator bool();
|
||||
Value value_;
|
||||
};
|
||||
|
||||
//----------------------------------------------
|
||||
// etaglist_t / filepart
|
||||
//----------------------------------------------
|
||||
typedef std::vector<std::string> etaglist_t;
|
||||
|
||||
//
|
||||
// Each part information for Multipart upload
|
||||
//
|
||||
struct filepart
|
||||
{
|
||||
bool uploaded; // does finish uploading
|
||||
std::string etag; // expected etag value
|
||||
int fd; // base file(temporary full file) descriptor
|
||||
off_t startpos; // seek fd point for uploading
|
||||
off_t size; // uploading size
|
||||
etaglist_t* etaglist; // use only parallel upload
|
||||
int etagpos; // use only parallel upload
|
||||
|
||||
filepart() : uploaded(false), fd(-1), startpos(0), size(-1), etaglist(NULL), etagpos(-1) {}
|
||||
~filepart()
|
||||
{
|
||||
clear();
|
||||
}
|
||||
|
||||
void clear(void)
|
||||
{
|
||||
uploaded = false;
|
||||
etag = "";
|
||||
fd = -1;
|
||||
startpos = 0;
|
||||
size = -1;
|
||||
etaglist = NULL;
|
||||
etagpos = - 1;
|
||||
}
|
||||
|
||||
void add_etag_list(etaglist_t* list)
|
||||
{
|
||||
if(list){
|
||||
list->push_back(std::string(""));
|
||||
etaglist = list;
|
||||
etagpos = list->size() - 1;
|
||||
}else{
|
||||
etaglist = NULL;
|
||||
etagpos = - 1;
|
||||
}
|
||||
}
|
||||
};
|
||||
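add_etag_list() reserves one slot per part in a shared etaglist_t and records its index in etagpos, which is how parallel multipart uploads report their ETags back without reordering. An illustrative sketch (part sizes and the ETag value are made up):

#include <cstdio>

#include "types.h"

int main()
{
    // One shared list of ETags for the whole multipart upload ...
    etaglist_t etags;

    // ... and one filepart per chunk, each reserving its own slot.
    filepart part1;
    part1.startpos = 0;
    part1.size     = 10 * 1024 * 1024;
    part1.add_etag_list(&etags);    // pushes an empty slot, records etagpos = 0

    filepart part2;
    part2.startpos = part1.size;
    part2.size     = 10 * 1024 * 1024;
    part2.add_etag_list(&etags);    // etagpos = 1

    // When a part finishes uploading, its worker fills in its own slot.
    (*part1.etaglist)[part1.etagpos] = "\"9b2cf535f27731c974343645a3985328\"";  // example value
    part1.uploaded = true;

    printf("parts tracked: %zu\n", etags.size());    // 2
    return 0;
}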
|
||||
//-------------------------------------------------------------------
|
||||
// mimes_t
|
||||
//-------------------------------------------------------------------
|
||||
struct case_insensitive_compare_func
|
||||
{
|
||||
bool operator()(const std::string& a, const std::string& b) const {
|
||||
return strcasecmp(a.c_str(), b.c_str()) < 0;
|
||||
}
|
||||
};
|
||||
typedef std::map<std::string, std::string, case_insensitive_compare_func> mimes_t;
|
||||
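case_insensitive_compare_func makes mimes_t keys compare case-insensitively, so extension lookups do not depend on the case used in /etc/mime.types. A small sketch:

#include <cstdio>

#include "types.h"

int main()
{
    // Keys compare case-insensitively, so "JPG" and "jpg" are the same entry.
    mimes_t mimes;
    mimes["jpg"]  = "image/jpeg";
    mimes["HTML"] = "text/html";

    mimes_t::const_iterator it = mimes.find("JPG");
    if(it != mimes.end()){
        printf("%s -> %s\n", it->first.c_str(), it->second.c_str());    // jpg -> image/jpeg
    }
    printf("entries: %zu\n", mimes.size());    // 2
    return 0;
}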
|
||||
//-------------------------------------------------------------------
|
||||
// Typedefs specialized for use
|
||||
//-------------------------------------------------------------------
|
||||
typedef std::list<std::string> readline_t;
|
||||
typedef std::map<std::string, std::string> kvmap_t;
|
||||
typedef std::map<std::string, kvmap_t> bucketkvmap_t;
|
||||
|
||||
#endif // S3FS_TYPES_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 2
|
||||
* c-basic-offset: 2
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=2 ts=2 fdm=marker
|
||||
* vim<600: expandtab sw=2 ts=2
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
@ -17,14 +17,24 @@
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
######################################################################
|
||||
|
||||
TESTS=small-integration-test.sh
|
||||
|
||||
EXTRA_DIST = \
|
||||
integration-test-common.sh \
|
||||
require-root.sh \
|
||||
small-integration-test.sh \
|
||||
mergedir.sh \
|
||||
sample_delcache.sh \
|
||||
sample_ahbe.conf
|
||||
integration-test-common.sh \
|
||||
require-root.sh \
|
||||
small-integration-test.sh \
|
||||
mergedir.sh \
|
||||
sample_delcache.sh \
|
||||
sample_ahbe.conf
|
||||
|
||||
testdir = test
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
|
@ -1,4 +1,23 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
#
|
||||
# Common code for starting an s3fs-fuse mountpoint and an S3Proxy instance
|
||||
@ -257,3 +276,12 @@ function common_exit_handler {
|
||||
stop_s3proxy
|
||||
}
|
||||
trap common_exit_handler EXIT
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
|
@ -1,4 +1,23 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
@ -1059,3 +1078,12 @@ function add_all_tests {
|
||||
init_suite
|
||||
add_all_tests
|
||||
run_suite
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
|
@ -1,4 +1,24 @@
|
||||
#!/bin/sh
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
#
|
||||
# Merge old directory object to new.
|
||||
# For s3fs after v1.64
|
||||
@ -165,5 +185,10 @@ echo -n "# Finished : " >> $LOGFILE
|
||||
echo `date` >> $LOGFILE
|
||||
|
||||
#
|
||||
# END
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
|
@ -1,7 +1,35 @@
|
||||
#!/bin/bash -e
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
if [[ $EUID -ne 0 ]]
|
||||
then
|
||||
echo "This test script must be run as root" 1>&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
|
@ -1,4 +1,23 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
@ -27,3 +46,12 @@ make clean
|
||||
./configure CXXFLAGS='-O1 -g'
|
||||
make
|
||||
RETRIES=200 VALGRIND='--error-exitcode=1 --leak-check=full' make check -C test/
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
|
@ -1,4 +1,24 @@
|
||||
#!/bin/sh
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
#
|
||||
# This is unsupport sample deleting cache files script.
|
||||
# So s3fs's local cache files(stats and objects) grow up,
|
||||
@ -12,29 +32,29 @@
|
||||
|
||||
func_usage()
|
||||
{
|
||||
echo ""
|
||||
echo "Usage: $1 <bucket name> <cache path> <limit size> [-silent]"
|
||||
echo " $1 -h"
|
||||
echo "Sample: $1 mybucket /tmp/s3fs/cache 1073741824"
|
||||
echo ""
|
||||
echo " bucket name = bucket name which specified s3fs option"
|
||||
echo " cache path = cache directory path which specified by"
|
||||
echo " use_cache s3fs option."
|
||||
echo " limit size = limit for total cache files size."
|
||||
echo " specify by BYTE"
|
||||
echo " -silent = silent mode"
|
||||
echo ""
|
||||
echo ""
|
||||
echo "Usage: $1 <bucket name> <cache path> <limit size> [-silent]"
|
||||
echo " $1 -h"
|
||||
echo "Sample: $1 mybucket /tmp/s3fs/cache 1073741824"
|
||||
echo ""
|
||||
echo " bucket name = bucket name which specified s3fs option"
|
||||
echo " cache path = cache directory path which specified by"
|
||||
echo " use_cache s3fs option."
|
||||
echo " limit size = limit for total cache files size."
|
||||
echo " specify by BYTE"
|
||||
echo " -silent = silent mode"
|
||||
echo ""
|
||||
}
|
||||
|
||||
PRGNAME=`basename $0`
|
||||
|
||||
if [ "X$1" = "X-h" -o "X$1" = "X-H" ]; then
|
||||
func_usage $PRGNAME
|
||||
exit 0
|
||||
func_usage $PRGNAME
|
||||
exit 0
|
||||
fi
|
||||
if [ "X$1" = "X" -o "X$2" = "X" -o "X$3" = "X" ]; then
|
||||
func_usage $PRGNAME
|
||||
exit 1
|
||||
func_usage $PRGNAME
|
||||
exit 1
|
||||
fi
|
||||
|
||||
BUCKET=$1
|
||||
@ -42,7 +62,7 @@ CDIR="$2"
|
||||
LIMIT=$3
|
||||
SILENT=0
|
||||
if [ "X$4" = "X-silent" ]; then
|
||||
SILENT=1
|
||||
SILENT=1
|
||||
fi
|
||||
FILES_CDIR="${CDIR}/${BUCKET}"
|
||||
STATS_CDIR="${CDIR}/.${BUCKET}.stat"
|
||||
@ -51,10 +71,10 @@ CURRENT_CACHE_SIZE=`du -sb "$FILES_CDIR" | awk '{print $1}'`
|
||||
# Check total size
|
||||
#
|
||||
if [ $LIMIT -ge $CURRENT_CACHE_SIZE ]; then
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "$FILES_CDIR ($CURRENT_CACHE_SIZE) is below allowed $LIMIT"
|
||||
fi
|
||||
exit 0
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "$FILES_CDIR ($CURRENT_CACHE_SIZE) is below allowed $LIMIT"
|
||||
fi
|
||||
exit 0
|
||||
fi
|
||||
|
||||
#
|
||||
@ -68,39 +88,44 @@ TMP_CFILE=""
|
||||
#
|
||||
find "$STATS_CDIR" -type f -exec stat -c "%X:%n" "{}" \; | sort | while read part
|
||||
do
|
||||
echo Looking at $part
|
||||
TMP_ATIME=`echo "$part" | cut -d: -f1`
|
||||
TMP_STATS="`echo "$part" | cut -d: -f2`"
|
||||
TMP_CFILE=`echo "$TMP_STATS" | sed s/\.$BUCKET\.stat/$BUCKET/`
|
||||
|
||||
if [ `stat -c %X "$TMP_STATS"` -eq $TMP_ATIME ]; then
|
||||
rm -f "$TMP_STATS" "$TMP_CFILE" > /dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "ERROR: Could not remove files($TMP_STATS,$TMP_CFILE)"
|
||||
fi
|
||||
exit 1
|
||||
else
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "remove file: $TMP_CFILE $TMP_STATS"
|
||||
fi
|
||||
echo Looking at $part
|
||||
TMP_ATIME=`echo "$part" | cut -d: -f1`
|
||||
TMP_STATS="`echo "$part" | cut -d: -f2`"
|
||||
TMP_CFILE=`echo "$TMP_STATS" | sed s/\.$BUCKET\.stat/$BUCKET/`
|
||||
|
||||
if [ `stat -c %X "$TMP_STATS"` -eq $TMP_ATIME ]; then
|
||||
rm -f "$TMP_STATS" "$TMP_CFILE" > /dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "ERROR: Could not remove files($TMP_STATS,$TMP_CFILE)"
|
||||
fi
|
||||
exit 1
|
||||
else
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "remove file: $TMP_CFILE $TMP_STATS"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
if [ $LIMIT -ge `du -sb "$FILES_CDIR" | awk '{print $1}'` ]; then
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "finish removing files"
|
||||
if [ $LIMIT -ge `du -sb "$FILES_CDIR" | awk '{print $1}'` ]; then
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "finish removing files"
|
||||
fi
|
||||
break
|
||||
fi
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
TOTAL_SIZE=`du -sb "$FILES_CDIR" | awk '{print $1}'`
|
||||
echo "Finish: $FILES_CDIR total size is $TOTAL_SIZE"
|
||||
TOTAL_SIZE=`du -sb "$FILES_CDIR" | awk '{print $1}'`
|
||||
echo "Finish: $FILES_CDIR total size is $TOTAL_SIZE"
|
||||
fi
|
||||
|
||||
exit 0
|
||||
|
||||
#
|
||||
# End
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
|
@ -1,4 +1,23 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
#
|
||||
# Test s3fs-fuse file system operations with
|
||||
@ -63,3 +82,12 @@ done
|
||||
stop_s3proxy
|
||||
|
||||
echo "$0: tests complete."
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
|
@ -1,4 +1,23 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
#### Test utils
|
||||
|
||||
@ -279,3 +298,12 @@ function aws_cli() {
|
||||
fi
|
||||
aws $* --endpoint-url "${S3_URL}" --no-verify-ssl $FLAGS
|
||||
}
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
|
@ -1,4 +1,23 @@
|
||||
#!/usr/bin/env python2
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
import os
|
||||
import unittest
|
||||
@ -79,3 +98,11 @@ class OssfsUnitTest(unittest.TestCase):
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
|
@ -1,4 +1,23 @@
|
||||
#!/usr/bin/env python2
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
import os
|
||||
import sys
|
||||
@ -16,3 +35,12 @@ try:
|
||||
os.write(fd, data)
|
||||
finally:
|
||||
os.close(fd)
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
|