mirror of
https://github.com/s3fs-fuse/s3fs-fuse.git
synced 2025-01-22 13:28:25 +00:00
Changes codes
1) Changed codes about memory leak For memory leak, below codes are changed. * calls malloc_trim function * calls initializing NSS function, and adds configure option "--enable-nss-init". If libcurl is with NSS, s3fs initializes NSS manually. This initializing NSS is enabled by "--enable-nss-init" option at configure. if this option is specified, you need "nss-devel" package. * calls initializing libxml2(xmlInitParser). * BIO functions have memory leak, calls CRYPTO_free_ex_data. * changes cache structure. * changes cache out logic to LRU. * sets alignment for allcated memory in body data structure. * adds ssl session into share handle. and adds nosscache option. * deletes unused allocated memory.(bug) * changes defaule parallel count of head request in readdir (500->20) * fixes some bugs. git-svn-id: http://s3fs.googlecode.com/svn/trunk@482 df820570-a93a-0410-bd06-b72b767a4274
This commit is contained in:
parent
d45f4707ea
commit
42b74c9d2e
91
configure.ac
91
configure.ac
@ -1,9 +1,27 @@
|
||||
######################################################################
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
######################################################################
|
||||
dnl Process this file with autoconf to produce a configure script.
|
||||
|
||||
AC_PREREQ(2.59)
|
||||
AC_INIT(s3fs, 1.73)
|
||||
|
||||
|
||||
AC_CANONICAL_SYSTEM
|
||||
AM_INIT_AUTOMAKE()
|
||||
|
||||
@ -13,6 +31,77 @@ CXXFLAGS="$CXXFLAGS -Wall -D_FILE_OFFSET_BITS=64"
|
||||
|
||||
PKG_CHECK_MODULES([DEPS], [fuse >= 2.8.4 libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9])
|
||||
|
||||
dnl malloc_trim function
|
||||
AC_CHECK_FUNCS(malloc_trim, , )
|
||||
|
||||
dnl Initializing NSS(temporally)
|
||||
AC_MSG_CHECKING([Initializing libcurl build with NSS])
|
||||
AC_ARG_ENABLE(
|
||||
nss-init,
|
||||
[
|
||||
AS_HELP_STRING(
|
||||
[--enable-nss-init],
|
||||
[Inilializing libcurl with NSS (default is no)]
|
||||
)
|
||||
],
|
||||
[
|
||||
case "${enableval}" in
|
||||
yes)
|
||||
AC_MSG_RESULT(yes)
|
||||
nss_init_enabled=yes
|
||||
;;
|
||||
*)
|
||||
AC_MSG_RESULT(no)
|
||||
nss_init_enabled=no
|
||||
;;
|
||||
esac
|
||||
],
|
||||
[
|
||||
AC_MSG_RESULT(no)
|
||||
nss_init_enabled=no
|
||||
])
|
||||
|
||||
AS_IF(
|
||||
[test $nss_init_enabled = yes],
|
||||
[
|
||||
AC_DEFINE(NSS_INIT_ENABLED, 1)
|
||||
AC_CHECK_LIB(nss3, NSS_NoDB_Init, , [AC_MSG_ERROR(not found NSS libraries)])
|
||||
AC_CHECK_LIB(plds4, PL_ArenaFinish, , [AC_MSG_ERROR(not found PL_ArenaFinish)])
|
||||
AC_CHECK_LIB(nspr4, PR_Cleanup, , [AC_MSG_ERROR(not found PR_Cleanup)])
|
||||
AC_CHECK_HEADER(nss.h, , [AC_MSG_ERROR(not found nss.h)])
|
||||
AC_CHECK_HEADER(nspr4/prinit.h, , [AC_MSG_ERROR(not found prinit.h)])
|
||||
AC_PATH_PROG(NSSCONFIG, [nss-config], no)
|
||||
AS_IF(
|
||||
[test $NSSCONFIG = no],
|
||||
[
|
||||
DEPS_CFLAGS="$DEPS_CFLAGS -I/usr/include/nss3"
|
||||
DEPS_LIBS="$DEPS_LIBS -lnss3"
|
||||
],
|
||||
[
|
||||
addcflags=`nss-config --cflags`
|
||||
DEPS_CFLAGS="$DEPS_CFLAGS $addcflags"
|
||||
dnl addlib=`nss-config --libs`
|
||||
dnl DEPS_LIBS="$DEPS_LIBS $addlib"
|
||||
DEPS_LIBS="$DEPS_LIBS -lnss3"
|
||||
])
|
||||
AC_PATH_PROG(NSPRCONFIG, [nspr-config], no)
|
||||
AS_IF(
|
||||
[test $NSPRCONFIG = no],
|
||||
[
|
||||
DEPS_CFLAGS="$DEPS_CFLAGS -I/usr/include/nspr4"
|
||||
DEPS_LIBS="$DEPS_LIBS -lnspr4 -lplds4"
|
||||
],
|
||||
[
|
||||
addcflags=`nspr-config --cflags`
|
||||
DEPS_CFLAGS="$DEPS_CFLAGS $addcflags"
|
||||
dnl addlib=`nspr-config --libs`
|
||||
dnl DEPS_LIBS="$DEPS_LIBS $addlib"
|
||||
DEPS_LIBS="$DEPS_LIBS -lnspr4 -lplds4"
|
||||
])
|
||||
])
|
||||
|
||||
AS_UNSET(nss_enabled)
|
||||
|
||||
AC_CONFIG_FILES(Makefile src/Makefile test/Makefile doc/Makefile)
|
||||
AC_OUTPUT
|
||||
|
||||
|
@ -117,7 +117,10 @@ You can specify this option for performance, s3fs memorizes in stat cache that t
|
||||
\fB\-o\fR nodnscache - disable dns cache.
|
||||
s3fs is always using dns cache, this option make dns cache disable.
|
||||
.TP
|
||||
\fB\-o\fR multireq_max (default="500")
|
||||
\fB\-o\fR nosscache - disable ssl session cache.
|
||||
s3fs is always using ssl session cache, this option make ssl session cache disable.
|
||||
.TP
|
||||
\fB\-o\fR multireq_max (default="20")
|
||||
maximum number of parallel request for listing objects.
|
||||
.TP
|
||||
\fB\-o\fR parallel_count (default="5")
|
||||
|
180
src/cache.cpp
180
src/cache.cpp
@ -33,6 +33,7 @@
|
||||
#include <list>
|
||||
|
||||
#include "cache.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_util.h"
|
||||
|
||||
using namespace std;
|
||||
@ -40,28 +41,26 @@ using namespace std;
|
||||
//-------------------------------------------------------------------
|
||||
// Static
|
||||
//-------------------------------------------------------------------
|
||||
StatCache StatCache::singleton;
|
||||
StatCache StatCache::singleton;
|
||||
pthread_mutex_t StatCache::stat_cache_lock;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Constructor/Destructor
|
||||
//-------------------------------------------------------------------
|
||||
StatCache::StatCache()
|
||||
StatCache::StatCache() : IsExpireTime(false), ExpireTime(0), CacheSize(1000), IsCacheNoObject(false)
|
||||
{
|
||||
if(this == StatCache::getStatCacheData()){
|
||||
stat_cache.clear();
|
||||
pthread_mutex_init(&(StatCache::stat_cache_lock), NULL);
|
||||
}else{
|
||||
assert(false);
|
||||
}
|
||||
CacheSize = 1000;
|
||||
ExpireTime = 0;
|
||||
IsExpireTime = false;
|
||||
IsCacheNoObject = false;
|
||||
}
|
||||
|
||||
StatCache::~StatCache()
|
||||
{
|
||||
if(this == StatCache::getStatCacheData()){
|
||||
Clear();
|
||||
pthread_mutex_destroy(&(StatCache::stat_cache_lock));
|
||||
}else{
|
||||
assert(false);
|
||||
@ -111,6 +110,20 @@ bool StatCache::SetCacheNoObject(bool flag)
|
||||
return old;
|
||||
}
|
||||
|
||||
void StatCache::Clear(void)
|
||||
{
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
|
||||
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); stat_cache.erase(iter++)){
|
||||
if((*iter).second){
|
||||
delete (*iter).second;
|
||||
}
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
}
|
||||
|
||||
bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce)
|
||||
{
|
||||
bool is_delete_cache = false;
|
||||
@ -128,9 +141,10 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
|
||||
iter = stat_cache.find(strpath.c_str());
|
||||
}
|
||||
|
||||
if(iter != stat_cache.end()) {
|
||||
if(!IsExpireTime|| ((*iter).second.cache_date + ExpireTime) >= time(NULL)){
|
||||
if((*iter).second.noobjcache){
|
||||
if(iter != stat_cache.end() && (*iter).second){
|
||||
stat_cache_entry* ent = (*iter).second;
|
||||
if(!IsExpireTime|| (ent->cache_date + ExpireTime) >= time(NULL)){
|
||||
if(ent->noobjcache){
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
if(!IsCacheNoObject){
|
||||
// need to delete this cache.
|
||||
@ -142,7 +156,7 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
|
||||
}
|
||||
// hit without checking etag
|
||||
if(petag){
|
||||
string stretag = (*iter).second.meta["ETag"];
|
||||
string stretag = ent->meta["ETag"];
|
||||
if('\0' != petag[0] && 0 != strcmp(petag, stretag.c_str())){
|
||||
is_delete_cache = true;
|
||||
}
|
||||
@ -150,24 +164,22 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
|
||||
if(is_delete_cache){
|
||||
// not hit by different ETag
|
||||
DPRNNN("stat cache not hit by ETag[path=%s][time=%jd][hit count=%lu][ETag(%s)!=(%s)]",
|
||||
strpath.c_str(), (intmax_t)((*iter).second.cache_date), (*iter).second.hit_count,
|
||||
petag ? petag : "null", (*iter).second.meta["ETag"].c_str());
|
||||
strpath.c_str(), (intmax_t)(ent->cache_date), ent->hit_count, petag ? petag : "null", ent->meta["ETag"].c_str());
|
||||
}else{
|
||||
// hit
|
||||
DPRNNN("stat cache hit [path=%s][time=%jd][hit count=%lu]",
|
||||
strpath.c_str(), (intmax_t)((*iter).second.cache_date), (*iter).second.hit_count);
|
||||
DPRNNN("stat cache hit [path=%s][time=%jd][hit count=%lu]", strpath.c_str(), (intmax_t)(ent->cache_date), ent->hit_count);
|
||||
|
||||
if(pst!= NULL){
|
||||
*pst= (*iter).second.stbuf;
|
||||
*pst= ent->stbuf;
|
||||
}
|
||||
if(meta != NULL){
|
||||
meta->clear();
|
||||
(*meta) = (*iter).second.meta;
|
||||
*meta = ent->meta;
|
||||
}
|
||||
if(pisforce != NULL){
|
||||
(*pisforce) = (*iter).second.isforce;
|
||||
(*pisforce) = ent->isforce;
|
||||
}
|
||||
(*iter).second.hit_count++;
|
||||
ent->hit_count++;
|
||||
ent->cache_date = time(NULL);
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
return true;
|
||||
}
|
||||
@ -206,10 +218,11 @@ bool StatCache::IsNoObjectCache(string& key, bool overcheck)
|
||||
iter = stat_cache.find(strpath.c_str());
|
||||
}
|
||||
|
||||
if(iter != stat_cache.end()) {
|
||||
if(!IsExpireTime|| ((*iter).second.cache_date + ExpireTime) >= time(NULL)){
|
||||
if((*iter).second.noobjcache){
|
||||
if(iter != stat_cache.end() && (*iter).second) {
|
||||
if(!IsExpireTime|| ((*iter).second->cache_date + ExpireTime) >= time(NULL)){
|
||||
if((*iter).second->noobjcache){
|
||||
// noobjcache = true means no object.
|
||||
(*iter).second->cache_date = time(NULL);
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
return true;
|
||||
}
|
||||
@ -233,46 +246,52 @@ bool StatCache::AddStat(std::string& key, headers_t& meta, bool forcedir)
|
||||
}
|
||||
DPRNNN("add stat cache entry[path=%s]", key.c_str());
|
||||
|
||||
if(stat_cache.size() > CacheSize){
|
||||
if(!TruncateCache()){
|
||||
return false;
|
||||
if(stat_cache.end() != stat_cache.find(key)){
|
||||
DelStat(key.c_str());
|
||||
}else{
|
||||
if(stat_cache.size() > CacheSize){
|
||||
if(!TruncateCache()){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct stat st;
|
||||
if(!convert_header_to_stat(key.c_str(), meta, &st, forcedir)){
|
||||
// make new
|
||||
stat_cache_entry* ent = new stat_cache_entry();
|
||||
if(!convert_header_to_stat(key.c_str(), meta, &(ent->stbuf), forcedir)){
|
||||
delete ent;
|
||||
return false;
|
||||
}
|
||||
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
stat_cache[key].stbuf = st;
|
||||
stat_cache[key].hit_count = 0;
|
||||
stat_cache[key].cache_date = time(NULL); // Set time.
|
||||
stat_cache[key].isforce = forcedir;
|
||||
stat_cache[key].noobjcache = false;
|
||||
|
||||
ent->hit_count = 0;
|
||||
ent->cache_date = time(NULL); // Set time.
|
||||
ent->isforce = forcedir;
|
||||
ent->noobjcache = false;
|
||||
ent->meta.clear();
|
||||
//copy only some keys
|
||||
for (headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter) {
|
||||
for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
|
||||
string tag = (*iter).first;
|
||||
string value = (*iter).second;
|
||||
if(tag == "Content-Type"){
|
||||
stat_cache[key].meta[tag] = value;
|
||||
ent->meta[tag] = value;
|
||||
}else if(tag == "Content-Length"){
|
||||
stat_cache[key].meta[tag] = value;
|
||||
ent->meta[tag] = value;
|
||||
}else if(tag == "ETag"){
|
||||
stat_cache[key].meta[tag] = value;
|
||||
ent->meta[tag] = value;
|
||||
}else if(tag == "Last-Modified"){
|
||||
stat_cache[key].meta[tag] = value;
|
||||
ent->meta[tag] = value;
|
||||
}else if(tag.substr(0, 5) == "x-amz"){
|
||||
stat_cache[key].meta[tag] = value;
|
||||
ent->meta[tag] = value;
|
||||
}else{
|
||||
// Check for upper case
|
||||
transform(tag.begin(), tag.end(), tag.begin(), static_cast<int (*)(int)>(std::tolower));
|
||||
if(tag.substr(0, 5) == "x-amz"){
|
||||
stat_cache[key].meta[tag] = value;
|
||||
ent->meta[tag] = value;
|
||||
}
|
||||
}
|
||||
}
|
||||
// add
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
stat_cache[key] = ent;
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
|
||||
return true;
|
||||
@ -288,21 +307,27 @@ bool StatCache::AddNoObjectCache(string& key)
|
||||
}
|
||||
DPRNNN("add no object cache entry[path=%s]", key.c_str());
|
||||
|
||||
if(stat_cache.size() > CacheSize){
|
||||
if(!TruncateCache()){
|
||||
return false;
|
||||
if(stat_cache.end() != stat_cache.find(key)){
|
||||
DelStat(key.c_str());
|
||||
}else{
|
||||
if(stat_cache.size() > CacheSize){
|
||||
if(!TruncateCache()){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct stat st;
|
||||
memset(&st, 0, sizeof(struct stat));
|
||||
|
||||
// make new
|
||||
stat_cache_entry* ent = new stat_cache_entry();
|
||||
memset(&(ent->stbuf), 0, sizeof(struct stat));
|
||||
ent->hit_count = 0;
|
||||
ent->cache_date = time(NULL); // Set time.
|
||||
ent->isforce = false;
|
||||
ent->noobjcache = true;
|
||||
ent->meta.clear();
|
||||
// add
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
stat_cache[key].stbuf = st;
|
||||
stat_cache[key].hit_count = 0;
|
||||
stat_cache[key].cache_date = time(NULL); // Set time.
|
||||
stat_cache[key].isforce = false;
|
||||
stat_cache[key].noobjcache = true;
|
||||
stat_cache[key] = ent;
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
|
||||
return true;
|
||||
@ -310,25 +335,34 @@ bool StatCache::AddNoObjectCache(string& key)
|
||||
|
||||
bool StatCache::TruncateCache(void)
|
||||
{
|
||||
string path_to_delete;
|
||||
unsigned int lowest_hit_count = 0;
|
||||
if(0 == stat_cache.size()){
|
||||
return true;
|
||||
}
|
||||
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
|
||||
time_t lowest_time = time(NULL) + 1;
|
||||
stat_cache_t::iterator iter_to_delete = stat_cache.end();
|
||||
stat_cache_t::iterator iter;
|
||||
|
||||
for(iter = stat_cache.begin(); iter != stat_cache.end(); iter++) {
|
||||
if(!lowest_hit_count) {
|
||||
lowest_hit_count = (*iter).second.hit_count;
|
||||
path_to_delete = (*iter).first;
|
||||
}
|
||||
if(lowest_hit_count > (*iter).second.hit_count){
|
||||
lowest_hit_count = (*iter).second.hit_count;
|
||||
path_to_delete = (*iter).first;
|
||||
if((*iter).second){
|
||||
if(lowest_time > (*iter).second->cache_date){
|
||||
lowest_time = (*iter).second->cache_date;
|
||||
iter_to_delete = iter;
|
||||
}
|
||||
}
|
||||
}
|
||||
stat_cache.erase(path_to_delete);
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
if(stat_cache.end() != iter_to_delete){
|
||||
DPRNNN("truncate stat cache[path=%s]", (*iter_to_delete).first.c_str());
|
||||
if((*iter_to_delete).second){
|
||||
delete (*iter_to_delete).second;
|
||||
}
|
||||
stat_cache.erase(iter_to_delete);
|
||||
S3FS_MALLOCTRIM(0);
|
||||
}
|
||||
|
||||
DPRNNN("truncate stat cache[path=%s]", path_to_delete.c_str());
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -341,8 +375,12 @@ bool StatCache::DelStat(const char* key)
|
||||
DPRNNN("delete stat cache entry[path=%s]", key);
|
||||
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
stat_cache_t::iterator iter = stat_cache.find(key);
|
||||
if(iter != stat_cache.end()){
|
||||
|
||||
stat_cache_t::iterator iter;
|
||||
if(stat_cache.end() != (iter = stat_cache.find(string(key)))){
|
||||
if((*iter).second){
|
||||
delete (*iter).second;
|
||||
}
|
||||
stat_cache.erase(iter);
|
||||
}
|
||||
if(0 < strlen(key) && 0 != strcmp(key, "/")){
|
||||
@ -354,11 +392,15 @@ bool StatCache::DelStat(const char* key)
|
||||
// If there is "path/" cache, delete it.
|
||||
strpath += "/";
|
||||
}
|
||||
iter = stat_cache.find(strpath.c_str());
|
||||
if(iter != stat_cache.end()){
|
||||
if(stat_cache.end() != (iter = stat_cache.find(strpath.c_str()))){
|
||||
if((*iter).second){
|
||||
delete (*iter).second;
|
||||
}
|
||||
stat_cache.erase(iter);
|
||||
}
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
|
||||
return true;
|
||||
|
13
src/cache.h
13
src/cache.h
@ -20,7 +20,7 @@ struct stat_cache_entry {
|
||||
}
|
||||
};
|
||||
|
||||
typedef std::map<std::string, struct stat_cache_entry> stat_cache_t; // key=path
|
||||
typedef std::map<std::string, stat_cache_entry*> stat_cache_t; // key=path
|
||||
|
||||
//
|
||||
// Class
|
||||
@ -28,15 +28,16 @@ typedef std::map<std::string, struct stat_cache_entry> stat_cache_t; // key=path
|
||||
class StatCache
|
||||
{
|
||||
private:
|
||||
static StatCache singleton;
|
||||
static StatCache singleton;
|
||||
static pthread_mutex_t stat_cache_lock;
|
||||
stat_cache_t stat_cache;
|
||||
bool IsExpireTime;
|
||||
time_t ExpireTime;
|
||||
stat_cache_t stat_cache;
|
||||
bool IsExpireTime;
|
||||
time_t ExpireTime;
|
||||
unsigned long CacheSize;
|
||||
bool IsCacheNoObject;
|
||||
bool IsCacheNoObject;
|
||||
|
||||
private:
|
||||
void Clear(void);
|
||||
bool GetStat(std::string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce);
|
||||
// Truncate stat cache
|
||||
bool TruncateCache(void);
|
||||
|
302
src/curl.cpp
302
src/curl.cpp
@ -61,14 +61,17 @@ using namespace std;
|
||||
#define BODYDATA_RESIZE_APPEND_MIN (1 * 1024) // 1KB
|
||||
#define BODYDATA_RESIZE_APPEND_MID (1 * 1024 * 1024) // 1MB
|
||||
#define BODYDATA_RESIZE_APPEND_MAX (10 * 1024 * 1024) // 10MB
|
||||
#define AJUST_BLOCK(bytes, block) (((bytes / block) + ((bytes % block) ? 1 : 0)) * block)
|
||||
|
||||
bool BodyData::Resize(size_t addbytes)
|
||||
{
|
||||
if(IsSafeSize(addbytes)){
|
||||
return true;
|
||||
}
|
||||
|
||||
// New size
|
||||
size_t need_size = (lastpos + addbytes + 1) - bufsize;
|
||||
size_t need_size = AJUST_BLOCK((lastpos + addbytes + 1) - bufsize, sizeof(off_t));
|
||||
|
||||
if(BODYDATA_RESIZE_APPEND_MAX < bufsize){
|
||||
need_size = (BODYDATA_RESIZE_APPEND_MAX < need_size ? need_size : BODYDATA_RESIZE_APPEND_MAX);
|
||||
}else if(BODYDATA_RESIZE_APPEND_MID < bufsize){
|
||||
@ -79,11 +82,16 @@ bool BodyData::Resize(size_t addbytes)
|
||||
need_size = (BODYDATA_RESIZE_APPEND_MIN < need_size ? need_size : BODYDATA_RESIZE_APPEND_MIN);
|
||||
}
|
||||
// realloc
|
||||
if(NULL == (text = (char*)realloc(text, (bufsize + need_size)))){
|
||||
char* newtext;
|
||||
if(NULL == (newtext = (char*)realloc(text, (bufsize + need_size)))){
|
||||
DPRNCRIT("not enough memory (realloc returned NULL)");
|
||||
free(text);
|
||||
text = NULL;
|
||||
return false;
|
||||
}
|
||||
text = newtext;
|
||||
bufsize += need_size;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -131,11 +139,12 @@ const char* BodyData::str(void) const
|
||||
#define MAX_MULTI_COPY_SOURCE_SIZE 524288000 // 500MB
|
||||
|
||||
pthread_mutex_t S3fsCurl::curl_handles_lock;
|
||||
pthread_mutex_t S3fsCurl::curl_share_lock;
|
||||
pthread_mutex_t S3fsCurl::curl_share_lock[SHARE_MUTEX_MAX];
|
||||
pthread_mutex_t* S3fsCurl::crypt_mutex = NULL;
|
||||
bool S3fsCurl::is_initglobal_done = false;
|
||||
CURLSH* S3fsCurl::hCurlShare = NULL;
|
||||
bool S3fsCurl::is_dns_cache = true; // default
|
||||
bool S3fsCurl::is_ssl_session_cache= true; // default
|
||||
long S3fsCurl::connect_timeout = 10; // default
|
||||
time_t S3fsCurl::readwrite_timeout = 30; // default
|
||||
int S3fsCurl::retries = 3; // default
|
||||
@ -163,7 +172,10 @@ bool S3fsCurl::InitS3fsCurl(const char* MimeFile)
|
||||
if(0 != pthread_mutex_init(&S3fsCurl::curl_handles_lock, NULL)){
|
||||
return false;
|
||||
}
|
||||
if(0 != pthread_mutex_init(&S3fsCurl::curl_share_lock, NULL)){
|
||||
if(0 != pthread_mutex_init(&S3fsCurl::curl_share_lock[SHARE_MUTEX_DNS], NULL)){
|
||||
return false;
|
||||
}
|
||||
if(0 != pthread_mutex_init(&S3fsCurl::curl_share_lock[SHARE_MUTEX_SSL_SESSION], NULL)){
|
||||
return false;
|
||||
}
|
||||
if(!S3fsCurl::InitMimeType(MimeFile)){
|
||||
@ -186,15 +198,18 @@ bool S3fsCurl::DestroyS3fsCurl(void)
|
||||
int result = true;
|
||||
|
||||
if(!S3fsCurl::DestroyCryptMutex()){
|
||||
return false;
|
||||
result = false;
|
||||
}
|
||||
if(!S3fsCurl::DestroyShareCurl()){
|
||||
return false;
|
||||
result = false;
|
||||
}
|
||||
if(!S3fsCurl::DestroyGlobalCurl()){
|
||||
return false;
|
||||
result = false;
|
||||
}
|
||||
if(0 != pthread_mutex_destroy(&S3fsCurl::curl_share_lock)){
|
||||
if(0 != pthread_mutex_destroy(&S3fsCurl::curl_share_lock[SHARE_MUTEX_DNS])){
|
||||
result = false;
|
||||
}
|
||||
if(0 != pthread_mutex_destroy(&S3fsCurl::curl_share_lock[SHARE_MUTEX_SSL_SESSION])){
|
||||
result = false;
|
||||
}
|
||||
if(0 != pthread_mutex_destroy(&S3fsCurl::curl_handles_lock)){
|
||||
@ -230,12 +245,9 @@ bool S3fsCurl::InitShareCurl(void)
|
||||
{
|
||||
CURLSHcode nSHCode;
|
||||
|
||||
if(!S3fsCurl::is_dns_cache){
|
||||
return false;
|
||||
}
|
||||
if(!S3fsCurl::is_initglobal_done){
|
||||
DPRN("could not initialize global curl.");
|
||||
return false;
|
||||
if(!S3fsCurl::is_dns_cache && !S3fsCurl::is_ssl_session_cache){
|
||||
DPRN("Curl does not share DNS data.");
|
||||
return true;
|
||||
}
|
||||
if(S3fsCurl::hCurlShare){
|
||||
DPRN("already initiated.");
|
||||
@ -253,11 +265,19 @@ bool S3fsCurl::InitShareCurl(void)
|
||||
DPRN("curl_share_setopt(UNLOCKFUNC) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
|
||||
return false;
|
||||
}
|
||||
if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS))){
|
||||
DPRN("curl_share_setopt(DNS) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
|
||||
return false;
|
||||
if(!S3fsCurl::is_dns_cache){
|
||||
if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS))){
|
||||
DPRN("curl_share_setopt(DNS) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_USERDATA, (void*)&S3fsCurl::curl_share_lock))){
|
||||
if(!S3fsCurl::is_ssl_session_cache){
|
||||
if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_SHARE, CURL_LOCK_DATA_SSL_SESSION))){
|
||||
DPRN("curl_share_setopt(SSL SESSION) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_USERDATA, (void*)&S3fsCurl::curl_share_lock[0]))){
|
||||
DPRN("curl_share_setopt(USERDATA) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
|
||||
return false;
|
||||
}
|
||||
@ -266,14 +286,11 @@ bool S3fsCurl::InitShareCurl(void)
|
||||
|
||||
bool S3fsCurl::DestroyShareCurl(void)
|
||||
{
|
||||
if(!S3fsCurl::is_initglobal_done){
|
||||
DPRN("already destroy global curl.");
|
||||
return false;
|
||||
}
|
||||
if(!S3fsCurl::hCurlShare){
|
||||
if(S3fsCurl::is_dns_cache){
|
||||
DPRN("already destroy share curl.");
|
||||
if(!S3fsCurl::is_dns_cache && !S3fsCurl::is_ssl_session_cache){
|
||||
return true;
|
||||
}
|
||||
DPRN("already destroy share curl.");
|
||||
return false;
|
||||
}
|
||||
if(CURLSHE_OK != curl_share_cleanup(S3fsCurl::hCurlShare)){
|
||||
@ -285,17 +302,27 @@ bool S3fsCurl::DestroyShareCurl(void)
|
||||
|
||||
void S3fsCurl::LockCurlShare(CURL* handle, curl_lock_data nLockData, curl_lock_access laccess, void* useptr)
|
||||
{
|
||||
if(hCurlShare && useptr && CURL_LOCK_DATA_DNS == nLockData){
|
||||
pthread_mutex_t* lockmutex = static_cast<pthread_mutex_t*>(useptr);
|
||||
pthread_mutex_lock(lockmutex);
|
||||
if(!hCurlShare){
|
||||
return;
|
||||
}
|
||||
pthread_mutex_t* lockmutex = static_cast<pthread_mutex_t*>(useptr);
|
||||
if(CURL_LOCK_DATA_DNS == nLockData){
|
||||
pthread_mutex_lock(&lockmutex[SHARE_MUTEX_DNS]);
|
||||
}else if(CURL_LOCK_DATA_SSL_SESSION == nLockData){
|
||||
pthread_mutex_lock(&lockmutex[SHARE_MUTEX_SSL_SESSION]);
|
||||
}
|
||||
}
|
||||
|
||||
void S3fsCurl::UnlockCurlShare(CURL* handle, curl_lock_data nLockData, void* useptr)
|
||||
{
|
||||
if(hCurlShare && useptr && CURL_LOCK_DATA_DNS == nLockData){
|
||||
pthread_mutex_t* lockmutex = static_cast<pthread_mutex_t*>(useptr);
|
||||
pthread_mutex_unlock(lockmutex);
|
||||
if(!hCurlShare){
|
||||
return;
|
||||
}
|
||||
pthread_mutex_t* lockmutex = static_cast<pthread_mutex_t*>(useptr);
|
||||
if(CURL_LOCK_DATA_DNS == nLockData){
|
||||
pthread_mutex_unlock(&lockmutex[SHARE_MUTEX_DNS]);
|
||||
}else if(CURL_LOCK_DATA_SSL_SESSION == nLockData){
|
||||
pthread_mutex_unlock(&lockmutex[SHARE_MUTEX_SSL_SESSION]);
|
||||
}
|
||||
}
|
||||
|
||||
@ -341,6 +368,7 @@ bool S3fsCurl::DestroyCryptMutex(void)
|
||||
for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
|
||||
pthread_mutex_destroy(&S3fsCurl::crypt_mutex[cnt]);
|
||||
}
|
||||
CRYPTO_cleanup_all_ex_data();
|
||||
free(S3fsCurl::crypt_mutex);
|
||||
S3fsCurl::crypt_mutex = NULL;
|
||||
|
||||
@ -554,6 +582,9 @@ bool S3fsCurl::LocateBundle(void)
|
||||
if(BF.good()){
|
||||
BF.close();
|
||||
S3fsCurl::curl_ca_bundle.assign("/etc/pki/tls/certs/ca-bundle.crt");
|
||||
}else{
|
||||
DPRN("%s: /etc/pki/tls/certs/ca-bundle.crt is not readable", program_name.c_str());
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
@ -683,6 +714,13 @@ bool S3fsCurl::SetDnsCache(bool isCache)
|
||||
return old;
|
||||
}
|
||||
|
||||
bool S3fsCurl::SetSslSessionCache(bool isCache)
|
||||
{
|
||||
bool old = S3fsCurl::is_ssl_session_cache;
|
||||
S3fsCurl::is_ssl_session_cache = isCache;
|
||||
return old;
|
||||
}
|
||||
|
||||
long S3fsCurl::SetConnectTimeout(long timeout)
|
||||
{
|
||||
long old = S3fsCurl::connect_timeout;
|
||||
@ -829,7 +867,6 @@ int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta,
|
||||
string upload_id;
|
||||
struct stat st;
|
||||
int fd2;
|
||||
FILE* file;
|
||||
etaglist_t list;
|
||||
off_t remaining_bytes;
|
||||
unsigned char* buf;
|
||||
@ -838,7 +875,7 @@ int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta,
|
||||
FPRNNN("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd);
|
||||
|
||||
// duplicate fd
|
||||
if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET) || NULL == (file = fdopen(fd2, "rb"))){
|
||||
if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){
|
||||
DPRN("Cloud not duplicate file discriptor(errno=%d)", errno);
|
||||
if(-1 != fd2){
|
||||
close(fd2);
|
||||
@ -847,21 +884,12 @@ int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta,
|
||||
}
|
||||
if(-1 == fstat(fd2, &st)){
|
||||
DPRN("Invalid file discriptor(errno=%d)", errno);
|
||||
fclose(file);
|
||||
close(fd2);
|
||||
return -errno;
|
||||
}
|
||||
|
||||
// make Tempolary buf(maximum size + 4)
|
||||
if(NULL == (buf = (unsigned char*)malloc(sizeof(unsigned char) * (MULTIPART_SIZE + 4)))){
|
||||
DPRNCRIT("Could not allocate memory for buffer");
|
||||
fclose(file);
|
||||
S3FS_FUSE_EXIT();
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if(0 != (result = s3fscurl.PreMultipartPostRequest(tpath, meta, upload_id, ow_sse_flg))){
|
||||
free(buf);
|
||||
fclose(file);
|
||||
close(fd2);
|
||||
return result;
|
||||
}
|
||||
s3fscurl.DestroyCurlHandle();
|
||||
@ -891,8 +919,7 @@ int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta,
|
||||
// initiate upload part for parallel
|
||||
if(0 != (result = s3fscurl_para->UploadMultipartPostSetup(tpath, list.size(), upload_id))){
|
||||
DPRN("failed uploading part setup(%d)", result);
|
||||
free(buf);
|
||||
fclose(file);
|
||||
close(fd2);
|
||||
delete s3fscurl_para;
|
||||
return result;
|
||||
}
|
||||
@ -900,8 +927,7 @@ int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta,
|
||||
// set into parallel object
|
||||
if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
|
||||
DPRN("Could not make curl object into multi curl(%s).", tpath);
|
||||
free(buf);
|
||||
fclose(file);
|
||||
close(fd2);
|
||||
delete s3fscurl_para;
|
||||
return -1;
|
||||
}
|
||||
@ -916,8 +942,7 @@ int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta,
|
||||
// reinit for loop.
|
||||
curlmulti.Clear();
|
||||
}
|
||||
free(buf);
|
||||
fclose(file);
|
||||
close(fd2);
|
||||
|
||||
if(0 != (result = s3fscurl.CompleteMultipartPostRequest(tpath, upload_id, list))){
|
||||
return result;
|
||||
@ -1019,14 +1044,14 @@ bool S3fsCurl::ResetHandle(void)
|
||||
curl_easy_setopt(hCurl, CURLOPT_PROGRESSFUNCTION, S3fsCurl::CurlProgress);
|
||||
curl_easy_setopt(hCurl, CURLOPT_PROGRESSDATA, hCurl);
|
||||
// curl_easy_setopt(hCurl, CURLOPT_FORBID_REUSE, 1);
|
||||
|
||||
|
||||
if(0 == S3fsCurl::ssl_verify_hostname){
|
||||
curl_easy_setopt(hCurl, CURLOPT_SSL_VERIFYHOST, 0);
|
||||
}
|
||||
if(S3fsCurl::curl_ca_bundle.size() != 0){
|
||||
curl_easy_setopt(hCurl, CURLOPT_CAINFO, S3fsCurl::curl_ca_bundle.c_str());
|
||||
}
|
||||
if(S3fsCurl::is_dns_cache && S3fsCurl::hCurlShare){
|
||||
if((S3fsCurl::is_dns_cache || S3fsCurl::is_ssl_session_cache) && S3fsCurl::hCurlShare){
|
||||
curl_easy_setopt(hCurl, CURLOPT_SHARE, S3fsCurl::hCurlShare);
|
||||
}
|
||||
if(S3fsCurl::is_verbose){
|
||||
@ -1052,7 +1077,6 @@ bool S3fsCurl::CreateCurlHandle(bool force)
|
||||
DPRN("could not destroy handle.");
|
||||
return false;
|
||||
}
|
||||
ClearInternalData();
|
||||
DPRN("already has handle, so destroied it.");
|
||||
}
|
||||
|
||||
@ -1118,6 +1142,8 @@ bool S3fsCurl::ClearInternalData(void)
|
||||
b_partdata_size = 0;
|
||||
partdata.clear();
|
||||
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1414,12 +1440,7 @@ int S3fsCurl::RequestPerform(void)
|
||||
if(!S3fsCurl::LocateBundle()){
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
if(0 != S3fsCurl::curl_ca_bundle.size()){
|
||||
retrycnt++;
|
||||
curl_easy_setopt(hCurl, CURLOPT_CAINFO, S3fsCurl::curl_ca_bundle.c_str());
|
||||
// break for switch-case, and continue loop.
|
||||
break;
|
||||
}
|
||||
break; // retry with CAINFO
|
||||
}
|
||||
DPRNCRIT("curlCode: %d msg: %s", curlCode, curl_easy_strerror(curlCode));
|
||||
exit(EXIT_FAILURE);
|
||||
@ -1535,6 +1556,7 @@ string S3fsCurl::CalcSignature(string method, string strMD5, string content_type
|
||||
}
|
||||
// Too many write attempts
|
||||
DPRNNN("Failure during BIO_write, returning null String");
|
||||
CRYPTO_free_ex_data(CRYPTO_EX_INDEX_BIO, b64, &b64->ex_data);
|
||||
BIO_free_all(b64);
|
||||
Signature.clear();
|
||||
return Signature;
|
||||
@ -1542,6 +1564,7 @@ string S3fsCurl::CalcSignature(string method, string strMD5, string content_type
|
||||
}else{
|
||||
// If not a retry then it is an error
|
||||
DPRNNN("Failure during BIO_write, returning null String");
|
||||
CRYPTO_free_ex_data(CRYPTO_EX_INDEX_BIO, b64, &b64->ex_data);
|
||||
BIO_free_all(b64);
|
||||
Signature.clear();
|
||||
return Signature;
|
||||
@ -1563,6 +1586,7 @@ string S3fsCurl::CalcSignature(string method, string strMD5, string content_type
|
||||
ret = BIO_flush(b64);
|
||||
if(ret <= 0){
|
||||
DPRNNN("Failure during BIO_flush, returning null String");
|
||||
CRYPTO_free_ex_data(CRYPTO_EX_INDEX_BIO, b64, &b64->ex_data);
|
||||
BIO_free_all(b64);
|
||||
Signature.clear();
|
||||
return Signature;
|
||||
@ -1570,10 +1594,9 @@ string S3fsCurl::CalcSignature(string method, string strMD5, string content_type
|
||||
|
||||
BUF_MEM *bptr;
|
||||
BIO_get_mem_ptr(b64, &bptr);
|
||||
Signature.assign(bptr->data, bptr->length - 1);
|
||||
|
||||
Signature.resize(bptr->length - 1);
|
||||
memcpy(&Signature[0], bptr->data, bptr->length-1);
|
||||
|
||||
CRYPTO_free_ex_data(CRYPTO_EX_INDEX_BIO, b64, &b64->ex_data);
|
||||
BIO_free_all(b64);
|
||||
|
||||
return Signature;
|
||||
@ -1589,8 +1612,12 @@ bool S3fsCurl::GetUploadId(string& upload_id)
|
||||
}
|
||||
upload_id.clear();
|
||||
|
||||
xmlDocPtr doc = xmlReadMemory(bodydata->str(), bodydata->size(), "", NULL, 0);
|
||||
if(NULL == doc || NULL == doc->children){
|
||||
xmlDocPtr doc;
|
||||
if(NULL == (doc = xmlReadMemory(bodydata->str(), bodydata->size(), "", NULL, 0))){
|
||||
return result;
|
||||
}
|
||||
if(NULL == doc->children){
|
||||
S3FS_XMLFREEDOC(doc);
|
||||
return result;
|
||||
}
|
||||
for(xmlNodePtr cur_node = doc->children->children; NULL != cur_node; cur_node = cur_node->next){
|
||||
@ -1614,7 +1641,7 @@ bool S3fsCurl::GetUploadId(string& upload_id)
|
||||
}
|
||||
}
|
||||
}
|
||||
xmlFreeDoc(doc);
|
||||
S3FS_XMLFREEDOC(doc);
|
||||
|
||||
return result;
|
||||
}
|
||||
@ -2045,7 +2072,6 @@ int S3fsCurl::CheckBucket(void)
|
||||
string("Authorization: AWS " + AWSAccessKeyId + ":" +
|
||||
CalcSignature("GET", "", "", date, resource)).c_str());
|
||||
}
|
||||
|
||||
// setopt
|
||||
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
||||
curl_easy_setopt(hCurl, CURLOPT_FAILONERROR, true);
|
||||
@ -2580,16 +2606,14 @@ int S3fsCurl::MultipartUploadRequest(const char* tpath, headers_t& meta, int fd,
|
||||
string upload_id;
|
||||
struct stat st;
|
||||
int fd2;
|
||||
FILE* file;
|
||||
etaglist_t list;
|
||||
off_t remaining_bytes;
|
||||
off_t chunk;
|
||||
unsigned char* buf;
|
||||
|
||||
FPRNNN("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd);
|
||||
|
||||
// duplicate fd
|
||||
if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET) || NULL == (file = fdopen(fd2, "rb"))){
|
||||
if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){
|
||||
DPRN("Cloud not duplicate file discriptor(errno=%d)", errno);
|
||||
if(-1 != fd2){
|
||||
close(fd2);
|
||||
@ -2598,21 +2622,12 @@ int S3fsCurl::MultipartUploadRequest(const char* tpath, headers_t& meta, int fd,
|
||||
}
|
||||
if(-1 == fstat(fd2, &st)){
|
||||
DPRN("Invalid file discriptor(errno=%d)", errno);
|
||||
fclose(file);
|
||||
close(fd2);
|
||||
return -errno;
|
||||
}
|
||||
|
||||
// make Tempolary buf(maximum size + 4)
|
||||
if(NULL == (buf = (unsigned char*)malloc(sizeof(unsigned char) * (MULTIPART_SIZE + 4)))){
|
||||
DPRNCRIT("Could not allocate memory for buffer");
|
||||
fclose(file);
|
||||
S3FS_FUSE_EXIT();
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if(0 != (result = PreMultipartPostRequest(tpath, meta, upload_id, ow_sse_flg))){
|
||||
free(buf);
|
||||
fclose(file);
|
||||
close(fd2);
|
||||
return result;
|
||||
}
|
||||
DestroyCurlHandle();
|
||||
@ -2632,15 +2647,13 @@ int S3fsCurl::MultipartUploadRequest(const char* tpath, headers_t& meta, int fd,
|
||||
// upload part
|
||||
if(0 != (result = UploadMultipartPostRequest(tpath, (list.size() + 1), upload_id))){
|
||||
DPRN("failed uploading part(%d)", result);
|
||||
free(buf);
|
||||
fclose(file);
|
||||
close(fd2);
|
||||
return result;
|
||||
}
|
||||
list.push_back(partdata.etag);
|
||||
DestroyCurlHandle();
|
||||
}
|
||||
free(buf);
|
||||
fclose(file);
|
||||
close(fd2);
|
||||
|
||||
if(0 != (result = CompleteMultipartPostRequest(tpath, upload_id, list))){
|
||||
return result;
|
||||
@ -2694,7 +2707,7 @@ int S3fsCurl::MultipartRenameRequest(const char* from, const char* to, headers_t
|
||||
//-------------------------------------------------------------------
|
||||
// Class S3fsMultiCurl
|
||||
//-------------------------------------------------------------------
|
||||
#define MAX_MULTI_HEADREQ 500 // default: max request count in readdir curl_multi.
|
||||
#define MAX_MULTI_HEADREQ 20 // default: max request count in readdir curl_multi.
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class method for S3fsMultiCurl
|
||||
@ -2720,27 +2733,35 @@ S3fsMultiCurl::~S3fsMultiCurl()
|
||||
Clear();
|
||||
}
|
||||
|
||||
bool S3fsMultiCurl::Clear(void)
|
||||
bool S3fsMultiCurl::ClearEx(bool is_all)
|
||||
{
|
||||
s3fscurlmap_t::iterator iter;
|
||||
for(iter = cMap_req.begin(); iter != cMap_req.end(); cMap_req.erase(iter++)){
|
||||
CURL* hCurl = (*iter).first;
|
||||
S3fsCurl* s3fscurl = (*iter).second;
|
||||
if(hMulti && hCurl){
|
||||
curl_multi_remove_handle(hMulti, hCurl);
|
||||
}
|
||||
if(s3fscurl){
|
||||
s3fscurl->DestroyCurlHandle();
|
||||
delete s3fscurl; // with destroy curl handle.
|
||||
}
|
||||
}
|
||||
|
||||
if(hMulti){
|
||||
curl_multi_cleanup(hMulti);
|
||||
hMulti = NULL;
|
||||
}
|
||||
|
||||
s3fscurlmap_t::iterator iter;
|
||||
for(iter = cMap_all.begin(); iter != cMap_all.end(); iter++){
|
||||
S3fsCurl* s3fscurl = (*iter).second;
|
||||
s3fscurl->DestroyCurlHandle();
|
||||
delete s3fscurl;
|
||||
if(is_all){
|
||||
for(iter = cMap_all.begin(); iter != cMap_all.end(); cMap_all.erase(iter++)){
|
||||
S3fsCurl* s3fscurl = (*iter).second;
|
||||
s3fscurl->DestroyCurlHandle();
|
||||
delete s3fscurl;
|
||||
}
|
||||
}
|
||||
cMap_all.clear();
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
for(iter = cMap_req.begin(); iter != cMap_req.end(); iter++){
|
||||
S3fsCurl* s3fscurl = (*iter).second;
|
||||
s3fscurl->DestroyCurlHandle();
|
||||
delete s3fscurl;
|
||||
}
|
||||
cMap_req.clear();
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -2760,6 +2781,10 @@ S3fsMultiRetryCallback S3fsMultiCurl::SetRetryCallback(S3fsMultiRetryCallback fu
|
||||
|
||||
bool S3fsMultiCurl::SetS3fsCurlObject(S3fsCurl* s3fscurl)
|
||||
{
|
||||
if(hMulti){
|
||||
DPRN("Internal error: hMulti is not null");
|
||||
return false;
|
||||
}
|
||||
if(!s3fscurl){
|
||||
return false;
|
||||
}
|
||||
@ -2842,39 +2867,50 @@ int S3fsMultiCurl::MultiRead(void)
|
||||
return -EIO;
|
||||
}
|
||||
hCurl = msg->easy_handle;
|
||||
s3fscurl = cMap_req[hCurl];
|
||||
if(cMap_req.end() != cMap_req.find(hCurl)){
|
||||
s3fscurl = cMap_req[hCurl];
|
||||
}else{
|
||||
s3fscurl = NULL;
|
||||
}
|
||||
retrycurl= NULL;
|
||||
|
||||
if(CURLE_OK == msg->data.result && s3fscurl){
|
||||
long responseCode;
|
||||
if(s3fscurl->GetResponseCode(responseCode) && 400 > responseCode){
|
||||
// add into stat cache
|
||||
if(SuccessCallback && !SuccessCallback(s3fscurl)){
|
||||
DPRNNN("S3fsMultiCurl::MultiRead: error from callback function(%s).", s3fscurl->base_path.c_str());
|
||||
if(s3fscurl){
|
||||
if(CURLE_OK == msg->data.result){
|
||||
long responseCode;
|
||||
if(s3fscurl->GetResponseCode(responseCode) && 400 > responseCode){
|
||||
// add into stat cache
|
||||
if(SuccessCallback && !SuccessCallback(s3fscurl)){
|
||||
DPRNNN("error from callback function(%s).", s3fscurl->base_path.c_str());
|
||||
}
|
||||
}else{
|
||||
// This case is directory object("dir", "non dir object", "_$folder$", etc)
|
||||
DPRNINFO("failed a request(%s)", s3fscurl->base_path.c_str());
|
||||
}
|
||||
}else{
|
||||
// This case is directory object("dir", "non dir object", "_$folder$", etc)
|
||||
DPRNINFO("S3fsMultiCurl::MultiRead: failed a request(%s)", s3fscurl->base_path.c_str());
|
||||
}
|
||||
cMap_req.erase(hCurl);
|
||||
curl_multi_remove_handle(hMulti, hCurl);
|
||||
|
||||
}else{
|
||||
DPRNNN("failed to read(remaining: %d code: %d msg: %s), so retry this.",
|
||||
s3fscurl->DestroyCurlHandle();
|
||||
delete s3fscurl;
|
||||
|
||||
}else{
|
||||
DPRNNN("failed to read(remaining: %d code: %d msg: %s), so retry this.",
|
||||
remaining_messages, msg->data.result, curl_easy_strerror(msg->data.result));
|
||||
|
||||
// For retry
|
||||
if(RetryCallback){
|
||||
retrycurl = RetryCallback(s3fscurl);
|
||||
}
|
||||
}
|
||||
cMap_req.erase(hCurl);
|
||||
curl_multi_remove_handle(hMulti, hCurl);
|
||||
|
||||
// Cleanup this curl object and set retrying object(if there is).
|
||||
curl_multi_remove_handle(hMulti, hCurl);
|
||||
cMap_req.erase(hCurl);
|
||||
if(s3fscurl && s3fscurl != retrycurl){
|
||||
delete s3fscurl; // with destroy curl handle.
|
||||
}
|
||||
if(retrycurl){
|
||||
cMap_all[retrycurl->hCurl] = retrycurl;
|
||||
// For retry
|
||||
if(RetryCallback){
|
||||
retrycurl = RetryCallback(s3fscurl);
|
||||
cMap_all[retrycurl->hCurl] = retrycurl;
|
||||
}
|
||||
if(s3fscurl != retrycurl){
|
||||
s3fscurl->DestroyCurlHandle();
|
||||
delete s3fscurl;
|
||||
}
|
||||
}
|
||||
}else{
|
||||
assert(false);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
@ -2888,7 +2924,8 @@ int S3fsMultiCurl::Request(void)
|
||||
FPRNNN("[count=%zu]", cMap_all.size());
|
||||
|
||||
if(hMulti){
|
||||
Clear();
|
||||
DPRNNN("Warning: hMulti is not null, thus clear itself.");
|
||||
ClearEx(false);
|
||||
}
|
||||
|
||||
// Make request list.
|
||||
@ -2930,8 +2967,8 @@ int S3fsMultiCurl::Request(void)
|
||||
return result;
|
||||
}
|
||||
|
||||
// cleanup
|
||||
curl_multi_cleanup(hMulti);
|
||||
// Cleanup curl handle in multi handle
|
||||
ClearEx(false);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@ -3084,6 +3121,8 @@ struct curl_slist* AdditionalHeader::AddHeader(struct curl_slist* list, const ch
|
||||
// Adding header
|
||||
list = curl_slist_sort_insert(list, slistval.c_str());
|
||||
}
|
||||
meta.clear();
|
||||
S3FS_MALLOCTRIM(0);
|
||||
return list;
|
||||
}
|
||||
|
||||
@ -3141,14 +3180,14 @@ string GetContentMD5(int fd)
|
||||
BIO_write(b64, md5hex, MD5_DIGEST_LENGTH);
|
||||
free(md5hex);
|
||||
if(1 != BIO_flush(b64)){
|
||||
CRYPTO_free_ex_data(CRYPTO_EX_INDEX_BIO, b64, &b64->ex_data);
|
||||
BIO_free_all(b64);
|
||||
return string("");
|
||||
}
|
||||
BIO_get_mem_ptr(b64, &bptr);
|
||||
Signature.assign(bptr->data, bptr->length - 1);
|
||||
|
||||
Signature.resize(bptr->length - 1);
|
||||
memcpy(&Signature[0], bptr->data, bptr->length - 1);
|
||||
|
||||
CRYPTO_free_ex_data(CRYPTO_EX_INDEX_BIO, b64, &b64->ex_data);
|
||||
BIO_free_all(b64);
|
||||
|
||||
return Signature;
|
||||
@ -3159,12 +3198,15 @@ unsigned char* md5hexsum(int fd, off_t start, ssize_t size)
|
||||
MD5_CTX c;
|
||||
char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result = (unsigned char*)malloc(MD5_DIGEST_LENGTH);
|
||||
unsigned char* result;
|
||||
|
||||
// seek to top of file.
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
return NULL;
|
||||
}
|
||||
if(NULL == (result = (unsigned char*)malloc(MD5_DIGEST_LENGTH))){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memset(buf, 0, 512);
|
||||
MD5_Init(&c);
|
||||
|
12
src/curl.h
12
src/curl.h
@ -99,6 +99,11 @@ class S3fsMultiCurl;
|
||||
//----------------------------------------------
|
||||
// class S3fsCurl
|
||||
//----------------------------------------------
|
||||
// share
|
||||
#define SHARE_MUTEX_DNS 0
|
||||
#define SHARE_MUTEX_SSL_SESSION 1
|
||||
#define SHARE_MUTEX_MAX 2
|
||||
|
||||
// internal use struct for openssl
|
||||
struct CRYPTO_dynlock_value
|
||||
{
|
||||
@ -130,11 +135,12 @@ class S3fsCurl
|
||||
|
||||
// class variables
|
||||
static pthread_mutex_t curl_handles_lock;
|
||||
static pthread_mutex_t curl_share_lock;
|
||||
static pthread_mutex_t curl_share_lock[SHARE_MUTEX_MAX];
|
||||
static pthread_mutex_t* crypt_mutex;
|
||||
static bool is_initglobal_done;
|
||||
static CURLSH* hCurlShare;
|
||||
static bool is_dns_cache;
|
||||
static bool is_ssl_session_cache;
|
||||
static long connect_timeout;
|
||||
static time_t readwrite_timeout;
|
||||
static int retries;
|
||||
@ -233,6 +239,7 @@ class S3fsCurl
|
||||
// class methods(valiables)
|
||||
static std::string LookupMimeType(std::string name);
|
||||
static bool SetDnsCache(bool isCache);
|
||||
static bool SetSslSessionCache(bool isCache);
|
||||
static long SetConnectTimeout(long timeout);
|
||||
static time_t SetReadwriteTimeout(time_t timeout);
|
||||
static time_t GetReadwriteTimeout(void) { return S3fsCurl::readwrite_timeout; }
|
||||
@ -314,6 +321,7 @@ class S3fsMultiCurl
|
||||
S3fsMultiRetryCallback RetryCallback;
|
||||
|
||||
private:
|
||||
bool ClearEx(bool is_all);
|
||||
int MultiPerform(void);
|
||||
int MultiRead(void);
|
||||
|
||||
@ -326,7 +334,7 @@ class S3fsMultiCurl
|
||||
|
||||
S3fsMultiSuccessCallback SetSuccessCallback(S3fsMultiSuccessCallback function);
|
||||
S3fsMultiRetryCallback SetRetryCallback(S3fsMultiRetryCallback function);
|
||||
bool Clear(void);
|
||||
bool Clear(void) { return ClearEx(true); }
|
||||
bool SetS3fsCurlObject(S3fsCurl* s3fscurl);
|
||||
int Request(void);
|
||||
};
|
||||
|
@ -202,7 +202,7 @@ void PageList::FreeList(fdpage_list_t& list)
|
||||
|
||||
PageList::PageList(size_t size, bool is_init)
|
||||
{
|
||||
Init(0, false);
|
||||
Init(size, is_init);
|
||||
}
|
||||
|
||||
PageList::~PageList()
|
||||
@ -417,7 +417,6 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output)
|
||||
}
|
||||
string oneline;
|
||||
stringstream ssall(ptmp);
|
||||
free(ptmp);
|
||||
|
||||
// init
|
||||
Clear();
|
||||
@ -425,6 +424,7 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output)
|
||||
// load(size)
|
||||
if(!getline(ssall, oneline, '\n')){
|
||||
DPRN("failed to parse stats.");
|
||||
free(ptmp);
|
||||
return false;
|
||||
}
|
||||
size_t total = static_cast<size_t>(atoi(oneline.c_str()));
|
||||
@ -455,6 +455,7 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output)
|
||||
// add new area
|
||||
SetInit(offset, size, is_init);
|
||||
}
|
||||
free(ptmp);
|
||||
if(is_err){
|
||||
DPRN("failed to parse stats.");
|
||||
Clear();
|
||||
|
355
src/s3fs.cpp
355
src/s3fs.cpp
@ -111,17 +111,17 @@ static int check_parent_object_access(const char* path, int mask);
|
||||
static FdEntity* get_local_fent(const char* path, bool is_load = false);
|
||||
static bool multi_head_callback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* multi_head_retry_callback(S3fsCurl* s3fscurl);
|
||||
static int readdir_multi_head(const char* path, S3ObjList& head);
|
||||
static int readdir_multi_head(const char* path, S3ObjList& head, void* buf, fuse_fill_dir_t filler);
|
||||
static int list_bucket(const char* path, S3ObjList& head, const char* delimiter);
|
||||
static int directory_empty(const char* path);
|
||||
static bool is_truncated(const char* xml);
|
||||
static bool is_truncated(xmlDocPtr doc);;
|
||||
static int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx,
|
||||
const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head);
|
||||
static int append_objects_from_xml(const char* path, const char* xml, S3ObjList& head);
|
||||
static int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head);
|
||||
static bool GetXmlNsUrl(xmlDocPtr doc, string& nsurl);
|
||||
static xmlChar* get_base_exp(const char* xml, const char* exp);
|
||||
static xmlChar* get_prefix(const char* xml);
|
||||
static xmlChar* get_next_marker(const char* xml);
|
||||
static xmlChar* get_base_exp(xmlDocPtr doc, const char* exp);
|
||||
static xmlChar* get_prefix(xmlDocPtr doc);
|
||||
static xmlChar* get_next_marker(xmlDocPtr doc);
|
||||
static char* get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path);
|
||||
static int put_headers(const char* path, headers_t& meta, bool ow_sse_flg);
|
||||
static int rename_large_object(const char* from, const char* to);
|
||||
@ -194,6 +194,8 @@ static bool is_special_name_folder_object(const char* path)
|
||||
if(0 != s3fscurl.HeadRequest(strpath.c_str(), header)){
|
||||
return false;
|
||||
}
|
||||
header.clear();
|
||||
S3FS_MALLOCTRIM(0);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -693,6 +695,7 @@ static int s3fs_getattr(const char* path, struct stat* stbuf)
|
||||
}
|
||||
}
|
||||
FPRNINFO("[path=%s] uid=%u, gid=%u, mode=%04o", path, (unsigned int)(stbuf->st_uid), (unsigned int)(stbuf->st_gid), stbuf->st_mode);
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return result;
|
||||
}
|
||||
@ -728,6 +731,7 @@ static int s3fs_readlink(const char* path, char* buf, size_t size)
|
||||
buf[ressize] = '\0';
|
||||
|
||||
FdManager::get()->Close(ent);
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -765,6 +769,7 @@ static int s3fs_mknod(const char *path, mode_t mode, dev_t rdev)
|
||||
return result;
|
||||
}
|
||||
StatCache::getStatCacheData()->DelStat(path);
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return result;
|
||||
}
|
||||
@ -804,6 +809,7 @@ static int s3fs_create(const char* path, mode_t mode, struct fuse_file_info* fi)
|
||||
return -EIO;
|
||||
}
|
||||
fi->fh = ent->GetFd();
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -855,6 +861,8 @@ static int s3fs_mkdir(const char* path, mode_t mode)
|
||||
|
||||
result = create_directory_object(path, mode, time(NULL), pcxt->uid, pcxt->gid);
|
||||
StatCache::getStatCacheData()->DelStat(path);
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -871,6 +879,7 @@ static int s3fs_unlink(const char* path)
|
||||
result = s3fscurl.DeleteRequest(path);
|
||||
FdManager::DeleteCacheFile(path);
|
||||
StatCache::getStatCacheData()->DelStat(path);
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return result;
|
||||
}
|
||||
@ -941,6 +950,8 @@ static int s3fs_rmdir(const char* path)
|
||||
strpath += "_$folder$";
|
||||
result = s3fscurl.DeleteRequest(strpath.c_str());
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -991,6 +1002,7 @@ static int s3fs_symlink(const char* from, const char* to)
|
||||
FdManager::get()->Close(ent);
|
||||
|
||||
StatCache::getStatCacheData()->DelStat(to);
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return result;
|
||||
}
|
||||
@ -1293,6 +1305,8 @@ static int s3fs_rename(const char* from, const char* to)
|
||||
result = rename_object_nocopy(from, to);
|
||||
}
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -1364,6 +1378,7 @@ static int s3fs_chmod(const char* path, mode_t mode)
|
||||
}
|
||||
StatCache::getStatCacheData()->DelStat(nowcache);
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1443,6 +1458,8 @@ static int s3fs_chmod_nocopy(const char* path, mode_t mode)
|
||||
|
||||
StatCache::getStatCacheData()->DelStat(nowcache);
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -1523,6 +1540,7 @@ static int s3fs_chown(const char* path, uid_t uid, gid_t gid)
|
||||
}
|
||||
StatCache::getStatCacheData()->DelStat(nowcache);
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1612,6 +1630,8 @@ static int s3fs_chown_nocopy(const char* path, uid_t uid, gid_t gid)
|
||||
|
||||
StatCache::getStatCacheData()->DelStat(nowcache);
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -1678,6 +1698,7 @@ static int s3fs_utimens(const char* path, const struct timespec ts[2])
|
||||
}
|
||||
StatCache::getStatCacheData()->DelStat(nowcache);
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1766,6 +1787,8 @@ static int s3fs_utimens_nocopy(const char* path, const struct timespec ts[2])
|
||||
|
||||
StatCache::getStatCacheData()->DelStat(nowcache);
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -1814,6 +1837,7 @@ static int s3fs_truncate(const char* path, off_t size)
|
||||
FdManager::get()->Close(ent);
|
||||
|
||||
StatCache::getStatCacheData()->DelStat(path);
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return result;
|
||||
}
|
||||
@ -1852,6 +1876,7 @@ static int s3fs_open(const char* path, struct fuse_file_info* fi)
|
||||
return -EIO;
|
||||
}
|
||||
fi->fh = ent->GetFd();
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1957,6 +1982,8 @@ static int s3fs_flush(const char* path, struct fuse_file_info* fi)
|
||||
result = ent->Flush(meta, true, false);
|
||||
FdManager::get()->Close(ent);
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -1990,6 +2017,8 @@ static int s3fs_release(const char* path, struct fuse_file_info* fi)
|
||||
DPRNNN("Warning - file(%s),fd(%d) is still opened.", path, ent->GetFd());
|
||||
}
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2003,6 +2032,8 @@ static int s3fs_opendir(const char* path, struct fuse_file_info* fi)
|
||||
if(0 == (result = check_object_access(path, mask, NULL))){
|
||||
result = check_parent_object_access(path, mask);
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -2037,11 +2068,12 @@ static S3fsCurl* multi_head_retry_callback(S3fsCurl* s3fscurl)
|
||||
return newcurl;
|
||||
}
|
||||
|
||||
static int readdir_multi_head(const char* path, S3ObjList& head)
|
||||
static int readdir_multi_head(const char* path, S3ObjList& head, void* buf, fuse_fill_dir_t filler)
|
||||
{
|
||||
S3fsMultiCurl curlmulti;
|
||||
s3obj_list_t headlist;
|
||||
int result;
|
||||
s3obj_list_t fillerlist;
|
||||
int result = 0;
|
||||
|
||||
FPRNN("[path=%s][list=%zu]", path, headlist.size());
|
||||
|
||||
@ -2057,11 +2089,18 @@ static int readdir_multi_head(const char* path, S3ObjList& head)
|
||||
s3obj_list_t::iterator iter;
|
||||
long cnt;
|
||||
|
||||
fillerlist.clear();
|
||||
// Make single head request(with max).
|
||||
for(iter = headlist.begin(), cnt = 0; headlist.end() != iter && cnt < S3fsMultiCurl::GetMaxMultiRequest(); iter = headlist.erase(iter)){
|
||||
string disppath = path + (*iter);
|
||||
string etag = head.GetETag((*iter).c_str());
|
||||
|
||||
string fillpath = disppath;
|
||||
if('/' == disppath[disppath.length() - 1]){
|
||||
fillpath = fillpath.substr(0, fillpath.length() -1);
|
||||
}
|
||||
fillerlist.push_back(fillpath);
|
||||
|
||||
if(StatCache::getStatCacheData()->HasStat(disppath, etag.c_str())){
|
||||
continue;
|
||||
}
|
||||
@ -2087,6 +2126,20 @@ static int readdir_multi_head(const char* path, S3ObjList& head)
|
||||
break;
|
||||
}
|
||||
|
||||
// populate fuse buffer
|
||||
// here is best posision, because a case is cache size < files in directory
|
||||
//
|
||||
for(iter = fillerlist.begin(); fillerlist.end() != iter; iter++){
|
||||
struct stat st;
|
||||
string bpath = mybasename((*iter));
|
||||
if(StatCache::getStatCacheData()->GetStat((*iter), &st)){
|
||||
filler(buf, bpath.c_str(), &st, 0);
|
||||
}else{
|
||||
FPRNNN("Could not find %s file in stat cache.", (*iter).c_str());
|
||||
filler(buf, bpath.c_str(), 0, 0);
|
||||
}
|
||||
}
|
||||
|
||||
// reinit for loop.
|
||||
curlmulti.Clear();
|
||||
}
|
||||
@ -2118,21 +2171,16 @@ static int s3fs_readdir(const char* path, void* buf, fuse_fill_dir_t filler, off
|
||||
return 0;
|
||||
}
|
||||
|
||||
// populate fuse buffer
|
||||
head.GetNameList(headlist);
|
||||
s3obj_list_t::const_iterator liter;
|
||||
for(liter = headlist.begin(); headlist.end() != liter; liter++){
|
||||
filler(buf, (*liter).c_str(), 0, 0);
|
||||
}
|
||||
|
||||
// Send multi head request for stats caching.
|
||||
string strpath = path;
|
||||
if(strcmp(path, "/") != 0){
|
||||
strpath += "/";
|
||||
}
|
||||
if(0 != (result = readdir_multi_head(strpath.c_str(), head))){
|
||||
if(0 != (result = readdir_multi_head(strpath.c_str(), head, buf, filler))){
|
||||
DPRN("readdir_multi_head returns error(%d).", result);
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@@ -2144,6 +2192,7 @@ static int list_bucket(const char* path, S3ObjList& head, const char* delimiter)
  string next_marker = "";
  bool truncated = true;
  S3fsCurl s3fscurl;
  xmlDocPtr doc;
  BodyData* body;

  FPRNN("[path=%s]", path);

@@ -2177,21 +2226,34 @@ static int list_bucket(const char* path, S3ObjList& head, const char* delimiter)
    }
    body = s3fscurl.GetBodyData();

    if(0 != append_objects_from_xml(path, body->str(), head)){
      DPRN("append_objects_from_xml returns with error.");
    // xmlDocPtr
    if(NULL == (doc = xmlReadMemory(body->str(), static_cast<int>(body->size()), "", NULL, 0))){
      DPRN("xmlReadMemory returns with error.");
      return -1;
    }
    truncated = is_truncated(body->str());
    if(truncated){
      xmlChar* tmpch = get_next_marker(body->str());
    if(0 != append_objects_from_xml(path, doc, head)){
      DPRN("append_objects_from_xml returns with error.");
      xmlFreeDoc(doc);
      return -1;
    }
    if(true == (truncated = is_truncated(doc))){
      xmlChar* tmpch = get_next_marker(doc);
      if(tmpch){
        next_marker = (char*)tmpch;
        xmlFree(tmpch);
      }else{
        DPRN("Could not find next marker, thus break loop.");
        truncated = false;
      }
    }
    xmlFreeDoc(doc);
    S3FS_XMLFREEDOC(doc);

    // reset(initialize) curl object
    s3fscurl.DestroyCurlHandle();
  }
  S3FS_MALLOCTRIM(0);

  return 0;
}
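list_bucket() now parses each ListBucketResult page exactly once with xmlReadMemory() and hands the resulting xmlDocPtr to append_objects_from_xml(), is_truncated() and get_next_marker(), where the old code re-parsed the raw XML string in every helper. Below is a rough, self-contained sketch of that parse-once/query-many pattern; the response body and the query_exists() helper are illustrative, not s3fs code.

// Sketch of the "parse once, query many times" pattern; the response string
// and helper name here are illustrative, not taken from s3fs itself.
#include <libxml/parser.h>
#include <libxml/xpath.h>
#include <cstdio>
#include <cstring>

static bool query_exists(xmlDocPtr doc, const char* xpath)
{
  xmlXPathContextPtr ctx = xmlXPathNewContext(doc);
  if(!ctx){
    return false;
  }
  xmlXPathObjectPtr obj = xmlXPathEvalExpression((const xmlChar*)xpath, ctx);
  bool found = (obj && !xmlXPathNodeSetIsEmpty(obj->nodesetval));
  if(obj){
    xmlXPathFreeObject(obj);
  }
  xmlXPathFreeContext(ctx);
  return found;
}

int main(void)
{
  const char* body = "<ListBucketResult><IsTruncated>false</IsTruncated></ListBucketResult>";
  xmlDocPtr doc = xmlReadMemory(body, (int)strlen(body), "", NULL, 0);
  if(!doc){
    return 1;
  }
  // Reuse the same parsed document for every query instead of re-parsing.
  printf("IsTruncated present: %d\n", query_exists(doc, "//IsTruncated"));
  printf("NextMarker present:  %d\n", query_exists(doc, "//NextMarker"));
  xmlFreeDoc(doc);
  xmlCleanupParser();
  return 0;
}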
@@ -2203,31 +2265,52 @@ static int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathC
  xmlXPathObjectPtr contents_xp;
  xmlNodeSetPtr content_nodes;

  contents_xp = xmlXPathEvalExpression((xmlChar*)ex_contents, ctx);
  if(NULL == (contents_xp = xmlXPathEvalExpression((xmlChar*)ex_contents, ctx))){
    DPRNNN("xmlXPathEvalExpression returns null.");
    return -1;
  }
  if(xmlXPathNodeSetIsEmpty(contents_xp->nodesetval)){
    DPRNNN("contents_xp->nodesetval is empty.");
    S3FS_XMLXPATHFREEOBJECT(contents_xp);
    return 0;
  }
  content_nodes = contents_xp->nodesetval;

  int i;
  bool is_dir;
  string stretag;
  int i;
  for(i = 0; i < content_nodes->nodeNr; i++){
    ctx->node = content_nodes->nodeTab[i];

    // object name
    xmlXPathObjectPtr key = xmlXPathEvalExpression((xmlChar*)ex_key, ctx);
    xmlXPathObjectPtr key;
    if(NULL == (key = xmlXPathEvalExpression((xmlChar*)ex_key, ctx))){
      DPRNNN("key is null. but continue.");
      continue;
    }
    if(xmlXPathNodeSetIsEmpty(key->nodesetval)){
      DPRNNN("node is empty. but continue.");
      xmlXPathFreeObject(key);
      continue;
    }
    xmlNodeSetPtr key_nodes = key->nodesetval;
    char* name = get_object_name(doc, key_nodes->nodeTab[0]->xmlChildrenNode, path);

    if(!name){
      DPRNNN("append_objects_from_xml_ex name is something wrong. but continue.");
      DPRNNN("name is something wrong. but continue.");

    }else if((const char*)name != c_strErrorObjectName){
      bool is_dir = isCPrefix ? true : false;
      string stretag = "";
      is_dir = isCPrefix ? true : false;
      stretag = "";

      if(!isCPrefix && ex_etag){
        // Get ETag
        xmlXPathObjectPtr ETag = xmlXPathEvalExpression((xmlChar*)ex_etag, ctx);
        if(ETag){
          xmlNodeSetPtr etag_nodes = ETag->nodesetval;
          if(etag_nodes){
        xmlXPathObjectPtr ETag;
        if(NULL != (ETag = xmlXPathEvalExpression((xmlChar*)ex_etag, ctx))){
          if(xmlXPathNodeSetIsEmpty(ETag->nodesetval)){
            DPRNNN("ETag->nodesetval is empty.");
          }else{
            xmlNodeSetPtr etag_nodes = ETag->nodesetval;
            xmlChar* petag = xmlNodeListGetString(doc, etag_nodes->nodeTab[0]->xmlChildrenNode, 1);
            if(petag){
              stretag = (char*)petag;

@@ -2242,44 +2325,53 @@ static int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathC
        xmlXPathFreeObject(key);
        xmlXPathFreeObject(contents_xp);
        free(name);
        S3FS_MALLOCTRIM(0);
        return -1;
      }
      free(name);
    }else{
      DPRNINFO("append_objects_from_xml_ex name is file or subdir in dir. but continue.");
      DPRNINFO("name is file or subdir in dir. but continue.");
    }
    xmlXPathFreeObject(key);
  }
  xmlXPathFreeObject(contents_xp);
  S3FS_XMLXPATHFREEOBJECT(contents_xp);

  return 0;
}
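The rewritten loop also guards each XPath lookup: a NULL result object or an empty node set is skipped rather than dereferenced, and the string returned by xmlNodeListGetString() is copied and then released with xmlFree(). A minimal stand-alone sketch of that extraction pattern follows; the document content and variable names are made up for illustration.

// Hedged sketch of pulling a text value out of an XPath node set, the way the
// ETag is read above; the XML body here is illustrative only.
#include <libxml/parser.h>
#include <libxml/xpath.h>
#include <cstdio>
#include <cstring>
#include <string>

int main(void)
{
  const char* xml = "<Contents><ETag>\"abc123\"</ETag></Contents>";
  xmlDocPtr doc = xmlReadMemory(xml, (int)strlen(xml), "", NULL, 0);
  if(!doc){
    return 1;
  }
  std::string stretag;
  xmlXPathContextPtr ctx = xmlXPathNewContext(doc);
  if(ctx){
    xmlXPathObjectPtr obj = xmlXPathEvalExpression((const xmlChar*)"//ETag", ctx);
    if(obj && !xmlXPathNodeSetIsEmpty(obj->nodesetval)){
      // xmlNodeListGetString allocates a new string; the caller must xmlFree it.
      xmlChar* val = xmlNodeListGetString(doc, obj->nodesetval->nodeTab[0]->xmlChildrenNode, 1);
      if(val){
        stretag = (const char*)val;
        xmlFree(val);
      }
    }
    if(obj){
      xmlXPathFreeObject(obj);
    }
    xmlXPathFreeContext(ctx);
  }
  xmlFreeDoc(doc);
  xmlCleanupParser();
  printf("etag=%s\n", stretag.c_str());
  return 0;
}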
static bool GetXmlNsUrl(xmlDocPtr doc, string& nsurl)
{
  static time_t tmLast = 0;  // cache for 60 sec.
  static string strNs("");
  bool result = false;

  if(!doc){
    return result;
  }
  xmlNodePtr pRootNode = xmlDocGetRootElement(doc);
  if(pRootNode){
    xmlNsPtr* nslist = xmlGetNsList(doc, pRootNode);
    if(nslist && nslist[0]){
      if(nslist[0]->href){
        nsurl = (const char*)(nslist[0]->href);
        result = true;
  if((tmLast + 60) < time(NULL)){
    // refresh
    tmLast = time(NULL);
    strNs = "";
    xmlNodePtr pRootNode = xmlDocGetRootElement(doc);
    if(pRootNode){
      xmlNsPtr* nslist = xmlGetNsList(doc, pRootNode);
      if(nslist){
        if(nslist[0] && nslist[0]->href){
          strNs = (const char*)(nslist[0]->href);
        }
        S3FS_XMLFREE(nslist);
      }
      xmlFree(nslist);
    }
  }
  if(0 < strNs.size()){
    nsurl = strNs;
    result = true;
  }
  return result;
}
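GetXmlNsUrl() now keeps the detected namespace URL in function-static variables and re-reads it from a document at most once every 60 seconds, so xmlGetNsList() is not called for every response. A minimal sketch of that time-based memoization is below; lookup_value() is an illustrative stand-in for the expensive inspection, and the URL shown is the standard S3 XML namespace.

// Minimal sketch of the 60-second memoization used by GetXmlNsUrl above;
// lookup_value() stands in for the expensive call (root element + namespace list).
#include <ctime>
#include <string>

static std::string lookup_value()
{
  // Stand-in for the expensive step (in GetXmlNsUrl: xmlDocGetRootElement + xmlGetNsList).
  return "http://s3.amazonaws.com/doc/2006-03-01/";
}

std::string cached_value()
{
  static time_t      last = 0;     // time of the last refresh
  static std::string value;        // cached result
  time_t now = time(NULL);
  if(last + 60 < now){             // refresh at most once every 60 seconds
    value = lookup_value();
    last  = now;
  }
  return value;
}

As with the original, the function-level statics are shared by all threads, so callers that need strict consistency would add their own locking around the refresh.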
static int append_objects_from_xml(const char* path, const char* xml, S3ObjList& head)
static int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head)
{
  xmlDocPtr doc;
  xmlXPathContextPtr ctx;
  string xmlnsurl;
  string ex_contents = "//";
  string ex_key = "";

@@ -2287,17 +2379,18 @@ static int append_objects_from_xml(const char* path, const char* xml, S3ObjList&
  string ex_prefix = "";
  string ex_etag = "";

  // If there is not <Prefix>, use path instead of it.
  xmlChar* pprefix = get_prefix(xml);
  string prefix = (pprefix ? (char*)pprefix : path ? path : "");
  xmlFree(pprefix);

  doc = xmlReadMemory(xml, strlen(xml), "", NULL, 0);
  if(doc == NULL){
    DPRN("xmlReadMemory returns with error.");
  if(!doc){
    return -1;
  }
  ctx = xmlXPathNewContext(doc);

  // If there is not <Prefix>, use path instead of it.
  xmlChar* pprefix = get_prefix(doc);
  string prefix = (pprefix ? (char*)pprefix : path ? path : "");
  if(pprefix){
    xmlFree(pprefix);
  }

  xmlXPathContextPtr ctx = xmlXPathNewContext(doc);

  if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){
    xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str());

@@ -2317,28 +2410,24 @@ static int append_objects_from_xml(const char* path, const char* xml, S3ObjList&
      -1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_cprefix.c_str(), ex_prefix.c_str(), NULL, 1, head) )
  {
    DPRN("append_objects_from_xml_ex returns with error.");
    xmlXPathFreeContext(ctx);
    xmlFreeDoc(doc);
    S3FS_XMLXPATHFREECONTEXT(ctx);
    return -1;
  }
  xmlXPathFreeContext(ctx);
  xmlFreeDoc(doc);
  S3FS_XMLXPATHFREECONTEXT(ctx);

  return 0;
}
static xmlChar* get_base_exp(const char* xml, const char* exp)
static xmlChar* get_base_exp(xmlDocPtr doc, const char* exp)
{
  xmlDocPtr doc;
  xmlXPathContextPtr ctx;
  xmlXPathObjectPtr marker_xp;
  xmlNodeSetPtr nodes;
  xmlChar* result;
  xmlXPathObjectPtr marker_xp;
  string xmlnsurl;
  string exp_string = "//";

  doc = xmlReadMemory(xml, strlen(xml), "", NULL, 0);
  ctx = xmlXPathNewContext(doc);
  if(!doc){
    return NULL;
  }
  xmlXPathContextPtr ctx = xmlXPathNewContext(doc);

  if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){
    xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str());

@@ -2346,37 +2435,48 @@ static xmlChar* get_base_exp(const char* xml, const char* exp)
  }
  exp_string += exp;

  marker_xp = xmlXPathEvalExpression((xmlChar *)exp_string.c_str(), ctx);
  nodes = marker_xp->nodesetval;

  if(nodes->nodeNr < 1)
  if(NULL == (marker_xp = xmlXPathEvalExpression((xmlChar *)exp_string.c_str(), ctx))){
    xmlXPathFreeContext(ctx);
    return NULL;

    result = xmlNodeListGetString(doc, nodes->nodeTab[0]->xmlChildrenNode, 1);
  }
  if(xmlXPathNodeSetIsEmpty(marker_xp->nodesetval)){
    DPRNNN("marker_xp->nodesetval is empty.");
    xmlXPathFreeObject(marker_xp);
    xmlXPathFreeContext(ctx);
    return NULL;
  }
  xmlNodeSetPtr nodes = marker_xp->nodesetval;
  xmlChar* result = xmlNodeListGetString(doc, nodes->nodeTab[0]->xmlChildrenNode, 1);

  xmlXPathFreeObject(marker_xp);
  xmlXPathFreeContext(ctx);
  xmlFreeDoc(doc);

  return result;
}

static xmlChar* get_prefix(const char* xml)
static xmlChar* get_prefix(xmlDocPtr doc)
{
  return get_base_exp(xml, "Prefix");
  return get_base_exp(doc, "Prefix");
}

static xmlChar* get_next_marker(const char* xml)
static xmlChar* get_next_marker(xmlDocPtr doc)
{
  return get_base_exp(xml, "NextMarker");
  return get_base_exp(doc, "NextMarker");
}

static bool is_truncated(const char* xml)
static bool is_truncated(xmlDocPtr doc)
{
  if(strstr(xml, "<IsTruncated>true</IsTruncated>")){
    return true;
  bool result = false;

  xmlChar* strTruncate = get_base_exp(doc, "IsTruncated");
  if(!strTruncate){
    return result;
  }
  return false;
  if(0 == strcasecmp((const char*)strTruncate, "true")){
    result = true;
  }
  xmlFree(strTruncate);
  return result;
}
// return: the pointer to object name on allocated memory.

@@ -2398,8 +2498,8 @@ static char* get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path)

  // Make dir path and filename
  string strfullpath= (char*)fullpath;
  string strdirpath = mydirname((char*)fullpath);
  string strmybpath = mybasename((char*)fullpath);
  string strdirpath = mydirname(string((char*)fullpath));
  string strmybpath = mybasename(string((char*)fullpath));
  const char* dirpath = strdirpath.c_str();
  const char* mybname = strmybpath.c_str();
  const char* basepath= (!path || '\0' == path[0] || '/' != path[0] ? path : &path[1]);
@@ -2519,7 +2619,9 @@ static int s3fs_access(const char* path, int mask)
    ((mask & X_OK) == X_OK) ? "X_OK " : "",
    (mask == F_OK) ? "F_OK" : "");

  return check_object_access(path, mask, NULL);
  int result = check_object_access(path, mask, NULL);
  S3FS_MALLOCTRIM(0);
  return result;
}

static int s3fs_utility_mode(void)

@@ -2590,6 +2692,8 @@ static int s3fs_check_service(void)
      return EXIT_FAILURE;
    }
  }
  S3FS_MALLOCTRIM(0);

  return EXIT_SUCCESS;
}
@@ -3196,6 +3300,10 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
    S3fsCurl::SetDnsCache(false);
    return 0;
  }
  if(0 == strcmp(arg, "nosscache")){
    S3fsCurl::SetSslSessionCache(false);
    return 0;
  }
  if(0 == STR2NCMP(arg, "parallel_count=") || 0 == STR2NCMP(arg, "parallel_upload=")){
    int maxpara = (int)strtoul(strchr(arg, '=') + sizeof(char), 0, 10);
    if(0 >= maxpara){
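This hunk wires up the new nosscache option from the commit message: when given, S3fsCurl::SetSslSessionCache(false) turns off the SSL session cache that this change adds to the shared curl handle. A possible invocation (bucket name and mount point are placeholders):

  s3fs mybucket /mnt/s3 -o nosscache -o multireq_max=20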
@@ -3315,41 +3423,45 @@ int main(int argc, char* argv[])
    {0, 0, 0, 0}
  };

  // get program name - emulate basename
  size_t found = string::npos;
  program_name.assign(argv[0]);
  found = program_name.find_last_of("/");
  if(found != string::npos){
    program_name.replace(0, found+1, "");
  }
  // init xml2
  xmlInitParser();
  LIBXML_TEST_VERSION

  while((ch = getopt_long(argc, argv, "dho:fsu", long_opts, &option_index)) != -1){
    switch(ch){
    case 0:
      if(strcmp(long_opts[option_index].name, "version") == 0){
        show_version();
        exit(EXIT_SUCCESS);
      }
      break;
    case 'h':
      show_help();
      exit(EXIT_SUCCESS);
    case 'o':
      break;
    case 'd':
      break;
    case 'f':
      foreground = true;
      break;
    case 's':
      break;
    case 'u':
      utility_mode = 1;
      break;
    default:
      exit(EXIT_FAILURE);
    }
  }
  // get program name - emulate basename
  size_t found = string::npos;
  program_name.assign(argv[0]);
  found = program_name.find_last_of("/");
  if(found != string::npos){
    program_name.replace(0, found+1, "");
  }

  while((ch = getopt_long(argc, argv, "dho:fsu", long_opts, &option_index)) != -1){
    switch(ch){
    case 0:
      if(strcmp(long_opts[option_index].name, "version") == 0){
        show_version();
        exit(EXIT_SUCCESS);
      }
      break;
    case 'h':
      show_help();
      exit(EXIT_SUCCESS);
    case 'o':
      break;
    case 'd':
      break;
    case 'f':
      foreground = true;
      break;
    case 's':
      break;
    case 'u':
      utility_mode = 1;
      break;
    default:
      exit(EXIT_FAILURE);
    }
  }

  // clear this structure
  memset(&s3fs_oper, 0, sizeof(s3fs_oper));

@@ -3483,10 +3595,19 @@ int main(int argc, char* argv[])
  s3fs_oper.access = s3fs_access;
  s3fs_oper.create = s3fs_create;

  // init NSS
  S3FS_INIT_NSS();

  // now passing things off to fuse, fuse will finish evaluating the command line args
  fuse_res = fuse_main(custom_args.argc, custom_args.argv, &s3fs_oper, NULL);
  fuse_opt_free_args(&custom_args);

  // cleanup NSS
  S3FS_CLEANUP_NSS();
  // cleanup xml2
  xmlCleanupParser();
  S3FS_MALLOCTRIM(0);

  exit(fuse_res);
}
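main() now brackets the FUSE run with explicit library setup and teardown: xmlInitParser()/LIBXML_TEST_VERSION before option parsing, S3FS_INIT_NSS() before fuse_main(), and S3FS_CLEANUP_NSS(), xmlCleanupParser() and S3FS_MALLOCTRIM(0) afterwards. The skeleton below only illustrates that ordering; run_filesystem() is a placeholder for the fuse_main() call, and the NSS macros are shown as comments because they expand to nothing unless NSS_INIT_ENABLED is defined.

// Skeleton of the init/cleanup ordering used in main(); run_filesystem()
// is a placeholder for fuse_main(), not an s3fs function.
#include <libxml/parser.h>

static int run_filesystem()
{
  // ... fuse_main(custom_args.argc, custom_args.argv, &s3fs_oper, NULL) goes here ...
  return 0;
}

int main()
{
  xmlInitParser();                 // init libxml2 before any parsing
  LIBXML_TEST_VERSION

  // S3FS_INIT_NSS();              // NSS_NoDB_Init(NULL) when built with --enable-nss-init

  int res = run_filesystem();

  // S3FS_CLEANUP_NSS();           // NSS_Shutdown(); PL_ArenaFinish(); PR_Cleanup();
  xmlCleanupParser();              // release libxml2 global state
  return res;
}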
84
src/s3fs.h

@@ -13,4 +13,88 @@
  } \
}

//
// s3fs uses many small allocated chunks in the heap area for
// the stats cache, xml parsing, and so on. The OS may decide
// that giving this little memory back to the kernel would cause
// too much overhead, and delay the operation. So s3fs calls the
// malloc_trim function to really get the memory back. The
// following macros are prepared in case your system does not
// have it.
//
// With gratitude, this workaround follows a libxml2 document:
// http://xmlsoft.org/xmlmem.html
//
#ifdef HAVE_MALLOC_TRIM

#include <malloc.h>

#define DISPWARN_MALLOCTRIM(str)
#define S3FS_MALLOCTRIM(pad) malloc_trim(pad)
#define S3FS_XMLFREEDOC(doc) \
        { \
          xmlFreeDoc(doc); \
          S3FS_MALLOCTRIM(0); \
        }
#define S3FS_XMLFREE(ptr) \
        { \
          xmlFree(ptr); \
          S3FS_MALLOCTRIM(0); \
        }
#define S3FS_XMLXPATHFREECONTEXT(ctx) \
        { \
          xmlXPathFreeContext(ctx); \
          S3FS_MALLOCTRIM(0); \
        }
#define S3FS_XMLXPATHFREEOBJECT(obj) \
        { \
          xmlXPathFreeObject(obj); \
          S3FS_MALLOCTRIM(0); \
        }

#else // HAVE_MALLOC_TRIM

#define DISPWARN_MALLOCTRIM(str) \
        fprintf(stderr, "Warning: %s without malloc_trim is possibility of the use memory increase.\n", program_name.c_str())
#define S3FS_MALLOCTRIM(pad)
#define S3FS_XMLFREEDOC(doc) xmlFreeDoc(doc)
#define S3FS_XMLFREE(ptr) xmlFree(ptr)
#define S3FS_XMLXPATHFREECONTEXT(ctx) xmlXPathFreeContext(ctx)
#define S3FS_XMLXPATHFREEOBJECT(obj) xmlXPathFreeObject(obj)

#endif // HAVE_MALLOC_TRIM
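These wrappers turn the trim into a no-op (plus a one-time warning) on platforms without malloc_trim(3). The stand-alone example below illustrates the underlying idea under the same HAVE_MALLOC_TRIM guard; the allocation size and loop count are arbitrary.

// Standalone illustration of conditionally returning freed heap pages to the
// kernel with malloc_trim(3); the sizes and the loop are arbitrary examples.
#include <cstdlib>

#ifdef HAVE_MALLOC_TRIM
#include <malloc.h>
#endif

int main()
{
  // Simulate many small, short-lived allocations (stat cache entries, XML nodes, ...).
  for(int i = 0; i < 100000; ++i){
    void* p = malloc(256);
    free(p);
  }
#ifdef HAVE_MALLOC_TRIM
  malloc_trim(0);   // ask glibc to give trimmed heap memory back to the kernel
#endif
  return 0;
}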
//
// For initializing libcurl with NSS
// Normally libcurl initializes the NSS library itself, but s3fs can be
// made to initialize it explicitly. Valgrind reports a memory leak
// around the curl_global_init() function, and this initialization is a
// workaround to suppress it. When the "--enable-nss-init" option is
// specified at configure time, the NSS_INIT_ENABLED flag is put into
// the Makefile.
// NOTICE
// These defines and macros are temporary and should be deleted later.
//
#ifdef NSS_INIT_ENABLED
#include <nss.h>
#include <prinit.h>

#define S3FS_INIT_NSS() \
        { \
          NSS_NoDB_Init(NULL); \
        }
#define S3FS_CLEANUP_NSS() \
        { \
          NSS_Shutdown(); \
          PL_ArenaFinish(); \
          PR_Cleanup(); \
        }

#else // NSS_INIT_ENABLED

#define S3FS_INIT_NSS()
#define S3FS_CLEANUP_NSS()

#endif // NSS_INIT_ENABLED

#endif // S3FS_S3_H_
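When built with --enable-nss-init, the macro pair above is meant to bracket the entire libcurl lifetime. The sketch below shows how such explicit NSS setup and teardown is typically arranged around libcurl use; do_transfers() is a placeholder, and this only applies to libcurl builds that link against NSS.

// Sketch only: explicit NSS setup/teardown around libcurl use, as the
// S3FS_INIT_NSS()/S3FS_CLEANUP_NSS() macros do when NSS_INIT_ENABLED is set.
#include <nss.h>
#include <prinit.h>
#include <plarena.h>
#include <curl/curl.h>

static void do_transfers()
{
  // ... curl_easy_* / curl_multi_* work would go here ...
}

int main()
{
  NSS_NoDB_Init(NULL);                   // initialize NSS without a certificate database
  curl_global_init(CURL_GLOBAL_ALL);

  do_transfers();

  curl_global_cleanup();
  NSS_Shutdown();                        // shut NSS down before NSPR cleanup
  PL_ArenaFinish();
  PR_Cleanup();
  return 0;
}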
@@ -456,15 +456,40 @@ bool AutoLock::Unlock(void)
// get user name from uid
string get_username(uid_t uid)
{
  struct passwd* ppw;
  if(NULL == (ppw = getpwuid(uid)) || NULL == ppw->pw_name){
    DPRNNN("could not get username(errno=%d).", (int)errno);
  static size_t maxlen = 0;  // set once
  int result;
  char* pbuf;
  struct passwd pwinfo;
  struct passwd* ppwinfo = NULL;

  // make buffer
  if(0 == maxlen){
    if(0 > (maxlen = (size_t)sysconf(_SC_GETPW_R_SIZE_MAX))){
      DPRNNN("could not get max pw length.");
      maxlen = 0;
      return string("");
    }
  }
  if(NULL == (pbuf = (char*)malloc(sizeof(char) * maxlen))){
    DPRNCRIT("failed to allocate memory.");
    return string("");
  }
  return string(ppw->pw_name);
  // get user information
  if(0 != (result = getpwuid_r(uid, &pwinfo, pbuf, maxlen, &ppwinfo))){
    DPRNNN("could not get pw infomation.");
    free(pbuf);
    return string("");
  }
  // check pw
  if(NULL == ppwinfo){
    free(pbuf);
    return string("");
  }
  string name = SAFESTRPTR(ppwinfo->pw_name);
  free(pbuf);
  return name;
}
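get_username() is rewritten around the reentrant getpwuid_r(), sizing its scratch buffer with sysconf(_SC_GETPW_R_SIZE_MAX). Note that sysconf() may legitimately return -1 when no fixed limit is defined; the hedged sketch below shows a common variation that falls back to a fixed buffer size in that case, and is not the exact s3fs implementation.

// Hedged sketch of a thread-safe uid -> name lookup with a sysconf-sized
// buffer and a fixed fallback; not the exact s3fs implementation.
#include <pwd.h>
#include <sys/types.h>
#include <unistd.h>
#include <string>
#include <vector>

std::string username_of(uid_t uid)
{
  long len = sysconf(_SC_GETPW_R_SIZE_MAX);
  if(len <= 0){
    len = 4096;                        // no fixed limit reported; use a reasonable default
  }
  std::vector<char> buf(static_cast<size_t>(len));
  struct passwd pw;
  struct passwd* result = NULL;
  if(0 != getpwuid_r(uid, &pw, &buf[0], buf.size(), &result) || NULL == result){
    return std::string("");            // unknown uid or lookup error
  }
  return std::string(result->pw_name);
}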
// check uid in group(gid)
int is_uid_inculde_group(uid_t uid, gid_t gid)
{
  static size_t maxlen = 0;  // set once

@@ -520,14 +545,14 @@ int is_uid_inculde_group(uid_t uid, gid_t gid)
// dirname clobbers path so let it operate on a tmp copy
string mydirname(string path)
{
  return string(dirname(&path[0]));
  return string(dirname((char*)path.c_str()));
}

// safe variant of basename
// basename clobbers path so let it operate on a tmp copy
string mybasename(string path)
{
  return string(basename(&path[0]));
  return string(basename((char*)path.c_str()));
}

// mkdir --parents

@@ -871,7 +896,11 @@ void show_help (void)
    " nodnscache (disable dns cache)\n"
    " - s3fs is always using dns cache, this option make dns cache disable.\n"
    "\n"
    " multireq_max (default=\"500\")\n"
    " nosscache (disable ssl session cache)\n"
    " - s3fs is always using ssl session cache, this option make ssl \n"
    " session cache disable.\n"
    "\n"
    " multireq_max (default=\"20\")\n"
    " - maximum number of parallel request for listing objects.\n"
    "\n"
    " parallel_count (default=\"5\")\n"