Replace some raw pointers with std::unique_ptr (#2195)
This simplifies code paths and makes memory leaks less likely. It also makes memory ownership more explicit by requiring std::move. This commit requires C++11. References #2179.
parent: faec0d9d15
commit: 6344d74ae3
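The commit message above describes the one pattern applied throughout the diff that follows. As a rough before/after sketch only (the Part type and collect() function are invented for illustration and are not s3fs-fuse code), switching a raw owning pointer to std::unique_ptr removes the manual delete on every early-return path, and handing the object to a container requires a visible std::move:

#include <memory>
#include <utility>
#include <vector>

struct Part { int num; };                      // stand-in for an upload-part object

// With raw pointers the vector holds Part* and every exit path must delete.
// With unique_ptr the vector owns its elements and the hand-off is explicit.
void collect(std::vector<std::unique_ptr<Part>>& list)
{
    std::unique_ptr<Part> p(new Part());       // C++11 (std::make_unique arrives in C++14)
    p->num = 1;
    list.push_back(std::move(p));              // ownership transferred; p is now empty
}   // if push_back had thrown, p would still have freed the Part

int main()
{
    std::vector<std::unique_ptr<Part>> parts;
    collect(parts);
    return parts.size() == 1 ? 0 : 1;
}

The hunks below apply the same change to ADDHEAD, S3fsCurl, PseudoFdInfo, and several char[] scratch buffers.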
@@ -78,7 +78,6 @@ bool AdditionalHeader::Load(const char* file)
  // read file
  std::string line;
- ADDHEAD *paddhead;
  while(getline(AH, line)){
  if(line.empty()){
  continue;
@@ -111,44 +110,41 @@ bool AdditionalHeader::Load(const char* file)
  return false;
  }

- paddhead = new ADDHEAD;
+ std::unique_ptr<ADDHEAD> paddhead(new ADDHEAD);
  if(0 == strncasecmp(key.c_str(), ADD_HEAD_REGEX, strlen(ADD_HEAD_REGEX))){
  // regex
  if(key.size() <= strlen(ADD_HEAD_REGEX)){
  S3FS_PRN_ERR("file format error: %s key(suffix) does not have key std::string.", key.c_str());
- delete paddhead;
  continue;
  }
  key.erase(0, strlen(ADD_HEAD_REGEX));

  // compile
- regex_t* preg = new regex_t;
+ std::unique_ptr<regex_t> preg(new regex_t);
  int result;
- if(0 != (result = regcomp(preg, key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info
+ if(0 != (result = regcomp(preg.get(), key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info
  char errbuf[256];
- regerror(result, preg, errbuf, sizeof(errbuf));
+ regerror(result, preg.get(), errbuf, sizeof(errbuf));
  S3FS_PRN_ERR("failed to compile regex from %s key by %s.", key.c_str(), errbuf);
- delete preg;
- delete paddhead;
  continue;
  }

  // set
- paddhead->pregex = preg;
+ paddhead->pregex = std::move(preg);
  paddhead->basestring = key;
  paddhead->headkey = head;
  paddhead->headvalue = value;

  }else{
  // not regex, directly comparing
- paddhead->pregex = NULL;
+ paddhead->pregex.reset(nullptr);
  paddhead->basestring = key;
  paddhead->headkey = head;
  paddhead->headvalue = value;
  }

  // add list
- addheadlist.push_back(paddhead);
+ addheadlist.push_back(std::move(paddhead));

  // set flag
  is_enable = true;
@@ -160,16 +156,6 @@ void AdditionalHeader::Unload()
  {
  is_enable = false;

- for(addheadlist_t::iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
- ADDHEAD *paddhead = *iter;
- if(paddhead){
- if(paddhead->pregex){
- regfree(paddhead->pregex);
- delete paddhead->pregex;
- }
- delete paddhead;
- }
- }
  addheadlist.clear();
  }
@@ -191,7 +177,7 @@ bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const
  // Because to allow duplicate key, and then scanning the entire table.
  //
  for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
- const ADDHEAD *paddhead = *iter;
+ const ADDHEAD *paddhead = iter->get();
  if(!paddhead){
  continue;
  }
@@ -199,7 +185,7 @@ bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const
  if(paddhead->pregex){
  // regex
  regmatch_t match; // not use
- if(0 == regexec(paddhead->pregex, path, 1, &match, 0)){
+ if(0 == regexec(paddhead->pregex.get(), path, 1, &match, 0)){
  // match -> adding header
  meta[paddhead->headkey] = paddhead->headvalue;
  }
@@ -244,7 +230,7 @@ bool AdditionalHeader::Dump() const
  ssdbg << "Additional Header list[" << addheadlist.size() << "] = {" << std::endl;

  for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter, ++cnt){
- const ADDHEAD *paddhead = *iter;
+ const ADDHEAD *paddhead = iter->get();

  ssdbg << " [" << cnt << "] = {" << std::endl;
@@ -21,6 +21,7 @@
  #ifndef S3FS_ADDHEAD_H_
  #define S3FS_ADDHEAD_H_

+ #include <memory>
  #include <regex.h>

  #include "metaheader.h"
@@ -29,13 +30,19 @@
  // Structure / Typedef
  //----------------------------------------------
  typedef struct add_header{
- regex_t* pregex; // not NULL means using regex, NULL means comparing suffix directly.
+ ~add_header() {
+ if(pregex){
+ regfree(pregex.get());
+ }
+ }
+
+ std::unique_ptr<regex_t> pregex; // not NULL means using regex, NULL means comparing suffix directly.
  std::string basestring;
  std::string headkey;
  std::string headvalue;
  }ADDHEAD;

- typedef std::vector<ADDHEAD *> addheadlist_t;
+ typedef std::vector<std::unique_ptr<ADDHEAD>> addheadlist_t;

  //----------------------------------------------
  // Class AdditionalHeader
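In the header change above, pregex becomes a std::unique_ptr<regex_t> and the new add_header destructor calls regfree() before the default deleter frees the object. A hedged alternative (an illustration, not what this commit does) is to bind regfree() into the pointer type with a custom deleter, so every unique_ptr of that type cleans up the compiled pattern by itself:

#include <memory>
#include <regex.h>

// Deleter that releases both the compiled pattern and the regex_t object.
struct regex_deleter
{
    void operator()(regex_t* preg) const
    {
        if(preg){
            regfree(preg);
            delete preg;
        }
    }
};
typedef std::unique_ptr<regex_t, regex_deleter> unique_regex_t;

// Compile a pattern; an empty pointer signals failure. A plain unique_ptr is
// used while the object is not yet compiled, because regfree() should only
// run on a successfully compiled regex_t.
unique_regex_t compile_pattern(const char* pattern)
{
    std::unique_ptr<regex_t> raw(new regex_t);
    if(0 != regcomp(raw.get(), pattern, REG_EXTENDED | REG_NOSUB)){
        return unique_regex_t();               // failure: plain delete, no regfree
    }
    return unique_regex_t(raw.release());      // success: deleter now does regfree + delete
}

int main()
{
    unique_regex_t preg = compile_pattern("^x-amz-meta-");
    return preg ? 0 : 1;
}

Either way, the unique_ptr member also makes ADDHEAD non-copyable, which fits its move-only use in addheadlist_t.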
src/curl.cpp
@@ -22,6 +22,7 @@
  #include <cstdlib>
  #include <ctime>
  #include <errno.h>
+ #include <memory>
  #include <unistd.h>
  #include <fstream>
  #include <sstream>
@@ -1347,7 +1348,7 @@ int S3fsCurl::MapPutErrorResponse(int result)
  // It is a factory method as utility because it requires an S3fsCurl object
  // initialized for multipart upload from outside this class.
  //
- S3fsCurl* S3fsCurl::CreateParallelS3fsCurl(const char* tpath, int fd, off_t start, off_t size, int part_num, bool is_copy, etagpair* petag, const std::string& upload_id, int& result)
+ std::unique_ptr<S3fsCurl> S3fsCurl::CreateParallelS3fsCurl(const char* tpath, int fd, off_t start, off_t size, int part_num, bool is_copy, etagpair* petag, const std::string& upload_id, int& result)
  {
  // duplicate fd
  if(!tpath || -1 == fd || start < 0 || size <= 0 || !petag){
@@ -1357,7 +1358,7 @@ S3fsCurl* S3fsCurl::CreateParallelS3fsCurl(const char* tpath, int fd, off_t star
  }
  result = 0;

- S3fsCurl* s3fscurl = new S3fsCurl(true);
+ std::unique_ptr<S3fsCurl> s3fscurl(new S3fsCurl(true));

  if(!is_copy){
  s3fscurl->partdata.fd = fd;
@@ -1372,7 +1373,6 @@ S3fsCurl* S3fsCurl::CreateParallelS3fsCurl(const char* tpath, int fd, off_t star

  if(0 != (result = s3fscurl->UploadMultipartPostSetup(tpath, part_num, upload_id))){
  S3FS_PRN_ERR("failed uploading part setup(%d)", result);
- delete s3fscurl;
  return NULL;
  }
  }else{
@@ -1394,16 +1394,14 @@ S3fsCurl* S3fsCurl::CreateParallelS3fsCurl(const char* tpath, int fd, off_t star

  if(0 != (result = s3fscurl->CopyMultipartPostSetup(tpath, tpath, part_num, upload_id, meta))){
  S3FS_PRN_ERR("failed uploading part setup(%d)", result);
- delete s3fscurl;
  return NULL;
  }
  }

  // Call lazy function
- if(!s3fscurl->fpLazySetup || !s3fscurl->fpLazySetup(s3fscurl)){
+ if(!s3fscurl->fpLazySetup || !s3fscurl->fpLazySetup(s3fscurl.get())){
  S3FS_PRN_ERR("failed lazy function setup for uploading part");
  result = -EIO;
- delete s3fscurl;
  return NULL;
  }
  return s3fscurl;
@@ -1440,7 +1438,7 @@ int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta,
  off_t chunk = remaining_bytes > S3fsCurl::multipart_size ? S3fsCurl::multipart_size : remaining_bytes;

  // s3fscurl sub object
- S3fsCurl* s3fscurl_para = new S3fsCurl(true);
+ std::unique_ptr<S3fsCurl> s3fscurl_para(new S3fsCurl(true));
  s3fscurl_para->partdata.fd = fd;
  s3fscurl_para->partdata.startpos = st.st_size - remaining_bytes;
  s3fscurl_para->partdata.size = chunk;
@@ -1451,14 +1449,12 @@
  // initiate upload part for parallel
  if(0 != (result = s3fscurl_para->UploadMultipartPostSetup(tpath, s3fscurl_para->partdata.get_part_number(), upload_id))){
  S3FS_PRN_ERR("failed uploading part setup(%d)", result);
- delete s3fscurl_para;
  return result;
  }

  // set into parallel object
- if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
+ if(!curlmulti.SetS3fsCurlObject(std::move(s3fscurl_para))){
  S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath);
- delete s3fscurl_para;
  return -EIO;
  }
  remaining_bytes -= chunk;
@@ -1519,8 +1515,7 @@ int S3fsCurl::ParallelMixMultipartUploadRequest(const char* tpath, headers_t& me
  for(fdpage_list_t::const_iterator iter = mixuppages.begin(); iter != mixuppages.end(); ++iter){
  if(iter->modified){
  // Multipart upload
- S3fsCurl* s3fscurl_para = new S3fsCurl(true);
-
+ std::unique_ptr<S3fsCurl> s3fscurl_para(new S3fsCurl(true));
  s3fscurl_para->partdata.fd = fd;
  s3fscurl_para->partdata.startpos = iter->offset;
  s3fscurl_para->partdata.size = iter->bytes;
@@ -1533,20 +1528,18 @@
  // initiate upload part for parallel
  if(0 != (result = s3fscurl_para->UploadMultipartPostSetup(tpath, s3fscurl_para->partdata.get_part_number(), upload_id))){
  S3FS_PRN_ERR("failed uploading part setup(%d)", result);
- delete s3fscurl_para;
  return result;
  }

  // set into parallel object
- if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
+ if(!curlmulti.SetS3fsCurlObject(std::move(s3fscurl_para))){
  S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath);
- delete s3fscurl_para;
  return -EIO;
  }
  }else{
  // Multipart copy
  for(off_t i = 0, bytes = 0; i < iter->bytes; i += bytes){
- S3fsCurl* s3fscurl_para = new S3fsCurl(true);
+ std::unique_ptr<S3fsCurl> s3fscurl_para(new S3fsCurl(true));

  bytes = std::min(static_cast<off_t>(GetMultipartCopySize()), iter->bytes - i);
  /* every part should be larger than MIN_MULTIPART_SIZE and smaller than FIVE_GB */
@@ -1573,14 +1566,12 @@
  // initiate upload part for parallel
  if(0 != (result = s3fscurl_para->CopyMultipartPostSetup(tpath, tpath, s3fscurl_para->partdata.get_part_number(), upload_id, meta))){
  S3FS_PRN_ERR("failed uploading part setup(%d)", result);
- delete s3fscurl_para;
  return result;
  }

  // set into parallel object
- if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
+ if(!curlmulti.SetS3fsCurlObject(std::move(s3fscurl_para))){
  S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath);
- delete s3fscurl_para;
  return -EIO;
  }
  }
@@ -1659,17 +1650,15 @@ int S3fsCurl::ParallelGetObjectRequest(const char* tpath, int fd, off_t start, o
  chunk = remaining_bytes > S3fsCurl::multipart_size ? S3fsCurl::multipart_size : remaining_bytes;

  // s3fscurl sub object
- S3fsCurl* s3fscurl_para = new S3fsCurl();
+ std::unique_ptr<S3fsCurl> s3fscurl_para(new S3fsCurl(true));
  if(0 != (result = s3fscurl_para->PreGetObjectRequest(tpath, fd, (start + size - remaining_bytes), chunk, ssetype, ssevalue))){
  S3FS_PRN_ERR("failed downloading part setup(%d)", result);
- delete s3fscurl_para;
  return result;
  }

  // set into parallel object
- if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
+ if(!curlmulti.SetS3fsCurlObject(std::move(s3fscurl_para))){
  S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath);
- delete s3fscurl_para;
  return -EIO;
  }
  }
@@ -4367,7 +4356,7 @@ int S3fsCurl::MultipartHeadRequest(const char* tpath, off_t size, headers_t& met
  meta["x-amz-copy-source-range"] = strrange.str();

  // s3fscurl sub object
- S3fsCurl* s3fscurl_para = new S3fsCurl(true);
+ std::unique_ptr<S3fsCurl> s3fscurl_para(new S3fsCurl(true));
  s3fscurl_para->b_from = SAFESTRPTR(tpath);
  s3fscurl_para->b_meta = meta;
  s3fscurl_para->partdata.add_etag_list(list);
@@ -4375,14 +4364,12 @@
  // initiate upload part for parallel
  if(0 != (result = s3fscurl_para->CopyMultipartPostSetup(tpath, tpath, s3fscurl_para->partdata.get_part_number(), upload_id, meta))){
  S3FS_PRN_ERR("failed uploading part setup(%d)", result);
- delete s3fscurl_para;
  return result;
  }

  // set into parallel object
- if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
+ if(!curlmulti.SetS3fsCurlObject(std::move(s3fscurl_para))){
  S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath);
- delete s3fscurl_para;
  return -EIO;
  }
  }
@@ -4464,7 +4451,7 @@ int S3fsCurl::MultipartRenameRequest(const char* from, const char* to, headers_t
  meta["x-amz-copy-source-range"] = strrange.str();

  // s3fscurl sub object
- S3fsCurl* s3fscurl_para = new S3fsCurl(true);
+ std::unique_ptr<S3fsCurl> s3fscurl_para(new S3fsCurl(true));
  s3fscurl_para->b_from = SAFESTRPTR(from);
  s3fscurl_para->b_meta = meta;
  s3fscurl_para->partdata.add_etag_list(list);
@@ -4472,14 +4459,12 @@
  // initiate upload part for parallel
  if(0 != (result = s3fscurl_para->CopyMultipartPostSetup(from, to, s3fscurl_para->partdata.get_part_number(), upload_id, meta))){
  S3FS_PRN_ERR("failed uploading part setup(%d)", result);
- delete s3fscurl_para;
  return result;
  }

  // set into parallel object
- if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
+ if(!curlmulti.SetS3fsCurlObject(std::move(s3fscurl_para))){
  S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", to);
- delete s3fscurl_para;
  return -EIO;
  }
  }
@@ -24,6 +24,7 @@
  #include <curl/curl.h>
  #include <list>
  #include <map>
+ #include <memory>
  #include <vector>

  #include "autolock.h"
@@ -268,7 +269,7 @@ class S3fsCurl
  static bool InitCredentialObject(S3fsCred* pcredobj);
  static bool InitMimeType(const std::string& strFile);
  static bool DestroyS3fsCurl();
- static S3fsCurl* CreateParallelS3fsCurl(const char* tpath, int fd, off_t start, off_t size, int part_num, bool is_copy, etagpair* petag, const std::string& upload_id, int& result);
+ static std::unique_ptr<S3fsCurl> CreateParallelS3fsCurl(const char* tpath, int fd, off_t start, off_t size, int part_num, bool is_copy, etagpair* petag, const std::string& upload_id, int& result);
  static int ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, int fd);
  static int ParallelMixMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, const fdpage_list_t& mixuppages);
  static int ParallelGetObjectRequest(const char* tpath, int fd, off_t start, off_t size);
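CreateParallelS3fsCurl() now states in its return type that the caller receives ownership. A minimal, generic sketch of that calling convention (Widget and make_widget are invented names used only for illustration):

#include <memory>
#include <cstdio>

struct Widget
{
    bool setup(int n) { return n >= 0; }
};

// Factory: returns an owning pointer, or an empty pointer on failure.
std::unique_ptr<Widget> make_widget(int n, int& result)
{
    std::unique_ptr<Widget> w(new Widget());
    if(!w->setup(n)){
        result = -1;
        return std::unique_ptr<Widget>();   // early return needs no delete
    }
    result = 0;
    return w;                               // implicitly moved out of the function
}

int main()
{
    int result = 0;
    std::unique_ptr<Widget> w = make_widget(3, result);
    if(!w){
        std::fprintf(stderr, "factory failed: %d\n", result);
        return 1;
    }
    // w.get() hands a non-owning pointer to C-style interfaces, the same way
    // the diff passes s3fscurl.get() into fpLazySetup().
    return 0;
}

The worker in fdcache_fdinfo.cpp below consumes the factory in exactly this shape: it checks the returned pointer, then uses .get() where a plain S3fsCurl* is still expected.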
@@ -60,19 +60,17 @@ bool S3fsMultiCurl::ClearEx(bool is_all)
  {
  s3fscurllist_t::iterator iter;
  for(iter = clist_req.begin(); iter != clist_req.end(); ++iter){
- S3fsCurl* s3fscurl = *iter;
+ S3fsCurl* s3fscurl = iter->get();
  if(s3fscurl){
  s3fscurl->DestroyCurlHandle();
- delete s3fscurl; // with destroy curl handle.
  }
  }
  clist_req.clear();

  if(is_all){
  for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){
- S3fsCurl* s3fscurl = *iter;
+ S3fsCurl* s3fscurl = iter->get();
  s3fscurl->DestroyCurlHandle();
- delete s3fscurl;
  }
  clist_all.clear();
  }
@@ -117,12 +115,12 @@ void* S3fsMultiCurl::SetNotFoundCallbackParam(void* param)
  return old;
  }

- bool S3fsMultiCurl::SetS3fsCurlObject(S3fsCurl* s3fscurl)
+ bool S3fsMultiCurl::SetS3fsCurlObject(std::unique_ptr<S3fsCurl>&& s3fscurl)
  {
  if(!s3fscurl){
  return false;
  }
- clist_all.push_back(s3fscurl);
+ clist_all.push_back(std::move(s3fscurl));

  return true;
  }
@@ -137,7 +135,7 @@ int S3fsMultiCurl::MultiPerform()

  for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ++iter) {
  pthread_t thread;
- S3fsCurl* s3fscurl = *iter;
+ S3fsCurl* s3fscurl = iter->get();
  if(!s3fscurl){
  continue;
  }
@@ -206,7 +204,7 @@ int S3fsMultiCurl::MultiRead()
  int result = 0;

  for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ){
- S3fsCurl* s3fscurl = *iter;
+ std::unique_ptr<S3fsCurl> s3fscurl(std::move(*iter));

  bool isRetry = false;
  bool isPostpone = false;
@@ -220,7 +218,7 @@ int S3fsMultiCurl::MultiRead()
  isPostpone = true;
  }else if(400 > responseCode){
  // add into stat cache
- if(SuccessCallback && !SuccessCallback(s3fscurl, pSuccessCallbackParam)){
+ if(SuccessCallback && !SuccessCallback(s3fscurl.get(), pSuccessCallbackParam)){
  S3FS_PRN_WARN("error from success callback function(%s).", s3fscurl->url.c_str());
  }
  }else if(400 == responseCode){
@@ -234,7 +232,7 @@ int S3fsMultiCurl::MultiRead()
  S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
  }
  // Call callback function
- if(NotFoundCallback && !NotFoundCallback(s3fscurl, pNotFoundCallbackParam)){
+ if(NotFoundCallback && !NotFoundCallback(s3fscurl.get(), pNotFoundCallbackParam)){
  S3FS_PRN_WARN("error from not found callback function(%s).", s3fscurl->url.c_str());
  }
  }else if(500 == responseCode){
@@ -271,35 +269,35 @@ int S3fsMultiCurl::MultiRead()

  if(isPostpone){
  clist_req.erase(iter);
- clist_req.push_back(s3fscurl); // Re-evaluate at the end
+ clist_req.push_back(std::move(s3fscurl)); // Re-evaluate at the end
  iter = clist_req.begin();
  }else{
  if(!isRetry || 0 != result){
  // If an EIO error has already occurred, it will be terminated
  // immediately even if retry processing is required.
  s3fscurl->DestroyCurlHandle();
- delete s3fscurl;
  }else{
- S3fsCurl* retrycurl = NULL;
+ std::unique_ptr<S3fsCurl> retrycurl;

  // Reset offset
  if(isNeedResetOffset){
- S3fsCurl::ResetOffset(s3fscurl);
+ S3fsCurl::ResetOffset(s3fscurl.get());
  }

  // For retry
+ S3fsCurl* retry_ptr = nullptr;
  if(RetryCallback){
- retrycurl = RetryCallback(s3fscurl);
- if(NULL != retrycurl){
- clist_all.push_back(retrycurl);
+ retry_ptr = RetryCallback(s3fscurl.get());
+ retrycurl.reset(retry_ptr);
+ if(NULL != retry_ptr){
+ clist_all.push_back(std::move(retrycurl));
  }else{
  // set EIO and wait for other parts.
  result = -EIO;
  }
  }
- if(s3fscurl != retrycurl){
+ if(s3fscurl.get() != retry_ptr){
  s3fscurl->DestroyCurlHandle();
- delete s3fscurl;
  }
  }
  iter = clist_req.erase(iter);
@@ -310,9 +308,8 @@ int S3fsMultiCurl::MultiRead()
  if(0 != result){
  // If an EIO error has already occurred, clear all retry objects.
  for(s3fscurllist_t::iterator iter = clist_all.begin(); iter != clist_all.end(); ++iter){
- S3fsCurl* s3fscurl = *iter;
+ S3fsCurl* s3fscurl = iter->get();
  s3fscurl->DestroyCurlHandle();
- delete s3fscurl;
  }
  clist_all.clear();
  }
@@ -333,8 +330,7 @@ int S3fsMultiCurl::Request()
  int result;
  s3fscurllist_t::iterator iter;
  for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){
- S3fsCurl* s3fscurl = *iter;
- clist_req.push_back(s3fscurl);
+ clist_req.push_back(std::move(*iter));
  }
  clist_all.clear();
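The MultiRead() hunks above move each request out of clist_req before examining it; the object is then either pushed back for a later pass, replaced by a retry object in clist_all, or simply destroyed when its unique_ptr goes out of scope. A simplified, self-contained sketch of that move-out-and-decide loop (Job, drain() and handle() are invented names, and the "postpone" condition is faked so the example terminates):

#include <memory>
#include <utility>
#include <vector>

struct Job { bool postpone; };
typedef std::vector<std::unique_ptr<Job>> joblist_t;

// Pretend handler: ask for one re-queue, then succeed on the next pass.
bool handle(Job* job)
{
    if(job->postpone){
        job->postpone = false;
        return false;                                 // caller should re-queue it
    }
    return true;
}

void drain(joblist_t& jobs)
{
    for(joblist_t::iterator iter = jobs.begin(); iter != jobs.end(); ){
        std::unique_ptr<Job> job(std::move(*iter));   // take ownership out of the slot
        if(!handle(job.get())){
            jobs.erase(iter);                         // drop the now-empty slot
            jobs.push_back(std::move(job));           // re-evaluate at the end
            iter = jobs.begin();
        }else{
            iter = jobs.erase(iter);                  // job freed as it leaves scope
        }
    }
}

int main()
{
    joblist_t jobs;
    jobs.push_back(std::unique_ptr<Job>(new Job()));
    jobs.back()->postpone = true;
    drain(jobs);
    return jobs.empty() ? 0 : 1;
}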
@@ -21,12 +21,14 @@
  #ifndef S3FS_CURL_MULTI_H_
  #define S3FS_CURL_MULTI_H_

+ #include <memory>
+
  //----------------------------------------------
  // Typedef
  //----------------------------------------------
  class S3fsCurl;

- typedef std::vector<S3fsCurl*> s3fscurllist_t;
+ typedef std::vector<std::unique_ptr<S3fsCurl>> s3fscurllist_t;
  typedef bool (*S3fsMultiSuccessCallback)(S3fsCurl* s3fscurl, void* param); // callback for succeed multi request
  typedef bool (*S3fsMultiNotFoundCallback)(S3fsCurl* s3fscurl, void* param); // callback for succeed multi request
  typedef S3fsCurl* (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failure and retrying
@@ -70,7 +72,7 @@ class S3fsMultiCurl
  void* SetSuccessCallbackParam(void* param);
  void* SetNotFoundCallbackParam(void* param);
  bool Clear() { return ClearEx(true); }
- bool SetS3fsCurlObject(S3fsCurl* s3fscurl);
+ bool SetS3fsCurlObject(std::unique_ptr<S3fsCurl>&& s3fscurl);
  int Request();
  };
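SetS3fsCurlObject() now takes std::unique_ptr<S3fsCurl>&&, so every caller has to write std::move(...) at the call site. A small sketch of such a sink parameter with invented names (Registry, Item, add); taking the unique_ptr by value is the other common convention and forces the same explicit move:

#include <memory>
#include <utility>
#include <vector>

struct Item {};

class Registry
{
    public:
        // Sink: the callee takes ownership; the caller must std::move() the pointer in.
        bool add(std::unique_ptr<Item>&& item)
        {
            if(!item){
                return false;            // rejected without moving: caller still owns it
            }
            items.push_back(std::move(item));
            return true;
        }

    private:
        std::vector<std::unique_ptr<Item>> items;
};

int main()
{
    Registry reg;
    std::unique_ptr<Item> item(new Item());
    if(!reg.add(std::move(item))){
        return 1;
    }
    // item is now empty; the Registry owns the Item.
    return 0;
}

With the rvalue-reference form, a rejected pointer is still owned (and freed) by the caller's unique_ptr, which is consistent with the diff dropping the delete calls after a failed SetS3fsCurlObject().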
@@ -21,6 +21,7 @@
  #include <cstdio>
  #include <cstdlib>
  #include <cerrno>
+ #include <memory>
  #include <unistd.h>
  #include <limits.h>
  #include <sys/stat.h>
@@ -152,10 +153,6 @@ void FdEntity::Clear()
  AutoLock auto_lock(&fdent_lock);
  AutoLock auto_data_lock(&fdent_data_lock);

- for(fdinfo_map_t::iterator iter = pseudo_fd_map.begin(); iter != pseudo_fd_map.end(); ++iter){
- PseudoFdInfo* ppseudofdinfo = iter->second;
- delete ppseudofdinfo;
- }
  pseudo_fd_map.clear();

  if(-1 != physical_fd){
@@ -221,9 +218,7 @@ void FdEntity::Close(int fd)
  // search pseudo fd and close it.
  fdinfo_map_t::iterator iter = pseudo_fd_map.find(fd);
  if(pseudo_fd_map.end() != iter){
- PseudoFdInfo* ppseudoinfo = iter->second;
  pseudo_fd_map.erase(iter);
- delete ppseudoinfo;
  }else{
  S3FS_PRN_WARN("Not found pseudo_fd(%d) in entity object(%s)", fd, path.c_str());
  }
@@ -275,10 +270,10 @@ int FdEntity::Dup(int fd, AutoLock::Type locktype)
  S3FS_PRN_ERR("Not found pseudo_fd(%d) in entity object(%s) for physical_fd(%d)", fd, path.c_str(), physical_fd);
  return -1;
  }
- const PseudoFdInfo* org_pseudoinfo = iter->second;
- PseudoFdInfo* ppseudoinfo = new PseudoFdInfo(physical_fd, (org_pseudoinfo ? org_pseudoinfo->GetFlags() : 0));
- int pseudo_fd = ppseudoinfo->GetPseudoFd();
- pseudo_fd_map[pseudo_fd] = ppseudoinfo;
+ const PseudoFdInfo* org_pseudoinfo = iter->second.get();
+ std::unique_ptr<PseudoFdInfo> ppseudoinfo(new PseudoFdInfo(physical_fd, (org_pseudoinfo ? org_pseudoinfo->GetFlags() : 0)));
+ int pseudo_fd = ppseudoinfo->GetPseudoFd();
+ pseudo_fd_map[pseudo_fd] = std::move(ppseudoinfo);

  return pseudo_fd;
  }
@@ -292,9 +287,9 @@ int FdEntity::OpenPseudoFd(int flags, AutoLock::Type locktype)
  if(-1 == physical_fd){
  return -1;
  }
- PseudoFdInfo* ppseudoinfo = new PseudoFdInfo(physical_fd, flags);
+ std::unique_ptr<PseudoFdInfo> ppseudoinfo(new PseudoFdInfo(physical_fd, flags));
  int pseudo_fd = ppseudoinfo->GetPseudoFd();
- pseudo_fd_map[pseudo_fd] = ppseudoinfo;
+ pseudo_fd_map[pseudo_fd] = std::move(ppseudoinfo);

  return pseudo_fd;
  }
@@ -396,7 +391,7 @@ PseudoFdInfo* FdEntity::CheckPseudoFdFlags(int fd, bool writable, AutoLock::Type
  return NULL;
  }
  }
- return iter->second;
+ return iter->second.get();
  }

  bool FdEntity::IsUploading(AutoLock::Type locktype)
@@ -404,7 +399,7 @@ bool FdEntity::IsUploading(AutoLock::Type locktype)
  AutoLock auto_lock(&fdent_lock, locktype);

  for(fdinfo_map_t::const_iterator iter = pseudo_fd_map.begin(); iter != pseudo_fd_map.end(); ++iter){
- const PseudoFdInfo* ppseudoinfo = iter->second;
+ const PseudoFdInfo* ppseudoinfo = iter->second.get();
  if(ppseudoinfo && ppseudoinfo->IsUploading()){
  return true;
  }
@@ -486,7 +481,7 @@ int FdEntity::Open(const headers_t* pmeta, off_t size, const struct timespec& ts
  bool need_save_csf = false; // need to save(reset) cache stat file
  bool is_truncate = false; // need to truncate

- std::auto_ptr<CacheFileStat> pcfstat;
+ std::unique_ptr<CacheFileStat> pcfstat;

  if(!cachepath.empty()){
  // using cache
@@ -674,9 +669,9 @@ int FdEntity::Open(const headers_t* pmeta, off_t size, const struct timespec& ts
  }

  // create new pseudo fd, and set it to map
- PseudoFdInfo* ppseudoinfo = new PseudoFdInfo(physical_fd, flags);
+ std::unique_ptr<PseudoFdInfo> ppseudoinfo(new PseudoFdInfo(physical_fd, flags));
  int pseudo_fd = ppseudoinfo->GetPseudoFd();
- pseudo_fd_map[pseudo_fd] = ppseudoinfo;
+ pseudo_fd_map[pseudo_fd] = std::move(ppseudoinfo);

  // if there is untreated area, set it to pseudo object.
  if(0 < truncated_size){
@@ -686,7 +681,6 @@ int FdEntity::Open(const headers_t* pmeta, off_t size, const struct timespec& ts
  fclose(pfile);
  pfile = NULL;
  }
- delete ppseudoinfo;
  }
  }

@@ -1425,7 +1419,7 @@ int FdEntity::RowFlush(int fd, const char* tpath, AutoLock::Type type, bool forc
  // If the entity is opened read-only, it will end normally without updating.
  return 0;
  }
- PseudoFdInfo* pseudo_obj = miter->second;
+ PseudoFdInfo* pseudo_obj = miter->second.get();

  AutoLock auto_lock2(&fdent_data_lock);
@@ -21,6 +21,7 @@
  #include <cstdio>
  #include <cstdlib>
  #include <errno.h>
+ #include <memory>
  #include <sys/stat.h>
  #include <unistd.h>
@@ -47,15 +48,13 @@ int PseudoFdInfo::opt_max_threads = -1;
  //
  void* PseudoFdInfo::MultipartUploadThreadWorker(void* arg)
  {
- pseudofdinfo_thparam* pthparam = static_cast<pseudofdinfo_thparam*>(arg);
+ std::unique_ptr<pseudofdinfo_thparam> pthparam(static_cast<pseudofdinfo_thparam*>(arg));
  if(!pthparam || !(pthparam->ppseudofdinfo)){
- delete pthparam;
  return reinterpret_cast<void*>(-EIO);
  }
  S3FS_PRN_INFO3("Upload Part Thread [tpath=%s][start=%lld][size=%lld][part=%d]", pthparam->path.c_str(), static_cast<long long>(pthparam->start), static_cast<long long>(pthparam->size), pthparam->part_num);

  int result;
- S3fsCurl* s3fscurl;
  {
  AutoLock auto_lock(&(pthparam->ppseudofdinfo->upload_list_lock));
@@ -65,20 +64,19 @@ void* PseudoFdInfo::MultipartUploadThreadWorker(void* arg)
  if(!pthparam->ppseudofdinfo->CompleteInstruction(result, AutoLock::ALREADY_LOCKED)){ // result will be overwritten with the same value.
  result = -EIO;
  }
- delete pthparam;
  return reinterpret_cast<void*>(result);
  }
  }

  // setup and make curl object
- if(NULL == (s3fscurl = S3fsCurl::CreateParallelS3fsCurl(pthparam->path.c_str(), pthparam->upload_fd, pthparam->start, pthparam->size, pthparam->part_num, pthparam->is_copy, pthparam->petag, pthparam->upload_id, result))){
+ std::unique_ptr<S3fsCurl> s3fscurl(S3fsCurl::CreateParallelS3fsCurl(pthparam->path.c_str(), pthparam->upload_fd, pthparam->start, pthparam->size, pthparam->part_num, pthparam->is_copy, pthparam->petag, pthparam->upload_id, result));
+ if(NULL == s3fscurl.get()){
  S3FS_PRN_ERR("failed creating s3fs curl object for uploading [path=%s][start=%lld][size=%lld][part=%d]", pthparam->path.c_str(), static_cast<long long>(pthparam->start), static_cast<long long>(pthparam->size), pthparam->part_num);

  // set result for exiting
  if(!pthparam->ppseudofdinfo->CompleteInstruction(result, AutoLock::NONE)){
  result = -EIO;
  }
- delete pthparam;
  return reinterpret_cast<void*>(result);
  }
@@ -93,13 +91,11 @@ void* PseudoFdInfo::MultipartUploadThreadWorker(void* arg)
  S3FS_PRN_ERR("failed uploading with error(%d) [path=%s][start=%lld][size=%lld][part=%d]", result, pthparam->path.c_str(), static_cast<long long>(pthparam->start), static_cast<long long>(pthparam->size), pthparam->part_num);
  }
  s3fscurl->DestroyCurlHandle(true, false);
- delete s3fscurl;

  // set result
  if(!pthparam->ppseudofdinfo->CompleteInstruction(result, AutoLock::NONE)){
  S3FS_PRN_WARN("This thread worker is about to end, so it doesn't return an EIO here and runs to the end.");
  }
- delete pthparam;

  return reinterpret_cast<void*>(result);
  }
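MultipartUploadThreadWorker() above adopts its heap-allocated thread argument into a std::unique_ptr on entry, which removes the delete pthparam on each of its return paths. A generic sketch of that idiom (thparam and worker are invented names):

#include <memory>
#include <pthread.h>

struct thparam
{
    int part_num;
};

// The thread takes ownership of its parameter block immediately, so every
// return path below frees it automatically.
void* worker(void* arg)
{
    std::unique_ptr<thparam> param(static_cast<thparam*>(arg));
    if(!param){
        return reinterpret_cast<void*>(-1);
    }
    // ... do the real work with param->part_num ...
    return reinterpret_cast<void*>(0);
}

int main()
{
    pthread_t thread;
    thparam* param = new thparam();   // ownership passes to the worker thread
    param->part_num = 1;
    if(0 != pthread_create(&thread, NULL, worker, param)){
        delete param;                 // thread never started, so free it here
        return 1;
    }
    void* retval = NULL;
    pthread_join(thread, &retval);
    return 0;
}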
@@ -21,6 +21,8 @@
  #ifndef S3FS_FDCACHE_FDINFO_H_
  #define S3FS_FDCACHE_FDINFO_H_

+ #include <memory>
+
  #include "psemaphore.h"
  #include "metaheader.h"
  #include "autolock.h"
@@ -113,7 +115,7 @@ class PseudoFdInfo
  bool ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list, mp_part_list_t& to_upload_list, mp_part_list_t& to_copy_list, mp_part_list_t& to_download_list, filepart_list_t& cancel_upload_list, off_t max_mp_size, off_t file_size, bool use_copy);
  };

- typedef std::map<int, class PseudoFdInfo*> fdinfo_map_t;
+ typedef std::map<int, std::unique_ptr<PseudoFdInfo>> fdinfo_map_t;

  #endif // S3FS_FDCACHE_FDINFO_H_
@@ -20,6 +20,7 @@

  #include <cstdio>
  #include <cerrno>
+ #include <memory>
  #include <unistd.h>
  #include <sstream>
  #include <sys/stat.h>
@@ -232,7 +233,7 @@ bool PageList::GetSparseFilePages(int fd, size_t file_size, fdpage_list_t& spars
  //
  bool PageList::CheckZeroAreaInFile(int fd, off_t start, size_t bytes)
  {
- char* readbuff = new char[CHECK_CACHEFILE_PART_SIZE];
+ std::unique_ptr<char[]> readbuff(new char[CHECK_CACHEFILE_PART_SIZE]);

  for(size_t comp_bytes = 0, check_bytes = 0; comp_bytes < bytes; comp_bytes += check_bytes){
  if(CHECK_CACHEFILE_PART_SIZE < (bytes - comp_bytes)){
@@ -242,7 +243,7 @@ bool PageList::CheckZeroAreaInFile(int fd, off_t start, size_t bytes)
  }
  bool found_bad_data = false;
  ssize_t read_bytes;
- if(-1 == (read_bytes = pread(fd, readbuff, check_bytes, (start + comp_bytes)))){
+ if(-1 == (read_bytes = pread(fd, readbuff.get(), check_bytes, (start + comp_bytes)))){
  S3FS_PRN_ERR("Something error is occurred in reading %zu bytes at %lld from file(physical_fd=%d).", check_bytes, static_cast<long long int>(start + comp_bytes), fd);
  found_bad_data = true;
  }else{
@@ -256,11 +257,9 @@ bool PageList::CheckZeroAreaInFile(int fd, off_t start, size_t bytes)
  }
  }
  if(found_bad_data){
- delete[] readbuff;
  return false;
  }
  }
- delete[] readbuff;
  return true;
  }
@@ -869,17 +868,16 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
  Init(0, false, false);
  return true;
  }
- char* ptmp = new char[st.st_size + 1];
+ std::unique_ptr<char[]> ptmp(new char[st.st_size + 1]);
  ssize_t result;
  // read from file
- if(0 >= (result = pread(file.GetFd(), ptmp, st.st_size, 0))){
+ if(0 >= (result = pread(file.GetFd(), ptmp.get(), st.st_size, 0))){
  S3FS_PRN_ERR("failed to read stats(%d)", errno);
- delete[] ptmp;
  return false;
  }
  ptmp[result] = '\0';
  std::string oneline;
- std::istringstream ssall(ptmp);
+ std::istringstream ssall(ptmp.get());

  // loaded
  Clear();
@@ -889,7 +887,6 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
  ino_t cache_inode; // if this value is 0, it means old format.
  if(!getline(ssall, oneline, '\n')){
  S3FS_PRN_ERR("failed to parse stats.");
- delete[] ptmp;
  return false;
  }else{
  std::istringstream sshead(oneline);
@@ -899,7 +896,6 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
  // get first part in head line.
  if(!getline(sshead, strhead1, ':')){
  S3FS_PRN_ERR("failed to parse stats.");
- delete[] ptmp;
  return false;
  }
  // get second part in head line.
@@ -913,7 +909,6 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
  cache_inode = static_cast<ino_t>(cvt_strtoofft(strhead1.c_str(), /* base= */10));
  if(0 == cache_inode){
  S3FS_PRN_ERR("wrong inode number in parsed cache stats.");
- delete[] ptmp;
  return false;
  }
  }
@@ -921,7 +916,6 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
  // check inode number
  if(0 != cache_inode && cache_inode != inode){
  S3FS_PRN_ERR("differ inode and inode number in parsed cache stats.");
- delete[] ptmp;
  return false;
  }

@@ -969,7 +963,6 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
  }
  SetPageLoadedStatus(offset, size, pstatus);
  }
- delete[] ptmp;
  if(is_err){
  S3FS_PRN_ERR("failed to parse stats.");
  Clear();
@@ -21,6 +21,7 @@
  #include <cstdio>
  #include <cstdlib>
  #include <errno.h>
+ #include <memory>
  #include <set>
  #include <unistd.h>
  #include <dirent.h>
@@ -3300,16 +3301,14 @@ static int readdir_multi_head(const char* path, const S3ObjList& head, void* buf

  // First check for directory, start checking "not SSE-C".
  // If checking failed, retry to check with "SSE-C" by retry callback func when SSE-C mode.
- S3fsCurl* s3fscurl = new S3fsCurl();
+ std::unique_ptr<S3fsCurl> s3fscurl(new S3fsCurl());
  if(!s3fscurl->PreHeadRequest(disppath, (*iter), disppath)){ // target path = cache key path.(ex "dir/")
  S3FS_PRN_WARN("Could not make curl object for head request(%s).", disppath.c_str());
- delete s3fscurl;
  continue;
  }

- if(!curlmulti.SetS3fsCurlObject(s3fscurl)){
+ if(!curlmulti.SetS3fsCurlObject(std::move(s3fscurl))){
  S3FS_PRN_WARN("Could not make curl object into multi curl(%s).", disppath.c_str());
- delete s3fscurl;
  continue;
  }
  }
@@ -20,6 +20,7 @@

  #include <cstdlib>
  #include <iomanip>
+ #include <memory>
  #include <sstream>
  #include <string>

@@ -264,21 +265,19 @@ void s3fs_low_logprn(S3fsLog::s3fs_log_level level, const char* file, const char
  size_t len = vsnprintf(NULL, 0, fmt, va) + 1;
  va_end(va);

- char *message = new char[len];
+ std::unique_ptr<char[]> message(new char[len]);
  va_start(va, fmt);
- vsnprintf(message, len, fmt, va);
+ vsnprintf(message.get(), len, fmt, va);
  va_end(va);

  if(foreground || S3fsLog::IsSetLogFile()){
  S3fsLog::SeekEnd();
- fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s:%s(%d): %s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(level), file, func, line, message);
+ fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s:%s(%d): %s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(level), file, func, line, message.get());
  S3fsLog::Flush();
  }else{
  // TODO: why does this differ from s3fs_low_logprn2?
- syslog(S3fsLog::GetSyslogLevel(level), "%s%s:%s(%d): %s", instance_name.c_str(), file, func, line, message);
+ syslog(S3fsLog::GetSyslogLevel(level), "%s%s:%s(%d): %s", instance_name.c_str(), file, func, line, message.get());
  }
-
- delete[] message;
  }
  }

@@ -290,20 +289,18 @@ void s3fs_low_logprn2(S3fsLog::s3fs_log_level level, int nest, const char* file,
  size_t len = vsnprintf(NULL, 0, fmt, va) + 1;
  va_end(va);

- char *message = new char[len];
+ std::unique_ptr<char[]> message(new char[len]);
  va_start(va, fmt);
- vsnprintf(message, len, fmt, va);
+ vsnprintf(message.get(), len, fmt, va);
  va_end(va);

  if(foreground || S3fsLog::IsSetLogFile()){
  S3fsLog::SeekEnd();
- fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s%s:%s(%d): %s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(level), S3fsLog::GetS3fsLogNest(nest), file, func, line, message);
+ fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s%s:%s(%d): %s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(level), S3fsLog::GetS3fsLogNest(nest), file, func, line, message.get());
  S3fsLog::Flush();
  }else{
- syslog(S3fsLog::GetSyslogLevel(level), "%s%s%s", instance_name.c_str(), S3fsLog::GetS3fsLogNest(nest), message);
+ syslog(S3fsLog::GetSyslogLevel(level), "%s%s%s", instance_name.c_str(), S3fsLog::GetS3fsLogNest(nest), message.get());
  }
-
- delete[] message;
  }
  }
@@ -23,6 +23,7 @@
  #include <unistd.h>
  #include <cerrno>
  #include <grp.h>
+ #include <memory>
  #include <pwd.h>
  #include <libgen.h>
  #include <dirent.h>
@@ -94,32 +95,27 @@ std::string get_username(uid_t uid)
  {
  size_t maxlen = max_password_size;
  int result;
- char* pbuf;
  struct passwd pwinfo;
  struct passwd* ppwinfo = NULL;

  // make buffer
- pbuf = new char[maxlen];
+ std::unique_ptr<char[]> pbuf(new char[maxlen]);
  // get pw information
- while(ERANGE == (result = getpwuid_r(uid, &pwinfo, pbuf, maxlen, &ppwinfo))){
- delete[] pbuf;
+ while(ERANGE == (result = getpwuid_r(uid, &pwinfo, pbuf.get(), maxlen, &ppwinfo))){
  maxlen *= 2;
- pbuf = new char[maxlen];
+ pbuf.reset(new char[maxlen]);
  }

  if(0 != result){
  S3FS_PRN_ERR("could not get pw information(%d).", result);
- delete[] pbuf;
  return std::string("");
  }

  // check pw
  if(NULL == ppwinfo){
- delete[] pbuf;
  return std::string("");
  }
  std::string name = SAFESTRPTR(ppwinfo->pw_name);
- delete[] pbuf;
  return name;
  }

@@ -127,29 +123,25 @@ int is_uid_include_group(uid_t uid, gid_t gid)
  {
  size_t maxlen = max_group_name_length;
  int result;
- char* pbuf;
  struct group ginfo;
  struct group* pginfo = NULL;

  // make buffer
- pbuf = new char[maxlen];
+ std::unique_ptr<char[]> pbuf(new char[maxlen]);
  // get group information
- while(ERANGE == (result = getgrgid_r(gid, &ginfo, pbuf, maxlen, &pginfo))){
- delete[] pbuf;
+ while(ERANGE == (result = getgrgid_r(gid, &ginfo, pbuf.get(), maxlen, &pginfo))){
  maxlen *= 2;
- pbuf = new char[maxlen];
+ pbuf.reset(new char[maxlen]);
  }

  if(0 != result){
  S3FS_PRN_ERR("could not get group information(%d).", result);
- delete[] pbuf;
  return -result;
  }

  // check group
  if(NULL == pginfo){
  // there is not gid in group.
- delete[] pbuf;
  return -EINVAL;
  }

@@ -159,11 +151,9 @@ int is_uid_include_group(uid_t uid, gid_t gid)
  for(ppgr_mem = pginfo->gr_mem; ppgr_mem && *ppgr_mem; ppgr_mem++){
  if(username == *ppgr_mem){
  // Found username in group.
- delete[] pbuf;
  return 1;
  }
  }
- delete[] pbuf;
  return 0;
  }
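get_username() and is_uid_include_group() above keep their getpwuid_r/getgrgid_r scratch buffer in a std::unique_ptr<char[]> and grow it with reset() while the call reports ERANGE. A standalone sketch of the same retry loop (lookup_name() is an invented wrapper, and the 1024-byte starting size is an arbitrary choice for the example):

#include <cerrno>
#include <memory>
#include <string>
#include <pwd.h>
#include <sys/types.h>
#include <unistd.h>

// Look up a user name, doubling the scratch buffer while getpwuid_r reports ERANGE.
std::string lookup_name(uid_t uid)
{
    size_t maxlen = 1024;
    std::unique_ptr<char[]> pbuf(new char[maxlen]);
    struct passwd pwinfo;
    struct passwd* ppwinfo = NULL;
    int result;
    while(ERANGE == (result = getpwuid_r(uid, &pwinfo, pbuf.get(), maxlen, &ppwinfo))){
        maxlen *= 2;
        pbuf.reset(new char[maxlen]);   // old buffer is freed by reset()
    }
    if(0 != result || NULL == ppwinfo){
        return std::string("");          // every path releases pbuf automatically
    }
    return std::string(ppwinfo->pw_name);
}

int main()
{
    return lookup_name(getuid()).empty() ? 1 : 0;
}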
@@ -107,6 +107,7 @@ bool get_keyword_value(const std::string& target, const char* keyword, std::stri
  //
  std::string s3fs_hex_lower(const unsigned char* input, size_t length);
  std::string s3fs_hex_upper(const unsigned char* input, size_t length);
+ // TODO: return std::string
  char* s3fs_base64(const unsigned char* input, size_t length);
  unsigned char* s3fs_decode64(const char* input, size_t input_len, size_t* plength);
src/types.h
@@ -200,7 +200,7 @@ typedef std::list<etagpair> etaglist_t;

  struct petagpool
  {
- std::list<etagpair*> petaglist;
+ std::list<etagpair> petaglist;

  ~petagpool()
  {
@@ -209,19 +209,13 @@ struct petagpool

  void clear()
  {
- for(std::list<etagpair*>::iterator it = petaglist.begin(); petaglist.end() != it; ++it){
- if(*it){
- delete (*it);
- }
- }
  petaglist.clear();
  }

  etagpair* add(const etagpair& etag_entity)
  {
- etagpair* petag = new etagpair(etag_entity);
- petaglist.push_back(petag);
- return petag;
+ petaglist.push_back(etag_entity);
+ return &petaglist.back();
  }
  };
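The petagpool change above works because std::list, unlike std::vector, never relocates elements already in the container: push_back leaves earlier nodes at stable addresses, so the etagpair* returned by add() stays valid as more parts are added. A small sketch of that property (Tag and pool are invented names):

#include <cassert>
#include <list>

struct Tag { int part_num; };

int main()
{
    std::list<Tag> pool;
    pool.push_back(Tag());
    Tag* first = &pool.back();      // address of the first element
    first->part_num = 1;

    for(int i = 2; i <= 100; ++i){  // many more insertions...
        pool.push_back(Tag());
        pool.back().part_num = i;
    }

    assert(first == &pool.front()); // ...but the first element never moved,
    assert(first->part_num == 1);   // so handed-out pointers stay valid
    return 0;
}

A std::vector<etagpair> would not be a safe drop-in here, since vector growth can reallocate and invalidate every previously returned pointer.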