From 7f30353fb9f98663f90141aefbcd93408faca17d Mon Sep 17 00:00:00 2001 From: Andrew Gaul <gaul@gaul.org> Date: Thu, 17 Aug 2023 22:08:56 +0900 Subject: [PATCH] Return std::unique_ptr<S3fsCurl> from S3fsCurl callbacks (#2272) References #2261. --- .github/workflows/ci.yml | 4 ++-- src/curl.cpp | 25 ++++++++++--------------- src/curl.h | 8 ++++---- src/curl_multi.cpp | 13 ++++++------- src/curl_multi.h | 2 +- src/s3fs.cpp | 7 +++---- 6 files changed, 26 insertions(+), 33 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 756328f..8652b72 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -102,7 +102,7 @@ jobs: - name: Cppcheck run: | # specify the version range to run cppcheck (cppcheck version number is x.y or x.y.z) - if cppcheck --version | sed -e 's/\./ /g' | awk '{if (($2 * 1000 + $3) <= 1086) { exit(1) } }'; then + if cppcheck --version | sed -e 's/\./ /g' | awk '{if (($2 * 1000 + $3) <= 2004) { exit(1) } }'; then make cppcheck fi @@ -167,7 +167,7 @@ jobs: - name: Cppcheck run: | # specify the version range to run cppcheck (cppcheck version number is x.y or x.y.z) - if cppcheck --version | sed -e 's/\./ /g' | awk '{if (($2 * 1000 + $3) <= 1086) { exit(1) } }'; then + if cppcheck --version | sed -e 's/\./ /g' | awk '{if (($2 * 1000 + $3) <= 2004) { exit(1) } }'; then make cppcheck fi diff --git a/src/curl.cpp b/src/curl.cpp index c1d0d56..bd21fd0 100644 --- a/src/curl.cpp +++ b/src/curl.cpp @@ -1201,7 +1201,7 @@ bool S3fsCurl::MixMultipartPostCallback(S3fsCurl* s3fscurl, void* param) return s3fscurl->MixMultipartPostComplete(); } -S3fsCurl* S3fsCurl::UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl) +std::unique_ptr<S3fsCurl> S3fsCurl::UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl) { if(!s3fscurl){ return nullptr; } @@ -1229,7 +1229,7 @@ S3fsCurl* S3fsCurl::UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl) } // duplicate request - S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe()); + std::unique_ptr<S3fsCurl> newcurl(new 
S3fsCurl(s3fscurl->IsUseAhbe())); newcurl->partdata.petag = s3fscurl->partdata.petag; newcurl->partdata.fd = s3fscurl->partdata.fd; newcurl->partdata.startpos = s3fscurl->b_partdata_startpos; @@ -1243,13 +1243,12 @@ S3fsCurl* S3fsCurl::UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl) // setup new curl object if(0 != newcurl->UploadMultipartPostSetup(s3fscurl->path.c_str(), part_num, upload_id)){ S3FS_PRN_ERR("Could not duplicate curl object(%s:%d).", s3fscurl->path.c_str(), part_num); - delete newcurl; return nullptr; } return newcurl; } -S3fsCurl* S3fsCurl::CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl) +std::unique_ptr<S3fsCurl> S3fsCurl::CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl) { if(!s3fscurl){ return nullptr; } @@ -1277,7 +1276,7 @@ S3fsCurl* S3fsCurl::CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl) } // duplicate request - S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe()); + std::unique_ptr<S3fsCurl> newcurl(new S3fsCurl(s3fscurl->IsUseAhbe())); newcurl->partdata.petag = s3fscurl->partdata.petag; newcurl->b_from = s3fscurl->b_from; newcurl->b_meta = s3fscurl->b_meta; @@ -1288,25 +1287,22 @@ S3fsCurl* S3fsCurl::CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl) // setup new curl object if(0 != newcurl->CopyMultipartPostSetup(s3fscurl->b_from.c_str(), s3fscurl->path.c_str(), part_num, upload_id, s3fscurl->b_meta)){ S3FS_PRN_ERR("Could not duplicate curl object(%s:%d).", s3fscurl->path.c_str(), part_num); - delete newcurl; return nullptr; } return newcurl; } -S3fsCurl* S3fsCurl::MixMultipartPostRetryCallback(S3fsCurl* s3fscurl) +std::unique_ptr<S3fsCurl> S3fsCurl::MixMultipartPostRetryCallback(S3fsCurl* s3fscurl) { if(!s3fscurl){ return nullptr; } - S3fsCurl* pcurl; if(-1 == s3fscurl->partdata.fd){ - pcurl = S3fsCurl::CopyMultipartPostRetryCallback(s3fscurl); + return S3fsCurl::CopyMultipartPostRetryCallback(s3fscurl); }else{ - pcurl = S3fsCurl::UploadMultipartPostRetryCallback(s3fscurl); + return S3fsCurl::UploadMultipartPostRetryCallback(s3fscurl); } - return pcurl; 
} int S3fsCurl::MapPutErrorResponse(int result) @@ -1589,7 +1585,7 @@ int S3fsCurl::ParallelMixMultipartUploadRequest(const char* tpath, headers_t& me return 0; } -S3fsCurl* S3fsCurl::ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl) +std::unique_ptr<S3fsCurl> S3fsCurl::ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl) { int result; @@ -1602,12 +1598,11 @@ S3fsCurl* S3fsCurl::ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl) } // duplicate request(setup new curl object) - S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe()); + std::unique_ptr<S3fsCurl> newcurl(new S3fsCurl(s3fscurl->IsUseAhbe())); if(0 != (result = newcurl->PreGetObjectRequest(s3fscurl->path.c_str(), s3fscurl->partdata.fd, s3fscurl->partdata.startpos, s3fscurl->partdata.size, s3fscurl->b_ssetype, s3fscurl->b_ssevalue))){ S3FS_PRN_ERR("failed downloading part setup(%d)", result); - delete newcurl; - return nullptr;; + return nullptr; } newcurl->retry_count = s3fscurl->retry_count + 1; diff --git a/src/curl.h b/src/curl.h index f57bb0c..9224d1b 100644 --- a/src/curl.h +++ b/src/curl.h @@ -225,10 +225,10 @@ class S3fsCurl static bool UploadMultipartPostCallback(S3fsCurl* s3fscurl, void* param); static bool CopyMultipartPostCallback(S3fsCurl* s3fscurl, void* param); static bool MixMultipartPostCallback(S3fsCurl* s3fscurl, void* param); - static S3fsCurl* UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl); - static S3fsCurl* CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl); - static S3fsCurl* MixMultipartPostRetryCallback(S3fsCurl* s3fscurl); - static S3fsCurl* ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl); + static std::unique_ptr<S3fsCurl> UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl); + static std::unique_ptr<S3fsCurl> CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl); + static std::unique_ptr<S3fsCurl> MixMultipartPostRetryCallback(S3fsCurl* s3fscurl); + static std::unique_ptr<S3fsCurl> ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl); // lazy functions for set curl options static bool CopyMultipartPostSetCurlOpts(S3fsCurl* 
s3fscurl); diff --git a/src/curl_multi.cpp b/src/curl_multi.cpp index 8e269f0..74d3b37 100644 --- a/src/curl_multi.cpp +++ b/src/curl_multi.cpp @@ -277,26 +277,25 @@ int S3fsMultiCurl::MultiRead() // immediately even if retry processing is required. s3fscurl->DestroyCurlHandle(); }else{ - std::unique_ptr<S3fsCurl> retrycurl; - // Reset offset if(isNeedResetOffset){ S3fsCurl::ResetOffset(s3fscurl.get()); } // For retry - S3fsCurl* retry_ptr = nullptr; + std::unique_ptr<S3fsCurl> retrycurl; + const S3fsCurl* retrycurl_ptr = retrycurl.get(); // save this due to std::move below if(RetryCallback){ - retry_ptr = RetryCallback(s3fscurl.get()); - retrycurl.reset(retry_ptr); - if(nullptr != retry_ptr){ + retrycurl = RetryCallback(s3fscurl.get()); + if(nullptr != retrycurl){ clist_all.push_back(std::move(retrycurl)); }else{ // set EIO and wait for other parts. result = -EIO; } } - if(s3fscurl.get() != retry_ptr){ + // cppcheck-suppress mismatchingContainers + if(s3fscurl.get() != retrycurl_ptr){ s3fscurl->DestroyCurlHandle(); } } diff --git a/src/curl_multi.h b/src/curl_multi.h index 0778a6f..d87fafa 100644 --- a/src/curl_multi.h +++ b/src/curl_multi.h @@ -32,7 +32,7 @@ class S3fsCurl; typedef std::vector<std::unique_ptr<S3fsCurl>> s3fscurllist_t; typedef bool (*S3fsMultiSuccessCallback)(S3fsCurl* s3fscurl, void* param); // callback for succeed multi request typedef bool (*S3fsMultiNotFoundCallback)(S3fsCurl* s3fscurl, void* param); // callback for succeed multi request -typedef S3fsCurl* (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failure and retrying +typedef std::unique_ptr<S3fsCurl> (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failure and retrying //---------------------------------------------- // class S3fsMultiCurl //---------------------------------------------- diff --git a/src/s3fs.cpp b/src/s3fs.cpp index 1ddb892..50835dc 100644 --- a/src/s3fs.cpp +++ b/src/s3fs.cpp @@ -120,7 +120,7 @@ static int check_object_owner(const char* path, struct stat* pstbuf); static int check_parent_object_access(const char* path, int mask); static int 
get_local_fent(AutoFdEntity& autoent, FdEntity **entity, const char* path, int flags = O_RDONLY, bool is_load = false); static bool multi_head_callback(S3fsCurl* s3fscurl, void* param); -static S3fsCurl* multi_head_retry_callback(S3fsCurl* s3fscurl); +static std::unique_ptr<S3fsCurl> multi_head_retry_callback(S3fsCurl* s3fscurl); static int readdir_multi_head(const char* path, const S3ObjList& head, void* buf, fuse_fill_dir_t filler); static int list_bucket(const char* path, S3ObjList& head, const char* delimiter, bool check_content_only = false); static int directory_empty(const char* path); @@ -3219,7 +3219,7 @@ static bool multi_head_notfound_callback(S3fsCurl* s3fscurl, void* param) return true; } -static S3fsCurl* multi_head_retry_callback(S3fsCurl* s3fscurl) +static std::unique_ptr<S3fsCurl> multi_head_retry_callback(S3fsCurl* s3fscurl) { if(!s3fscurl){ return nullptr; } @@ -3239,14 +3239,13 @@ static S3fsCurl* multi_head_retry_callback(S3fsCurl* s3fscurl) retry_count++; } - S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe()); + std::unique_ptr<S3fsCurl> newcurl(new S3fsCurl(s3fscurl->IsUseAhbe())); std::string path = s3fscurl->GetPath(); std::string base_path = s3fscurl->GetBasePath(); std::string saved_path = s3fscurl->GetSpecialSavedPath(); if(!newcurl->PreHeadRequest(path, base_path, saved_path, ssec_key_pos)){ S3FS_PRN_ERR("Could not duplicate curl object(%s).", saved_path.c_str()); - delete newcurl; return nullptr; } newcurl->SetMultipartRetryCount(retry_count);