From 141d74f1871fbed88282e6c38e7802c44ff90c60 Mon Sep 17 00:00:00 2001 From: Andrew Gaul Date: Fri, 18 Oct 2024 21:57:52 +0900 Subject: [PATCH] Use auto for iterator variable types (#2554) This touches a few other long type names. Applied via clang-tidy -fix. --- .clang-tidy | 1 - src/addhead.cpp | 6 +-- src/cache.cpp | 78 +++++++++++++++++++------------------- src/curl.cpp | 54 +++++++++++++------------- src/curl_multi.cpp | 15 ++++---- src/fdcache.cpp | 52 ++++++++++++------------- src/fdcache_entity.cpp | 40 +++++++++---------- src/fdcache_fdinfo.cpp | 20 +++++----- src/fdcache_page.cpp | 42 ++++++++++---------- src/fdcache_pseudofd.cpp | 4 +- src/fdcache_untreated.cpp | 20 +++++----- src/metaheader.cpp | 54 +++++++++++++------------- src/mpu_util.cpp | 4 +- src/openssl_auth.cpp | 5 +-- src/s3fs.cpp | 56 +++++++++++++-------------- src/s3fs_cred.cpp | 31 ++++++++------- src/s3objlist.cpp | 22 +++++------ src/s3objlist.h | 4 +- src/string_util.cpp | 2 +- src/types.h | 2 +- test/truncate_read_file.cc | 2 +- test/write_multiblock.cc | 6 +-- 22 files changed, 257 insertions(+), 263 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index cb262a9..7bb8e12 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -59,7 +59,6 @@ Checks: ' -modernize-use-nodiscard, -modernize-raw-string-literal, -modernize-return-braced-init-list, - -modernize-use-auto, -modernize-use-default-member-init, -modernize-use-trailing-return-type, -modernize-use-using, diff --git a/src/addhead.cpp b/src/addhead.cpp index ddf2d67..8c49476 100644 --- a/src/addhead.cpp +++ b/src/addhead.cpp @@ -164,7 +164,7 @@ bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const // [NOTE] // Because to allow duplicate key, and then scanning the entire table. 
// - for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){ + for(auto iter = addheadlist.cbegin(); iter != addheadlist.cend(); ++iter){ const add_header *paddhead = &*iter; if(paddhead->pregex){ @@ -194,7 +194,7 @@ struct curl_slist* AdditionalHeader::AddHeader(struct curl_slist* list, const ch if(!AddHeader(meta, path)){ return list; } - for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ + for(auto iter = meta.cbegin(); iter != meta.cend(); ++iter){ // Adding header list = curl_slist_sort_insert(list, iter->first.c_str(), iter->second.c_str()); } @@ -214,7 +214,7 @@ bool AdditionalHeader::Dump() const ssdbg << "Additional Header list[" << addheadlist.size() << "] = {" << std::endl; - for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter, ++cnt){ + for(auto iter = addheadlist.cbegin(); iter != addheadlist.cend(); ++iter, ++cnt){ const add_header *paddhead = &*iter; ssdbg << " [" << cnt << "] = {" << std::endl; diff --git a/src/cache.cpp b/src/cache.cpp index 0d211ef..364d233 100644 --- a/src/cache.cpp +++ b/src/cache.cpp @@ -194,7 +194,7 @@ bool StatCache::GetStat(const std::string& key, struct stat* pst, headers_t* met const std::lock_guard lock(StatCache::stat_cache_lock); - stat_cache_t::iterator iter = stat_cache.end(); + auto iter = stat_cache.end(); if(overcheck && '/' != *strpath.rbegin()){ strpath += "/"; iter = stat_cache.find(strpath); @@ -220,7 +220,7 @@ bool StatCache::GetStat(const std::string& key, struct stat* pst, headers_t* met std::string stretag; if(petag){ // find & check ETag - for(headers_t::iterator hiter = ent->meta.begin(); hiter != ent->meta.end(); ++hiter){ + for(auto hiter = ent->meta.cbegin(); hiter != ent->meta.cend(); ++hiter){ std::string tag = lower(hiter->first); if(tag == "etag"){ stretag = hiter->second; @@ -280,7 +280,7 @@ bool StatCache::IsNoObjectCache(const std::string& key, bool overcheck) const std::lock_guard 
lock(StatCache::stat_cache_lock); - stat_cache_t::iterator iter = stat_cache.end(); + auto iter = stat_cache.end(); if(overcheck && '/' != *strpath.rbegin()){ strpath += "/"; iter = stat_cache.find(strpath); @@ -319,7 +319,7 @@ bool StatCache::AddStat(const std::string& key, const headers_t& meta, bool forc const std::lock_guard lock(StatCache::stat_cache_lock); - if(stat_cache.end() != stat_cache.find(key)){ + if(stat_cache.cend() != stat_cache.find(key)){ // found cache DelStatHasLock(key); }else{ @@ -345,7 +345,7 @@ bool StatCache::AddStat(const std::string& key, const headers_t& meta, bool forc ent.meta.clear(); SetStatCacheTime(ent.cache_date); // Set time. //copy only some keys - for(headers_t::const_iterator iter = meta.begin(); iter != meta.end(); ++iter){ + for(auto iter = meta.cbegin(); iter != meta.cend(); ++iter){ std::string tag = lower(iter->first); std::string value = iter->second; if(tag == "content-type"){ @@ -365,7 +365,7 @@ bool StatCache::AddStat(const std::string& key, const headers_t& meta, bool forc // check symbolic link cache if(!S_ISLNK(value.stbuf.st_mode)){ - if(symlink_cache.end() != symlink_cache.find(key)){ + if(symlink_cache.cend() != symlink_cache.find(key)){ // if symbolic link cache has key, thus remove it. 
DelSymlinkHasLock(key); } @@ -395,14 +395,14 @@ bool StatCache::UpdateMetaStats(const std::string& key, const headers_t& meta) S3FS_PRN_INFO3("update stat cache entry[path=%s]", key.c_str()); const std::lock_guard lock(StatCache::stat_cache_lock); - stat_cache_t::iterator iter = stat_cache.find(key); - if(stat_cache.end() == iter){ + auto iter = stat_cache.find(key); + if(stat_cache.cend() == iter){ return true; } stat_cache_entry* ent = &iter->second; // update only meta keys - for(headers_t::const_iterator metaiter = meta.begin(); metaiter != meta.end(); ++metaiter){ + for(auto metaiter = meta.cbegin(); metaiter != meta.cend(); ++metaiter){ std::string tag = lower(metaiter->first); std::string value = metaiter->second; if(tag == "content-type"){ @@ -439,7 +439,7 @@ bool StatCache::AddNoObjectCache(const std::string& key) const std::lock_guard lock(StatCache::stat_cache_lock); - if(stat_cache.end() != stat_cache.find(key)){ + if(stat_cache.cend() != stat_cache.find(key)){ // found DelStatHasLock(key); }else{ @@ -465,7 +465,7 @@ bool StatCache::AddNoObjectCache(const std::string& key) stat_cache[key] = std::move(ent); // check symbolic link cache - if(symlink_cache.end() != symlink_cache.find(key)){ + if(symlink_cache.cend() != symlink_cache.find(key)){ // if symbolic link cache has key, thus remove it. 
DelSymlinkHasLock(key); } @@ -475,9 +475,9 @@ bool StatCache::AddNoObjectCache(const std::string& key) void StatCache::ChangeNoTruncateFlag(const std::string& key, bool no_truncate) { const std::lock_guard lock(StatCache::stat_cache_lock); - stat_cache_t::iterator iter = stat_cache.find(key); + auto iter = stat_cache.find(key); - if(stat_cache.end() != iter){ + if(stat_cache.cend() != iter){ stat_cache_entry* ent = &iter->second; if(no_truncate){ if(0L == ent->notruncate){ @@ -505,7 +505,7 @@ bool StatCache::TruncateCache() // 1) erase over expire time if(IsExpireTime){ - for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ){ + for(auto iter = stat_cache.cbegin(); iter != stat_cache.cend(); ){ const stat_cache_entry* entry = &iter->second; if(0L == entry->notruncate && IsExpireStatCacheTime(entry->cache_date, ExpireTime)){ iter = stat_cache.erase(iter); @@ -523,7 +523,7 @@ bool StatCache::TruncateCache() // 3) erase from the old cache in order size_t erase_count= stat_cache.size() - CacheSize + 1; statiterlist_t erase_iters; - for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end() && 0 < erase_count; ++iter){ + for(auto iter = stat_cache.begin(); iter != stat_cache.end() && 0 < erase_count; ++iter){ // check no truncate const stat_cache_entry* ent = &iter->second; if(0L < ent->notruncate){ @@ -542,8 +542,8 @@ bool StatCache::TruncateCache() } } } - for(statiterlist_t::iterator iiter = erase_iters.begin(); iiter != erase_iters.end(); ++iiter){ - stat_cache_t::iterator siter = *iiter; + for(auto iiter = erase_iters.cbegin(); iiter != erase_iters.cend(); ++iiter){ + auto siter = *iiter; S3FS_PRN_DBG("truncate stat cache[path=%s]", siter->first.c_str()); stat_cache.erase(siter); @@ -558,7 +558,7 @@ bool StatCache::DelStatHasLock(const std::string& key) S3FS_PRN_INFO3("delete stat cache entry[path=%s]", key.c_str()); stat_cache_t::iterator iter; - if(stat_cache.end() != (iter = stat_cache.find(key))){ + 
if(stat_cache.cend() != (iter = stat_cache.find(key))){ stat_cache.erase(iter); DelNotruncateCache(key); } @@ -571,7 +571,7 @@ bool StatCache::DelStatHasLock(const std::string& key) // If there is "path/" cache, delete it. strpath += "/"; } - if(stat_cache.end() != (iter = stat_cache.find(strpath))){ + if(stat_cache.cend() != (iter = stat_cache.find(strpath))){ stat_cache.erase(iter); DelNotruncateCache(strpath); } @@ -588,8 +588,8 @@ bool StatCache::GetSymlink(const std::string& key, std::string& value) const std::lock_guard lock(StatCache::stat_cache_lock); - symlink_cache_t::iterator iter = symlink_cache.find(strpath); - if(iter != symlink_cache.end()){ + auto iter = symlink_cache.find(strpath); + if(iter != symlink_cache.cend()){ symlink_cache_entry* ent = &iter->second; if(!IsExpireTime || !IsExpireStatCacheTime(ent->cache_date, ExpireTime)){ // use the same as Stats // found @@ -624,7 +624,7 @@ bool StatCache::AddSymlink(const std::string& key, const std::string& value) const std::lock_guard lock(StatCache::stat_cache_lock); - if(symlink_cache.end() != symlink_cache.find(key)){ + if(symlink_cache.cend() != symlink_cache.find(key)){ // found DelSymlinkHasLock(key); }else{ @@ -657,7 +657,7 @@ bool StatCache::TruncateSymlink() // 1) erase over expire time if(IsExpireTime){ - for(symlink_cache_t::iterator iter = symlink_cache.begin(); iter != symlink_cache.end(); ){ + for(auto iter = symlink_cache.cbegin(); iter != symlink_cache.cend(); ){ const symlink_cache_entry* entry = &iter->second; if(IsExpireStatCacheTime(entry->cache_date, ExpireTime)){ // use the same as Stats iter = symlink_cache.erase(iter); @@ -675,15 +675,15 @@ bool StatCache::TruncateSymlink() // 3) erase from the old cache in order size_t erase_count= symlink_cache.size() - CacheSize + 1; symlinkiterlist_t erase_iters; - for(symlink_cache_t::iterator iter = symlink_cache.begin(); iter != symlink_cache.end(); ++iter){ + for(auto iter = symlink_cache.begin(); iter != symlink_cache.end(); ++iter){ 
erase_iters.push_back(iter); sort(erase_iters.begin(), erase_iters.end(), sort_symlinkiterlist()); if(erase_count < erase_iters.size()){ erase_iters.pop_back(); } } - for(symlinkiterlist_t::iterator iiter = erase_iters.begin(); iiter != erase_iters.end(); ++iiter){ - symlink_cache_t::iterator siter = *iiter; + for(auto iiter = erase_iters.cbegin(); iiter != erase_iters.cend(); ++iiter){ + auto siter = *iiter; S3FS_PRN_DBG("truncate symbolic link cache[path=%s]", siter->first.c_str()); symlink_cache.erase(siter); @@ -698,7 +698,7 @@ bool StatCache::DelSymlinkHasLock(const std::string& key) S3FS_PRN_INFO3("delete symbolic link cache entry[path=%s]", key.c_str()); symlink_cache_t::iterator iter; - if(symlink_cache.end() != (iter = symlink_cache.find(key))){ + if(symlink_cache.cend() != (iter = symlink_cache.find(key))){ symlink_cache.erase(iter); } S3FS_MALLOCTRIM(0); @@ -719,8 +719,8 @@ bool StatCache::AddNotruncateCache(const std::string& key) } parentdir += '/'; // directory path must be '/' termination. - notruncate_dir_map_t::iterator iter = notruncate_file_cache.find(parentdir); - if(iter == notruncate_file_cache.end()){ + auto iter = notruncate_file_cache.find(parentdir); + if(iter == notruncate_file_cache.cend()){ // add new list notruncate_filelist_t list; list.push_back(filename); @@ -728,8 +728,8 @@ bool StatCache::AddNotruncateCache(const std::string& key) }else{ // add filename to existed list notruncate_filelist_t& filelist = iter->second; - notruncate_filelist_t::const_iterator fiter = std::find(filelist.begin(), filelist.end(), filename); - if(fiter == filelist.end()){ + auto fiter = std::find(filelist.cbegin(), filelist.cend(), filename); + if(fiter == filelist.cend()){ filelist.push_back(filename); } } @@ -749,12 +749,12 @@ bool StatCache::DelNotruncateCache(const std::string& key) } parentdir += '/'; // directory path must be '/' termination. 
- notruncate_dir_map_t::iterator iter = notruncate_file_cache.find(parentdir); - if(iter != notruncate_file_cache.end()){ + auto iter = notruncate_file_cache.find(parentdir); + if(iter != notruncate_file_cache.cend()){ // found directory in map notruncate_filelist_t& filelist = iter->second; - notruncate_filelist_t::iterator fiter = std::find(filelist.begin(), filelist.end(), filename); - if(fiter != filelist.end()){ + auto fiter = std::find(filelist.begin(), filelist.end(), filename); + if(fiter != filelist.cend()){ // found filename in directory file list filelist.erase(fiter); if(filelist.empty()){ @@ -793,16 +793,16 @@ bool StatCache::GetNotruncateCache(const std::string& parentdir, notruncate_file const std::lock_guard lock(StatCache::stat_cache_lock); - notruncate_dir_map_t::iterator iter = notruncate_file_cache.find(dirpath); - if(iter == notruncate_file_cache.end()){ + auto iter = notruncate_file_cache.find(dirpath); + if(iter == notruncate_file_cache.cend()){ // not found directory map return true; } // found directory in map const notruncate_filelist_t& filelist = iter->second; - for(notruncate_filelist_t::const_iterator fiter = filelist.begin(); fiter != filelist.end(); ++fiter){ - if(list.end() == std::find(list.begin(), list.end(), *fiter)){ + for(auto fiter = filelist.cbegin(); fiter != filelist.cend(); ++fiter){ + if(list.cend() == std::find(list.cbegin(), list.cend(), *fiter)){ // found notuncate file that does not exist in the list, so add it. 
list.push_back(*fiter); } diff --git a/src/curl.cpp b/src/curl.cpp index a44559d..d5c9080 100644 --- a/src/curl.cpp +++ b/src/curl.cpp @@ -284,7 +284,7 @@ void S3fsCurl::LockCurlShare(CURL* handle, curl_lock_data nLockData, curl_lock_a if(!hCurlShare){ return; } - S3fsCurl::callback_locks_t* locks = static_cast(useptr); + auto* locks = static_cast(useptr); if(CURL_LOCK_DATA_DNS == nLockData){ locks->dns.lock(); }else if(CURL_LOCK_DATA_SSL_SESSION == nLockData){ @@ -297,7 +297,7 @@ void S3fsCurl::UnlockCurlShare(CURL* handle, curl_lock_data nLockData, void* use if(!hCurlShare){ return; } - S3fsCurl::callback_locks_t* locks = static_cast(useptr); + auto* locks = static_cast(useptr); if(CURL_LOCK_DATA_DNS == nLockData){ locks->dns.unlock(); }else if(CURL_LOCK_DATA_SSL_SESSION == nLockData){ @@ -457,10 +457,10 @@ std::string S3fsCurl::LookupMimeType(const std::string& name) } // if we get here, then we have an extension (ext) - mimes_t::const_iterator iter = S3fsCurl::mimeTypes.find(ext); + auto iter = S3fsCurl::mimeTypes.find(ext); // if the last extension matches a mimeType, then return // that mime type - if (iter != S3fsCurl::mimeTypes.end()) { + if (iter != S3fsCurl::mimeTypes.cend()) { result = (*iter).second; return result; } @@ -473,7 +473,7 @@ std::string S3fsCurl::LookupMimeType(const std::string& name) // Didn't find a mime-type for the first extension // Look for second extension in mimeTypes, return if found iter = S3fsCurl::mimeTypes.find(ext2); - if (iter != S3fsCurl::mimeTypes.end()) { + if (iter != S3fsCurl::mimeTypes.cend()) { result = (*iter).second; return result; } @@ -558,14 +558,14 @@ bool S3fsCurl::LocateBundle() size_t S3fsCurl::WriteMemoryCallback(void* ptr, size_t blockSize, size_t numBlocks, void* data) { - std::string* body = static_cast(data); + auto* body = static_cast(data); body->append(static_cast(ptr), blockSize * numBlocks); return (blockSize * numBlocks); } size_t S3fsCurl::ReadCallback(void* ptr, size_t size, size_t nmemb, void* 
userp) { - S3fsCurl* pCurl = static_cast(userp); + auto* pCurl = static_cast(userp); if(1 > (size * nmemb)){ return 0; @@ -584,7 +584,7 @@ size_t S3fsCurl::ReadCallback(void* ptr, size_t size, size_t nmemb, void* userp) size_t S3fsCurl::HeaderCallback(void* data, size_t blockSize, size_t numBlocks, void* userPtr) { - headers_t* headers = static_cast(userPtr); + auto* headers = static_cast(userPtr); std::string header(static_cast(data), blockSize * numBlocks); std::string key; std::istringstream ss(header); @@ -592,7 +592,7 @@ size_t S3fsCurl::HeaderCallback(void* data, size_t blockSize, size_t numBlocks, if(getline(ss, key, ':')){ // Force to lower, only "x-amz" std::string lkey = key; - transform(lkey.begin(), lkey.end(), lkey.begin(), static_cast(std::tolower)); + transform(lkey.cbegin(), lkey.cend(), lkey.begin(), static_cast(std::tolower)); if(is_prefix(lkey.c_str(), "x-amz")){ key = lkey; } @@ -605,7 +605,7 @@ size_t S3fsCurl::HeaderCallback(void* data, size_t blockSize, size_t numBlocks, size_t S3fsCurl::UploadReadCallback(void* ptr, size_t size, size_t nmemb, void* userp) { - S3fsCurl* pCurl = static_cast(userp); + auto* pCurl = static_cast(userp); if(1 > (size * nmemb)){ return 0; @@ -637,7 +637,7 @@ size_t S3fsCurl::UploadReadCallback(void* ptr, size_t size, size_t nmemb, void* size_t S3fsCurl::DownloadWriteCallback(void* ptr, size_t size, size_t nmemb, void* userp) { - S3fsCurl* pCurl = static_cast(userp); + auto* pCurl = static_cast(userp); if(1 > (size * nmemb)){ return 0; @@ -746,7 +746,7 @@ std::string S3fsCurl::SetStorageClass(const std::string& storage_class) std::string old = S3fsCurl::storage_class; S3fsCurl::storage_class = storage_class; // AWS requires uppercase storage class values - transform(S3fsCurl::storage_class.begin(), S3fsCurl::storage_class.end(), S3fsCurl::storage_class.begin(), ::toupper); + transform(S3fsCurl::storage_class.cbegin(), S3fsCurl::storage_class.cend(), S3fsCurl::storage_class.begin(), ::toupper); return old; } @@ 
-919,8 +919,8 @@ bool S3fsCurl::LoadEnvSseKmsid() // bool S3fsCurl::GetSseKey(std::string& md5, std::string& ssekey) { - for(sseckeylist_t::const_iterator iter = S3fsCurl::sseckeys.begin(); iter != S3fsCurl::sseckeys.end(); ++iter){ - if(md5.empty() || md5 == (*iter).begin()->first){ + for(auto iter = S3fsCurl::sseckeys.cbegin(); iter != S3fsCurl::sseckeys.cend(); ++iter){ + if(md5.empty() || md5 == (*iter).cbegin()->first){ md5 = iter->begin()->first; ssekey = iter->begin()->second; return true; @@ -935,7 +935,7 @@ bool S3fsCurl::GetSseKeyMd5(size_t pos, std::string& md5) return false; } size_t cnt = 0; - for(sseckeylist_t::const_iterator iter = S3fsCurl::sseckeys.begin(); iter != S3fsCurl::sseckeys.end(); ++iter, ++cnt){ + for(auto iter = S3fsCurl::sseckeys.cbegin(); iter != S3fsCurl::sseckeys.cend(); ++iter, ++cnt){ if(pos == cnt){ md5 = iter->begin()->first; return true; @@ -1551,7 +1551,7 @@ int S3fsCurl::ParallelMixMultipartUploadRequest(const char* tpath, headers_t& me curlmulti.SetSuccessCallback(S3fsCurl::MixMultipartPostCallback); curlmulti.SetRetryCallback(S3fsCurl::MixMultipartPostRetryCallback); - for(fdpage_list_t::const_iterator iter = mixuppages.begin(); iter != mixuppages.end(); ++iter){ + for(auto iter = mixuppages.cbegin(); iter != mixuppages.cend(); ++iter){ if(iter->modified){ // Multipart upload std::unique_ptr s3fscurl_para(new S3fsCurl(true)); @@ -2837,7 +2837,7 @@ std::string S3fsCurl::CalcSignatureV2(const std::string& method, const std::stri const void* key = secret_access_key.data(); size_t key_len = secret_access_key.size(); - const unsigned char* sdata = reinterpret_cast(StringToSign.data()); + const auto* sdata = reinterpret_cast(StringToSign.data()); size_t sdata_len = StringToSign.size(); unsigned int md_len = 0; @@ -2883,7 +2883,7 @@ std::string S3fsCurl::CalcSignature(const std::string& method, const std::string std::unique_ptr kService = s3fs_HMAC256(kRegion.get(), kRegion_len, reinterpret_cast("s3"), sizeof("s3") - 1, 
&kService_len); std::unique_ptr kSigning = s3fs_HMAC256(kService.get(), kService_len, reinterpret_cast("aws4_request"), sizeof("aws4_request") - 1, &kSigning_len); - const unsigned char* cRequest = reinterpret_cast(StringCQ.c_str()); + const auto* cRequest = reinterpret_cast(StringCQ.c_str()); size_t cRequest_len = StringCQ.size(); sha256_t sRequest; s3fs_sha256(cRequest, cRequest_len, &sRequest); @@ -2893,7 +2893,7 @@ std::string S3fsCurl::CalcSignature(const std::string& method, const std::string StringToSign += strdate + "/" + endpoint + "/s3/aws4_request\n"; StringToSign += s3fs_hex_lower(sRequest.data(), sRequest.size()); - const unsigned char* cscope = reinterpret_cast(StringToSign.c_str()); + const auto* cscope = reinterpret_cast(StringToSign.c_str()); size_t cscope_len = StringToSign.size(); unsigned int md_len = 0; @@ -3413,7 +3413,7 @@ int S3fsCurl::HeadRequest(const char* tpath, headers_t& meta) // file exists in s3 // fixme: clean this up. meta.clear(); - for(headers_t::iterator iter = responseHeaders.begin(); iter != responseHeaders.end(); ++iter){ + for(auto iter = responseHeaders.cbegin(); iter != responseHeaders.cend(); ++iter){ std::string key = lower(iter->first); std::string value = iter->second; if(key == "content-type"){ @@ -3455,7 +3455,7 @@ int S3fsCurl::PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy) requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str()); // Make request headers - for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ + for(auto iter = meta.cbegin(); iter != meta.cend(); ++iter){ std::string key = lower(iter->first); std::string value = iter->second; if(is_prefix(key.c_str(), "x-amz-acl")){ @@ -3592,7 +3592,7 @@ int S3fsCurl::PutRequest(const char* tpath, headers_t& meta, int fd) std::string contype = S3fsCurl::LookupMimeType(tpath); requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str()); - for(headers_t::iterator iter = 
meta.begin(); iter != meta.end(); ++iter){ + for(auto iter = meta.cbegin(); iter != meta.cend(); ++iter){ std::string key = lower(iter->first); std::string value = iter->second; if(is_prefix(key.c_str(), "x-amz-acl")){ @@ -3917,7 +3917,7 @@ int S3fsCurl::PreMultipartPostRequest(const char* tpath, headers_t& meta, std::s std::string contype = S3fsCurl::LookupMimeType(tpath); - for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ + for(auto iter = meta.cbegin(); iter != meta.cend(); ++iter){ std::string key = lower(iter->first); std::string value = iter->second; if(is_prefix(key.c_str(), "x-amz-acl")){ @@ -4007,7 +4007,7 @@ int S3fsCurl::CompleteMultipartPostRequest(const char* tpath, const std::string& // make contents std::string postContent; postContent += "\n"; - for(auto it = parts.begin(); it != parts.end(); ++it){ + for(auto it = parts.cbegin(); it != parts.cend(); ++it){ if(it->etag.empty()){ S3FS_PRN_ERR("%d file part is not finished uploading.", it->part_num); return -EIO; @@ -4323,7 +4323,7 @@ int S3fsCurl::CopyMultipartPostSetup(const char* from, const char* to, int part_ requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str()); // Make request headers - for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ + for(auto iter = meta.cbegin(); iter != meta.cend(); ++iter){ std::string key = lower(iter->first); std::string value = iter->second; if(key == "x-amz-copy-source"){ @@ -4362,8 +4362,8 @@ int S3fsCurl::CopyMultipartPostSetup(const char* from, const char* to, int part_ bool S3fsCurl::UploadMultipartPostComplete() { - headers_t::iterator it = responseHeaders.find("ETag"); - if (it == responseHeaders.end()) { + auto it = responseHeaders.find("ETag"); + if (it == responseHeaders.cend()) { return false; } std::string etag = peeloff(it->second); diff --git a/src/curl_multi.cpp b/src/curl_multi.cpp index 3683d4f..813a221 100644 --- a/src/curl_multi.cpp +++ b/src/curl_multi.cpp @@ -45,8 
+45,8 @@ S3fsMultiCurl::~S3fsMultiCurl() bool S3fsMultiCurl::ClearEx(bool is_all) { - s3fscurllist_t::iterator iter; - for(iter = clist_req.begin(); iter != clist_req.end(); ++iter){ + s3fscurllist_t::const_iterator iter; + for(iter = clist_req.cbegin(); iter != clist_req.cend(); ++iter){ S3fsCurl* s3fscurl = iter->get(); if(s3fscurl){ s3fscurl->DestroyCurlHandle(); @@ -55,7 +55,7 @@ bool S3fsMultiCurl::ClearEx(bool is_all) clist_req.clear(); if(is_all){ - for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){ + for(iter = clist_all.cbegin(); iter != clist_all.cend(); ++iter){ S3fsCurl* s3fscurl = iter->get(); s3fscurl->DestroyCurlHandle(); } @@ -119,7 +119,7 @@ int S3fsMultiCurl::MultiPerform() bool isMultiHead = false; Semaphore sem(GetMaxParallelism()); - for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ++iter) { + for(auto iter = clist_req.cbegin(); iter != clist_req.cend(); ++iter) { S3fsCurl* s3fscurl = iter->get(); if(!s3fscurl){ continue; @@ -177,7 +177,7 @@ int S3fsMultiCurl::MultiRead() { int result = 0; - for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ){ + for(auto iter = clist_req.begin(); iter != clist_req.end(); ){ std::unique_ptr s3fscurl(std::move(*iter)); bool isRetry = false; @@ -284,7 +284,7 @@ int S3fsMultiCurl::MultiRead() if(!not_abort && 0 != result){ // If an EIO error has already occurred, clear all retry objects. 
- for(s3fscurllist_t::iterator iter = clist_all.begin(); iter != clist_all.end(); ++iter){ + for(auto iter = clist_all.cbegin(); iter != clist_all.cend(); ++iter){ S3fsCurl* s3fscurl = iter->get(); s3fscurl->DestroyCurlHandle(); } @@ -305,8 +305,7 @@ int S3fsMultiCurl::Request() while(!clist_all.empty()){ // set curl handle to multi handle int result; - s3fscurllist_t::iterator iter; - for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){ + for(auto iter = clist_all.begin(); iter != clist_all.end(); ++iter){ clist_req.push_back(std::move(*iter)); } clist_all.clear(); diff --git a/src/fdcache.cpp b/src/fdcache.cpp index d2a83aa..82729f0 100644 --- a/src/fdcache.cpp +++ b/src/fdcache.cpp @@ -460,7 +460,7 @@ FdManager::FdManager() FdManager::~FdManager() { if(this == FdManager::get()){ - for(fdent_map_t::iterator iter = fent.begin(); fent.end() != iter; ++iter){ + for(auto iter = fent.cbegin(); fent.cend() != iter; ++iter){ FdEntity* ent = (*iter).second.get(); S3FS_PRN_WARN("To exit with the cache file opened: path=%s, refcnt=%d", ent->GetPath().c_str(), ent->GetOpenCount()); } @@ -481,25 +481,25 @@ FdEntity* FdManager::GetFdEntityHasLock(const char* path, int& existfd, bool new UpdateEntityToTempPath(); - fdent_map_t::iterator iter = fent.find(path); - if(fent.end() != iter && iter->second){ + auto fiter = fent.find(path); + if(fent.cend() != fiter && fiter->second){ if(-1 == existfd){ if(newfd){ - existfd = iter->second->OpenPseudoFd(O_RDWR); // [NOTE] O_RDWR flags + existfd = fiter->second->OpenPseudoFd(O_RDWR); // [NOTE] O_RDWR flags } - return iter->second.get(); + return fiter->second.get(); }else{ - if(iter->second->FindPseudoFd(existfd)){ + if(fiter->second->FindPseudoFd(existfd)){ if(newfd){ - existfd = iter->second->Dup(existfd); + existfd = fiter->second->Dup(existfd); } - return iter->second.get(); + return fiter->second.get(); } } } if(-1 != existfd){ - for(iter = fent.begin(); iter != fent.end(); ++iter){ + for(auto iter = fent.cbegin(); iter 
!= fent.cend(); ++iter){ if(iter->second && iter->second->FindPseudoFd(existfd)){ // found opened fd in map if(iter->second->GetPath() == path){ @@ -518,7 +518,7 @@ FdEntity* FdManager::GetFdEntityHasLock(const char* path, int& existfd, bool new // If the cache directory is not specified, s3fs opens a temporary file // when the file is opened. if(!FdManager::IsCacheDir()){ - for(iter = fent.begin(); iter != fent.end(); ++iter){ + for(auto iter = fent.cbegin(); iter != fent.cend(); ++iter){ if(iter->second && iter->second->IsOpen() && iter->second->GetPath() == path){ return iter->second.get(); } @@ -540,7 +540,7 @@ FdEntity* FdManager::Open(int& fd, const char* path, const headers_t* pmeta, off UpdateEntityToTempPath(); // search in mapping by key(path) - fdent_map_t::iterator iter = fent.find(path); + auto iter = fent.find(path); if(fent.end() == iter && !force_tmpfile && !FdManager::IsCacheDir()){ // If the cache directory is not specified, s3fs opens a temporary file // when the file is opened. @@ -628,7 +628,7 @@ FdEntity* FdManager::GetExistFdEntity(const char* path, int existfd) UpdateEntityToTempPath(); // search from all entity. - for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){ + for(auto iter = fent.cbegin(); iter != fent.cend(); ++iter){ if(iter->second && iter->second->FindPseudoFd(existfd)){ // found existfd in entity return iter->second.get(); @@ -662,7 +662,7 @@ int FdManager::GetPseudoFdCount(const char* path) UpdateEntityToTempPath(); // search from all entity. 
- for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){ + for(auto iter = fent.cbegin(); iter != fent.cend(); ++iter){ if(iter->second && iter->second->GetPath() == path){ // found the entity for the path return iter->second->GetOpenCount(); @@ -678,7 +678,7 @@ void FdManager::Rename(const std::string &from, const std::string &to) UpdateEntityToTempPath(); - fdent_map_t::iterator iter = fent.find(from); + auto iter = fent.find(from); if(fent.end() == iter && !FdManager::IsCacheDir()){ // If the cache directory is not specified, s3fs opens a temporary file // when the file is opened. @@ -724,7 +724,7 @@ bool FdManager::Close(FdEntity* ent, int fd) UpdateEntityToTempPath(); - for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){ + for(auto iter = fent.cbegin(); iter != fent.cend(); ++iter){ if(iter->second.get() == ent){ ent->Close(fd); if(!ent->IsOpen()){ @@ -732,7 +732,7 @@ bool FdManager::Close(FdEntity* ent, int fd) iter = fent.erase(iter); // check another key name for entity value to be on the safe side - for(; iter != fent.end(); ){ + for(; iter != fent.cend(); ){ if(iter->second.get() == ent){ iter = fent.erase(iter); }else{ @@ -757,15 +757,15 @@ bool FdManager::UpdateEntityToTempPath() { const std::lock_guard lock(FdManager::except_entmap_lock); - for(fdent_direct_map_t::iterator except_iter = except_fent.begin(); except_iter != except_fent.end(); ){ + for(auto except_iter = except_fent.cbegin(); except_iter != except_fent.cend(); ){ std::string tmppath; FdManager::MakeRandomTempPath(except_iter->first.c_str(), tmppath); - fdent_map_t::iterator iter = fent.find(except_iter->first); - if(fent.end() != iter && iter->second.get() == except_iter->second){ + auto iter = fent.find(except_iter->first); + if(fent.cend() != iter && iter->second.get() == except_iter->second){ // Move the entry to the new key fent[tmppath] = std::move(iter->second); - iter = fent.erase(iter); + fent.erase(iter); except_iter = 
except_fent.erase(except_iter); }else{ // [NOTE] @@ -838,8 +838,8 @@ void FdManager::CleanupCacheDirInternal(const std::string &path) } UpdateEntityToTempPath(); - fdent_map_t::iterator iter = fent.find(next_path); - if(fent.end() == iter) { + auto iter = fent.find(next_path); + if(fent.cend() == iter) { S3FS_PRN_DBG("cleaned up: %s", next_path.c_str()); FdManager::DeleteCacheFile(next_path.c_str()); } @@ -950,8 +950,8 @@ bool FdManager::RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const UpdateEntityToTempPath(); - fdent_map_t::iterator iter = fent.find(object_file_path); - if(fent.end() != iter){ + auto iter = fent.find(object_file_path); + if(fent.cend() != iter){ // This file is opened now, then we need to put warning message. strOpenedWarn = CACHEDBG_FMT_WARN_OPEN; } @@ -1007,14 +1007,14 @@ bool FdManager::RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str()); if(!warn_area_list.empty()){ S3FS_PRN_CACHE(fp, CACHEDBG_FMT_WARN_HEAD); - for(fdpage_list_t::const_iterator witer = warn_area_list.begin(); witer != warn_area_list.end(); ++witer){ + for(auto witer = warn_area_list.cbegin(); witer != warn_area_list.cend(); ++witer){ S3FS_PRN_CACHE(fp, CACHEDBG_FMT_PROB_BLOCK, static_cast(witer->offset), static_cast(witer->bytes)); } } if(!err_area_list.empty()){ ++err_file_cnt; S3FS_PRN_CACHE(fp, CACHEDBG_FMT_ERR_HEAD); - for(fdpage_list_t::const_iterator eiter = err_area_list.begin(); eiter != err_area_list.end(); ++eiter){ + for(auto eiter = err_area_list.cbegin(); eiter != err_area_list.cend(); ++eiter){ S3FS_PRN_CACHE(fp, CACHEDBG_FMT_PROB_BLOCK, static_cast(eiter->offset), static_cast(eiter->bytes)); } } diff --git a/src/fdcache_entity.cpp b/src/fdcache_entity.cpp index 7067b4e..c4c440a 100644 --- a/src/fdcache_entity.cpp +++ b/src/fdcache_entity.cpp @@ -188,8 +188,8 @@ void FdEntity::Close(int fd) 
S3FS_PRN_DBG("[path=%s][pseudo_fd=%d][physical_fd=%d]", path.c_str(), fd, physical_fd); // search pseudo fd and close it. - fdinfo_map_t::iterator iter = pseudo_fd_map.find(fd); - if(pseudo_fd_map.end() != iter){ + auto iter = pseudo_fd_map.find(fd); + if(pseudo_fd_map.cend() != iter){ pseudo_fd_map.erase(iter); }else{ S3FS_PRN_WARN("Not found pseudo_fd(%d) in entity object(%s)", fd, path.c_str()); @@ -235,8 +235,8 @@ int FdEntity::DupWithLock(int fd) if(-1 == physical_fd){ return -1; } - fdinfo_map_t::iterator iter = pseudo_fd_map.find(fd); - if(pseudo_fd_map.end() == iter){ + auto iter = pseudo_fd_map.find(fd); + if(pseudo_fd_map.cend() == iter){ S3FS_PRN_ERR("Not found pseudo_fd(%d) in entity object(%s) for physical_fd(%d)", fd, path.c_str(), physical_fd); return -1; } @@ -287,7 +287,7 @@ int FdEntity::OpenMirrorFile() } // create seed generating mirror file name - unsigned int seed = static_cast(time(nullptr)); + auto seed = static_cast(time(nullptr)); int urandom_fd; if(-1 != (urandom_fd = open("/dev/urandom", O_RDONLY))){ unsigned int rand_data; @@ -332,7 +332,7 @@ bool FdEntity::FindPseudoFdWithLock(int fd) const if(-1 == fd){ return false; } - if(pseudo_fd_map.end() == pseudo_fd_map.find(fd)){ + if(pseudo_fd_map.cend() == pseudo_fd_map.find(fd)){ return false; } return true; @@ -343,8 +343,8 @@ PseudoFdInfo* FdEntity::CheckPseudoFdFlags(int fd, bool writable) if(-1 == fd){ return nullptr; } - fdinfo_map_t::iterator iter = pseudo_fd_map.find(fd); - if(pseudo_fd_map.end() == iter || nullptr == iter->second){ + auto iter = pseudo_fd_map.find(fd); + if(pseudo_fd_map.cend() == iter || nullptr == iter->second){ return nullptr; } if(writable){ @@ -361,7 +361,7 @@ PseudoFdInfo* FdEntity::CheckPseudoFdFlags(int fd, bool writable) bool FdEntity::IsUploading() { - for(fdinfo_map_t::const_iterator iter = pseudo_fd_map.begin(); iter != pseudo_fd_map.end(); ++iter){ + for(auto iter = pseudo_fd_map.cbegin(); iter != pseudo_fd_map.cend(); ++iter){ const PseudoFdInfo* 
ppseudoinfo = iter->second.get(); if(ppseudoinfo && ppseudoinfo->IsUploading()){ return true; @@ -952,8 +952,8 @@ bool FdEntity::GetXattr(std::string& xattr) const { const std::lock_guard lock(fdent_lock); - headers_t::const_iterator iter = orgmeta.find("x-amz-meta-xattr"); - if(iter == orgmeta.end()){ + auto iter = orgmeta.find("x-amz-meta-xattr"); + if(iter == orgmeta.cend()){ return false; } xattr = iter->second; @@ -1028,7 +1028,7 @@ int FdEntity::Load(off_t start, off_t size, bool is_modified_flag) // check loaded area & load fdpage_list_t unloaded_list; if(0 < pagelist.GetUnloadedPages(unloaded_list, start, size)){ - for(fdpage_list_t::iterator iter = unloaded_list.begin(); iter != unloaded_list.end(); ++iter){ + for(auto iter = unloaded_list.cbegin(); iter != unloaded_list.cend(); ++iter){ if(0 != size && start + size <= iter->offset){ // reached end break; @@ -1110,7 +1110,7 @@ int FdEntity::NoCacheLoadAndPost(PseudoFdInfo* pseudo_obj, off_t start, off_t si } // loop uploading by multipart - for(fdpage_list_t::iterator iter = pagelist.pages.begin(); iter != pagelist.pages.end(); ++iter){ + for(auto iter = pagelist.pages.begin(); iter != pagelist.pages.end(); ++iter){ if(iter->end() < start){ continue; } @@ -1357,8 +1357,8 @@ int FdEntity::RowFlushHasLock(int fd, const char* tpath, bool force_sync) } // check pseudo fd and its flag - fdinfo_map_t::iterator miter = pseudo_fd_map.find(fd); - if(pseudo_fd_map.end() == miter || nullptr == miter->second){ + const auto miter = pseudo_fd_map.find(fd); + if(pseudo_fd_map.cend() == miter || nullptr == miter->second){ return -EBADF; } if(!miter->second->Writable() && !(miter->second->GetFlags() & O_CREAT)){ @@ -1631,7 +1631,7 @@ int FdEntity::RowFlushMixMultipart(PseudoFdInfo* pseudo_obj, const char* tpath) // [TODO] should use parallel downloading // - for(fdpage_list_t::const_iterator iter = dlpages.begin(); iter != dlpages.end(); ++iter){ + for(auto iter = dlpages.cbegin(); iter != dlpages.cend(); ++iter){ if(0 != 
(result = Load(iter->offset, iter->bytes, /*is_modified_flag=*/ true))){ // set loaded and modified flag S3FS_PRN_ERR("failed to get parts(start=%lld, size=%lld) before uploading.", static_cast(iter->offset), static_cast(iter->bytes)); return result; @@ -1774,7 +1774,7 @@ int FdEntity::RowFlushStreamMultipart(PseudoFdInfo* pseudo_obj, const char* tpat // [TODO] // Execute in parallel downloading with multiple thread. // - for(mp_part_list_t::const_iterator download_iter = to_download_list.begin(); download_iter != to_download_list.end(); ++download_iter){ + for(auto download_iter = to_download_list.cbegin(); download_iter != to_download_list.cend(); ++download_iter){ if(0 != (result = Load(download_iter->start, download_iter->size))){ break; } @@ -1814,7 +1814,7 @@ int FdEntity::RowFlushStreamMultipart(PseudoFdInfo* pseudo_obj, const char* tpat // When canceling(overwriting) a part that has already been uploaded, output it. // if(S3fsLog::IsS3fsLogDbg()){ - for(filepart_list_t::const_iterator cancel_iter = cancel_uploaded_list.begin(); cancel_iter != cancel_uploaded_list.end(); ++cancel_iter){ + for(auto cancel_iter = cancel_uploaded_list.cbegin(); cancel_iter != cancel_uploaded_list.cend(); ++cancel_iter){ S3FS_PRN_DBG("Cancel uploaded: start(%lld), size(%lld), part number(%d)", static_cast(cancel_iter->startpos), static_cast(cancel_iter->size), (cancel_iter->petag ? cancel_iter->petag->part_num : -1)); } } @@ -2330,7 +2330,7 @@ bool FdEntity::MergeOrgMeta(headers_t& updatemeta) merge_headers(orgmeta, updatemeta, true); // overwrite all keys // [NOTE] // this is special cases, we remove the key which has empty values. 
- for(headers_t::iterator hiter = orgmeta.begin(); hiter != orgmeta.end(); ){ + for(auto hiter = orgmeta.cbegin(); hiter != orgmeta.cend(); ){ if(hiter->second.empty()){ hiter = orgmeta.erase(hiter); }else{ @@ -2454,7 +2454,7 @@ bool FdEntity::PunchHole(off_t start, size_t size) } // try to punch hole to file - for(fdpage_list_t::const_iterator iter = nodata_pages.begin(); iter != nodata_pages.end(); ++iter){ + for(auto iter = nodata_pages.cbegin(); iter != nodata_pages.cend(); ++iter){ if(0 != fallocate(physical_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, iter->offset, iter->bytes)){ if(ENOSYS == errno || EOPNOTSUPP == errno){ S3FS_PRN_ERR("failed to fallocate for punching hole to file with errno(%d), it maybe the fallocate function is not implemented in this kernel, or the file system does not support FALLOC_FL_PUNCH_HOLE.", errno); diff --git a/src/fdcache_fdinfo.cpp b/src/fdcache_fdinfo.cpp index be5b1c2..a6512c6 100644 --- a/src/fdcache_fdinfo.cpp +++ b/src/fdcache_fdinfo.cpp @@ -333,7 +333,7 @@ bool PseudoFdInfo::GetEtaglist(etaglist_t& list) const } list.clear(); - for(filepart_list_t::const_iterator iter = upload_list.begin(); iter != upload_list.end(); ++iter){ + for(auto iter = upload_list.cbegin(); iter != upload_list.cend(); ++iter){ if(iter->petag){ list.push_back(*(iter->petag)); }else{ @@ -443,7 +443,7 @@ bool PseudoFdInfo::ParallelMultipartUpload(const char* path, const mp_part_list_ return false; } - for(mp_part_list_t::const_iterator iter = mplist.begin(); iter != mplist.end(); ++iter){ + for(auto iter = mplist.cbegin(); iter != mplist.cend(); ++iter){ // Insert upload part etagpair* petag = nullptr; if(!InsertUploadPart(iter->start, iter->size, iter->part_num, is_copy, &petag)){ @@ -452,7 +452,7 @@ bool PseudoFdInfo::ParallelMultipartUpload(const char* path, const mp_part_list_ } // make parameter for my thread - pseudofdinfo_thparam* thargs = new pseudofdinfo_thparam; + auto* thargs = new pseudofdinfo_thparam; thargs->ppseudofdinfo = this; 
thargs->path = SAFESTRPTR(path); thargs->upload_id = tmp_upload_id; @@ -604,7 +604,7 @@ ssize_t PseudoFdInfo::UploadBoundaryLastUntreatedArea(const char* path, headers_ // When canceling(overwriting) a part that has already been uploaded, output it. // if(S3fsLog::IsS3fsLogDbg()){ - for(filepart_list_t::const_iterator cancel_iter = cancel_uploaded_list.begin(); cancel_iter != cancel_uploaded_list.end(); ++cancel_iter){ + for(auto cancel_iter = cancel_uploaded_list.cbegin(); cancel_iter != cancel_uploaded_list.cend(); ++cancel_iter){ S3FS_PRN_DBG("Cancel uploaded: start(%lld), size(%lld), part number(%d)", static_cast(cancel_iter->startpos), static_cast(cancel_iter->size), (cancel_iter->petag ? cancel_iter->petag->part_num : -1)); } } @@ -733,7 +733,7 @@ bool PseudoFdInfo::ExtractUploadPartsFromUntreatedArea(off_t untreated_start, of { const std::lock_guard lock(upload_list_lock); - for(filepart_list_t::iterator cur_iter = upload_list.begin(); cur_iter != upload_list.end(); /* ++cur_iter */){ + for(auto cur_iter = upload_list.begin(); cur_iter != upload_list.end(); /* ++cur_iter */){ // Check overlap if((cur_iter->startpos + cur_iter->size - 1) < aligned_start || (aligned_start + aligned_size - 1) < cur_iter->startpos){ // Areas do not overlap @@ -805,8 +805,8 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list, untreated_list.Duplicate(dup_untreated_list); // Initialize the iterator of each list first - untreated_list_t::iterator dup_untreated_iter = dup_untreated_list.begin(); - filepart_list_t::iterator uploaded_iter = upload_list.begin(); + auto dup_untreated_iter = dup_untreated_list.begin(); + auto uploaded_iter = upload_list.begin(); // // Loop to extract areas to upload and download @@ -889,7 +889,7 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list, // It also assumes that each size of uploaded area must be a maximum upload // size. 
// - filepart_list_t::iterator overlap_uploaded_iter = upload_list.end(); + auto overlap_uploaded_iter = upload_list.end(); for(; uploaded_iter != upload_list.end(); ++uploaded_iter){ if((cur_start < (uploaded_iter->startpos + uploaded_iter->size)) && (uploaded_iter->startpos < (cur_start + cur_size))){ if(overlap_uploaded_iter != upload_list.end()){ @@ -990,7 +990,7 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list, off_t changed_start = cur_start; off_t changed_size = cur_size; bool first_area = true; - for(untreated_list_t::const_iterator tmp_cur_untreated_iter = cur_untreated_list.begin(); tmp_cur_untreated_iter != cur_untreated_list.end(); ++tmp_cur_untreated_iter, first_area = false){ + for(auto tmp_cur_untreated_iter = cur_untreated_list.cbegin(); tmp_cur_untreated_iter != cur_untreated_list.cend(); ++tmp_cur_untreated_iter, first_area = false){ if(tmp_cur_start < tmp_cur_untreated_iter->start){ // // Detected a gap at the start of area @@ -1006,7 +1006,7 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list, // within 5GB and the remaining area after unification is // larger than the minimum multipart upload size. 
// - mp_part_list_t::reverse_iterator copy_riter = to_copy_list.rbegin(); + auto copy_riter = to_copy_list.rbegin(); if( (copy_riter->start + copy_riter->size) == tmp_cur_start && (copy_riter->size + (tmp_cur_untreated_iter->start - tmp_cur_start)) <= FIVE_GB && diff --git a/src/fdcache_page.cpp b/src/fdcache_page.cpp index ba4bc8f..a4df19e 100644 --- a/src/fdcache_page.cpp +++ b/src/fdcache_page.cpp @@ -75,7 +75,7 @@ static void raw_compress_fdpage_list(const fdpage_list_t& pages, fdpage_list_t& fdpage* lastpage = nullptr; fdpage_list_t::iterator add_iter; - for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){ if(0 == iter->bytes){ continue; } @@ -139,7 +139,7 @@ static void compress_fdpage_list(const fdpage_list_t& pages, fdpage_list_t& comp static fdpage_list_t parse_partsize_fdpage_list(const fdpage_list_t& pages, off_t max_partsize) { fdpage_list_t parsed_pages; - for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){ if(iter->modified){ // modified page fdpage tmppage = *iter; @@ -288,7 +288,7 @@ bool PageList::CheckAreaInSparseFile(const struct fdpage& checkpage, const fdpag // bool result = true; - for(fdpage_list_t::const_iterator iter = sparse_list.begin(); iter != sparse_list.end(); ++iter){ + for(auto iter = sparse_list.cbegin(); iter != sparse_list.cend(); ++iter){ off_t check_start = 0; off_t check_bytes = 0; if((iter->offset + iter->bytes) <= checkpage.offset){ @@ -384,14 +384,14 @@ off_t PageList::Size() const if(pages.empty()){ return 0; } - fdpage_list_t::const_reverse_iterator riter = pages.rbegin(); + auto riter = pages.rbegin(); return riter->next(); } bool PageList::Compress() { fdpage* lastpage = nullptr; - for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ){ + for(auto iter = pages.begin(); iter != pages.end(); ){ if(!lastpage){ 
// First item lastpage = &(*iter); @@ -427,7 +427,7 @@ bool PageList::Compress() bool PageList::Parse(off_t new_pos) { - for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){ + for(auto iter = pages.begin(); iter != pages.end(); ++iter){ if(new_pos == iter->offset){ // nothing to do return true; @@ -462,7 +462,7 @@ bool PageList::Resize(off_t size, bool is_loaded, bool is_modified) }else if(size < total){ // cut area - for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ){ + for(auto iter = pages.begin(); iter != pages.end(); ){ if(iter->next() <= size){ ++iter; }else{ @@ -485,7 +485,7 @@ bool PageList::Resize(off_t size, bool is_loaded, bool is_modified) bool PageList::IsPageLoaded(off_t start, off_t size) const { - for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){ if(iter->end() < start){ continue; } @@ -525,7 +525,7 @@ bool PageList::SetPageLoadedStatus(off_t start, off_t size, PageList::page_statu Parse(start + size); // set loaded flag - for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){ + for(auto iter = pages.begin(); iter != pages.end(); ++iter){ if(iter->end() < start){ continue; }else if(start + size <= iter->offset){ @@ -542,7 +542,7 @@ bool PageList::SetPageLoadedStatus(off_t start, off_t size, PageList::page_statu bool PageList::FindUnloadedPage(off_t start, off_t& resstart, off_t& ressize) const { - for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){ if(start <= iter->end()){ if(!iter->loaded && !iter->modified){ // Do not load unloaded and modified areas resstart = iter->offset; @@ -568,7 +568,7 @@ off_t PageList::GetTotalUnloadedPageSize(off_t start, off_t size, off_t limit_si } off_t next = start + size; off_t restsize = 0; - for(fdpage_list_t::const_iterator iter = 
pages.begin(); iter != pages.end(); ++iter){ + for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){ if(iter->next() <= start){ continue; } @@ -609,7 +609,7 @@ size_t PageList::GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start, off } off_t next = start + size; - for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){ if(iter->next() <= start){ continue; } @@ -626,7 +626,7 @@ size_t PageList::GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start, off off_t page_size = page_next - page_start; // add list - fdpage_list_t::reverse_iterator riter = unloaded_list.rbegin(); + auto riter = unloaded_list.rbegin(); if(riter != unloaded_list.rend() && riter->next() == page_start){ // merge to before page riter->bytes += page_size; @@ -657,7 +657,7 @@ bool PageList::GetPageListsForMultipartUpload(fdpage_list_t& dlpages, fdpage_lis compress_fdpage_list_ignore_load(pages, modified_pages, false); fdpage prev_page; - for(fdpage_list_t::const_iterator iter = modified_pages.begin(); iter != modified_pages.end(); ++iter){ + for(auto iter = modified_pages.cbegin(); iter != modified_pages.cend(); ++iter){ if(iter->modified){ // current is modified area if(!prev_page.modified){ @@ -754,7 +754,7 @@ bool PageList::GetNoDataPageLists(fdpage_list_t& nodata_pages, off_t start, size // extract areas without data fdpage_list_t tmp_pagelist; off_t stop_pos = (0L == size ? 
-1 : (start + size)); - for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){ if((iter->offset + iter->bytes) < start){ continue; } @@ -786,7 +786,7 @@ bool PageList::GetNoDataPageLists(fdpage_list_t& nodata_pages, off_t start, size off_t PageList::BytesModified() const { off_t total = 0; - for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){ if(iter->modified){ total += iter->bytes; } @@ -799,7 +799,7 @@ bool PageList::IsModified() const if(is_shrink){ return true; } - for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){ if(iter->modified){ return true; } @@ -811,7 +811,7 @@ bool PageList::ClearAllModified() { is_shrink = false; - for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){ + for(auto iter = pages.begin(); iter != pages.end(); ++iter){ if(iter->modified){ iter->modified = false; } @@ -831,7 +831,7 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode) std::ostringstream ssall; ssall << inode << ":" << Size(); - for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){ + for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){ ssall << "\n" << iter->offset << ":" << iter->bytes << ":" << (iter->loaded ? "1" : "0") << ":" << (iter->modified ? "1" : "0"); } @@ -975,7 +975,7 @@ void PageList::Dump() const int cnt = 0; S3FS_PRN_DBG("pages (shrunk=%s) = {", (is_shrink ? "yes" : "no")); - for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter, ++cnt){ + for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter, ++cnt){ S3FS_PRN_DBG(" [%08d] -> {%014lld - %014lld : %s / %s}", cnt, static_cast(iter->offset), static_cast(iter->bytes), iter->loaded ? 
"loaded" : "unloaded", iter->modified ? "modified" : "not modified"); } S3FS_PRN_DBG("}"); @@ -1016,7 +1016,7 @@ bool PageList::CompareSparseFile(int fd, size_t file_size, fdpage_list_t& err_ar // Compare each pages and sparse_list bool result = true; - for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){ + for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){ if(!PageList::CheckAreaInSparseFile(*iter, sparse_list, fd, err_area_list, warn_area_list)){ result = false; } diff --git a/src/fdcache_pseudofd.cpp b/src/fdcache_pseudofd.cpp index f5941df..c9d9ada 100644 --- a/src/fdcache_pseudofd.cpp +++ b/src/fdcache_pseudofd.cpp @@ -60,7 +60,7 @@ int PseudoFdManager::GetUnusedMinPseudoFd() const int min_fd = MIN_PSEUDOFD_NUMBER; // Look for the first discontinuous value. - for(pseudofd_list_t::const_iterator iter = pseudofd_list.begin(); iter != pseudofd_list.end(); ++iter){ + for(auto iter = pseudofd_list.cbegin(); iter != pseudofd_list.cend(); ++iter){ if(min_fd == (*iter)){ ++min_fd; }else if(min_fd < (*iter)){ @@ -85,7 +85,7 @@ bool PseudoFdManager::ReleasePseudoFd(int fd) { const std::lock_guard lock(pseudofd_list_lock); - for(pseudofd_list_t::iterator iter = pseudofd_list.begin(); iter != pseudofd_list.end(); ++iter){ + for(auto iter = pseudofd_list.begin(); iter != pseudofd_list.end(); ++iter){ if(fd == (*iter)){ pseudofd_list.erase(iter); return true; diff --git a/src/fdcache_untreated.cpp b/src/fdcache_untreated.cpp index 952941c..c62ec5d 100644 --- a/src/fdcache_untreated.cpp +++ b/src/fdcache_untreated.cpp @@ -43,11 +43,11 @@ bool UntreatedParts::AddPart(off_t start, off_t size) ++last_tag; // Check the overlap with the existing part and add the part. 
- for(untreated_list_t::iterator iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){ + for(auto iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){ if(iter->stretch(start, size, last_tag)){ // the part was stretched, thus check if it overlaps with next parts - untreated_list_t::iterator niter = iter; - for(++niter; niter != untreated_list.end(); ){ + auto niter = iter; + for(++niter; niter != untreated_list.cend(); ){ if(!iter->stretch(niter->start, niter->size, last_tag)){ // This next part does not overlap with the current part break; @@ -79,7 +79,7 @@ bool UntreatedParts::RowGetPart(off_t& start, off_t& size, off_t max_size, off_t const std::lock_guard lock(untreated_list_lock); // Check the overlap with the existing part and add the part. - for(untreated_list_t::const_iterator iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){ + for(auto iter = untreated_list.cbegin(); iter != untreated_list.cend(); ++iter){ if(!lastpart || iter->untreated_tag == last_tag){ if(min_size <= iter->size){ if(iter->size <= max_size){ @@ -118,7 +118,7 @@ bool UntreatedParts::ClearParts(off_t start, off_t size) } // Check the overlap with the existing part. - for(untreated_list_t::iterator iter = untreated_list.begin(); iter != untreated_list.end(); ){ + for(auto iter = untreated_list.begin(); iter != untreated_list.end(); ){ if(0 != size && (start + size) <= iter->start){ // clear area is in front of iter area, no more to do. 
break; @@ -166,7 +166,7 @@ bool UntreatedParts::GetLastUpdatePart(off_t& start, off_t& size) const { const std::lock_guard lock(untreated_list_lock); - for(untreated_list_t::const_iterator iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){ + for(auto iter = untreated_list.cbegin(); iter != untreated_list.cend(); ++iter){ if(iter->untreated_tag == last_tag){ start = iter->start; size = iter->size; @@ -186,13 +186,13 @@ bool UntreatedParts::ReplaceLastUpdatePart(off_t start, off_t size) { const std::lock_guard lock(untreated_list_lock); - for(untreated_list_t::iterator iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){ + for(auto iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){ if(iter->untreated_tag == last_tag){ if(0 < size){ iter->start = start; iter->size = size; }else{ - iter = untreated_list.erase(iter); + untreated_list.erase(iter); } return true; } @@ -207,7 +207,7 @@ bool UntreatedParts::RemoveLastUpdatePart() { const std::lock_guard lock(untreated_list_lock); - for(untreated_list_t::iterator iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){ + for(auto iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){ if(iter->untreated_tag == last_tag){ untreated_list.erase(iter); return true; @@ -232,7 +232,7 @@ void UntreatedParts::Dump() const std::lock_guard lock(untreated_list_lock); S3FS_PRN_DBG("untreated list = ["); - for(untreated_list_t::const_iterator iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){ + for(auto iter = untreated_list.cbegin(); iter != untreated_list.cend(); ++iter){ S3FS_PRN_DBG(" {%014lld - %014lld : tag=%ld}", static_cast(iter->start), static_cast(iter->size), iter->untreated_tag); } S3FS_PRN_DBG("]"); diff --git a/src/metaheader.cpp b/src/metaheader.cpp index 2dbd637..532e413 100644 --- a/src/metaheader.cpp +++ b/src/metaheader.cpp @@ -56,7 +56,7 @@ static struct timespec cvt_string_to_time(const char *str) static struct 
timespec get_time(const headers_t& meta, const char *header) { headers_t::const_iterator iter; - if(meta.end() == (iter = meta.find(header))){ + if(meta.cend() == (iter = meta.find(header))){ return DEFAULT_TIMESPEC; } return cvt_string_to_time((*iter).second.c_str()); @@ -112,8 +112,8 @@ off_t get_size(const char *s) off_t get_size(const headers_t& meta) { - headers_t::const_iterator iter = meta.find("Content-Length"); - if(meta.end() == iter){ + auto iter = meta.find("Content-Length"); + if(meta.cend() == iter){ return 0; } return get_size((*iter).second.c_str()); @@ -130,12 +130,12 @@ mode_t get_mode(const headers_t& meta, const std::string& strpath, bool checkdir bool isS3sync = false; headers_t::const_iterator iter; - if(meta.end() != (iter = meta.find("x-amz-meta-mode"))){ + if(meta.cend() != (iter = meta.find("x-amz-meta-mode"))){ mode = get_mode((*iter).second.c_str()); - }else if(meta.end() != (iter = meta.find("x-amz-meta-permissions"))){ // for s3sync + }else if(meta.cend() != (iter = meta.find("x-amz-meta-permissions"))){ // for s3sync mode = get_mode((*iter).second.c_str()); isS3sync = true; - }else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-mode"))){ // for GCS + }else if(meta.cend() != (iter = meta.find("x-amz-meta-goog-reserved-posix-mode"))){ // for GCS mode = get_mode((*iter).second.c_str(), 8); }else{ // If another tool creates an object without permissions, default to owner @@ -152,7 +152,7 @@ mode_t get_mode(const headers_t& meta, const std::string& strpath, bool checkdir if(forcedir){ mode |= S_IFDIR; }else{ - if(meta.end() != (iter = meta.find("Content-Type"))){ + if(meta.cend() != (iter = meta.find("Content-Type"))){ std::string strConType = (*iter).second; // Leave just the mime type, remove any optional parameters (eg charset) std::string::size_type pos = strConType.find(';'); @@ -212,11 +212,11 @@ uid_t get_uid(const char *s) uid_t get_uid(const headers_t& meta) { headers_t::const_iterator iter; - if(meta.end() != 
(iter = meta.find("x-amz-meta-uid"))){ + if(meta.cend() != (iter = meta.find("x-amz-meta-uid"))){ return get_uid((*iter).second.c_str()); - }else if(meta.end() != (iter = meta.find("x-amz-meta-owner"))){ // for s3sync + }else if(meta.cend() != (iter = meta.find("x-amz-meta-owner"))){ // for s3sync return get_uid((*iter).second.c_str()); - }else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-uid"))){ // for GCS + }else if(meta.cend() != (iter = meta.find("x-amz-meta-goog-reserved-posix-uid"))){ // for GCS return get_uid((*iter).second.c_str()); }else{ return geteuid(); @@ -231,11 +231,11 @@ gid_t get_gid(const char *s) gid_t get_gid(const headers_t& meta) { headers_t::const_iterator iter; - if(meta.end() != (iter = meta.find("x-amz-meta-gid"))){ + if(meta.cend() != (iter = meta.find("x-amz-meta-gid"))){ return get_gid((*iter).second.c_str()); - }else if(meta.end() != (iter = meta.find("x-amz-meta-group"))){ // for s3sync + }else if(meta.cend() != (iter = meta.find("x-amz-meta-group"))){ // for s3sync return get_gid((*iter).second.c_str()); - }else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-gid"))){ // for GCS + }else if(meta.cend() != (iter = meta.find("x-amz-meta-goog-reserved-posix-gid"))){ // for GCS return get_gid((*iter).second.c_str()); }else{ return getegid(); @@ -269,8 +269,8 @@ time_t get_lastmodified(const char* s) time_t get_lastmodified(const headers_t& meta) { - headers_t::const_iterator iter = meta.find("Last-Modified"); - if(meta.end() == iter){ + auto iter = meta.find("Last-Modified"); + if(meta.cend() == iter){ return -1; } return get_lastmodified((*iter).second.c_str()); @@ -290,21 +290,21 @@ bool is_need_check_obj_detail(const headers_t& meta) return false; } // if the object has x-amz-meta information, checking is no more. 
- if(meta.end() != meta.find("x-amz-meta-mode") || - meta.end() != meta.find("x-amz-meta-mtime") || - meta.end() != meta.find("x-amz-meta-ctime") || - meta.end() != meta.find("x-amz-meta-atime") || - meta.end() != meta.find("x-amz-meta-uid") || - meta.end() != meta.find("x-amz-meta-gid") || - meta.end() != meta.find("x-amz-meta-owner") || - meta.end() != meta.find("x-amz-meta-group") || - meta.end() != meta.find("x-amz-meta-permissions") ) + if(meta.cend() != meta.find("x-amz-meta-mode") || + meta.cend() != meta.find("x-amz-meta-mtime") || + meta.cend() != meta.find("x-amz-meta-ctime") || + meta.cend() != meta.find("x-amz-meta-atime") || + meta.cend() != meta.find("x-amz-meta-uid") || + meta.cend() != meta.find("x-amz-meta-gid") || + meta.cend() != meta.find("x-amz-meta-owner") || + meta.cend() != meta.find("x-amz-meta-group") || + meta.cend() != meta.find("x-amz-meta-permissions") ) { return false; } // if there is not Content-Type, or Content-Type is "x-directory", // checking is no more. 
- if(meta.end() == (iter = meta.find("Content-Type"))){ + if(meta.cend() == (iter = meta.find("Content-Type"))){ return false; } if("application/x-directory" == (*iter).second){ @@ -319,8 +319,8 @@ bool is_need_check_obj_detail(const headers_t& meta) bool merge_headers(headers_t& base, const headers_t& additional, bool add_noexist) { bool added = false; - for(headers_t::const_iterator iter = additional.begin(); iter != additional.end(); ++iter){ - if(add_noexist || base.find(iter->first) != base.end()){ + for(auto iter = additional.cbegin(); iter != additional.cend(); ++iter){ + if(add_noexist || base.find(iter->first) != base.cend()){ base[iter->first] = iter->second; added = true; } diff --git a/src/mpu_util.cpp b/src/mpu_util.cpp index 3b3f681..030d16f 100644 --- a/src/mpu_util.cpp +++ b/src/mpu_util.cpp @@ -46,7 +46,7 @@ static void print_incomp_mpu_list(const incomp_mpu_list_t& list) printf("---------------------------------------------------------------\n"); int cnt = 0; - for(incomp_mpu_list_t::const_iterator iter = list.begin(); iter != list.end(); ++iter, ++cnt){ + for(auto iter = list.cbegin(); iter != list.cend(); ++iter, ++cnt){ printf(" Path : %s\n", (*iter).key.c_str()); printf(" UploadId : %s\n", (*iter).id.c_str()); printf(" Date : %s\n", (*iter).date.c_str()); @@ -69,7 +69,7 @@ static bool abort_incomp_mpu_list(const incomp_mpu_list_t& list, time_t abort_ti // do removing. 
S3fsCurl s3fscurl; bool result = true; - for(incomp_mpu_list_t::const_iterator iter = list.begin(); iter != list.end(); ++iter){ + for(auto iter = list.cbegin(); iter != list.cend(); ++iter){ const char* tpath = (*iter).key.c_str(); std::string upload_id = (*iter).id; diff --git a/src/openssl_auth.cpp b/src/openssl_auth.cpp index c63e188..1e2d4ec 100644 --- a/src/openssl_auth.cpp +++ b/src/openssl_auth.cpp @@ -105,8 +105,7 @@ static unsigned long s3fs_crypt_get_threadid() static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line) __attribute__ ((unused)); static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line) { - struct CRYPTO_dynlock_value* dyndata = new CRYPTO_dynlock_value(); - return dyndata; + return new CRYPTO_dynlock_value(); } static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line) __attribute__ ((unused)) NO_THREAD_SAFETY_ANALYSIS; @@ -329,7 +328,7 @@ bool s3fs_sha256(const unsigned char* data, size_t datalen, sha256_t* digest) EVP_MD_CTX* mdctx = EVP_MD_CTX_create(); EVP_DigestInit_ex(mdctx, md, nullptr); EVP_DigestUpdate(mdctx, data, datalen); - unsigned int digestlen = static_cast(digest->size()); + auto digestlen = static_cast(digest->size()); EVP_DigestFinal_ex(mdctx, digest->data(), &digestlen); EVP_MD_CTX_destroy(mdctx); diff --git a/src/s3fs.cpp b/src/s3fs.cpp index 0a98649..d45d2ab 100644 --- a/src/s3fs.cpp +++ b/src/s3fs.cpp @@ -254,7 +254,7 @@ int SyncFiller::SufficiencyFill(const std::vector& pathlist) const std::lock_guard lock(filler_lock); int result = 0; - for(std::vector::const_iterator it = pathlist.begin(); it != pathlist.end(); ++it) { + for(auto it = pathlist.cbegin(); it != pathlist.cend(); ++it) { if(filled.insert(*it).second){ if(0 != filler_func(filler_buff, it->c_str(), nullptr, 0)){ result = 1; @@ -846,7 +846,7 @@ bool get_object_sse_type(const char* path, sse_type_t& ssetype, std::string& sse ssetype = 
sse_type_t::SSE_DISABLE; ssevalue.clear(); - for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){ + for(auto iter = meta.cbegin(); iter != meta.cend(); ++iter){ std::string key = (*iter).first; if(0 == strcasecmp(key.c_str(), "x-amz-server-side-encryption") && 0 == strcasecmp((*iter).second.c_str(), "AES256")){ ssetype = sse_type_t::SSE_S3; @@ -1389,7 +1389,7 @@ static int s3fs_symlink(const char* _from, const char* _to) } // write(without space words) strFrom = trim(from); - ssize_t from_size = static_cast(strFrom.length()); + auto from_size = static_cast(strFrom.length()); ssize_t ressize; if(from_size != (ressize = ent->Write(autoent.GetPseudoFd(), strFrom.c_str(), 0, from_size))){ if(ressize < 0){ @@ -1690,7 +1690,7 @@ static int rename_directory(const char* from, const char* to) S3ObjList::MakeHierarchizedList(headlist, false); // add hierarchized dir. s3obj_list_t::const_iterator liter; - for(liter = headlist.begin(); headlist.end() != liter; ++liter){ + for(liter = headlist.cbegin(); headlist.cend() != liter; ++liter){ // make "from" and "to" object name. 
std::string from_name = basepath + (*liter); std::string to_name = strto + (*liter); @@ -1752,7 +1752,7 @@ static int rename_directory(const char* from, const char* to) // iterate over the list - copy the files with rename_object // does a safe copy - copies first and then deletes old - for(auto mn_cur = mvnodes.begin(); mn_cur != mvnodes.end(); ++mn_cur){ + for(auto mn_cur = mvnodes.cbegin(); mn_cur != mvnodes.cend(); ++mn_cur){ if(!mn_cur->is_dir){ if(!nocopyapi && !norenameapi){ result = rename_object(mn_cur->old_path.c_str(), mn_cur->new_path.c_str(), false); // keep ctime @@ -3139,7 +3139,7 @@ static bool multi_head_callback(S3fsCurl* s3fscurl, void* param) bpath = s3fs_wtf8_decode(bpath); } if(param){ - SyncFiller* pcbparam = reinterpret_cast(param); + auto* pcbparam = reinterpret_cast(param); struct stat st; if(StatCache::getStatCacheData()->GetStat(saved_path, &st)){ pcbparam->Fill(bpath.c_str(), &st, 0); @@ -3173,7 +3173,7 @@ static bool multi_head_notfound_callback(S3fsCurl* s3fscurl, void* param) } // set path to not found list - struct multi_head_notfound_callback_param* pcbparam = reinterpret_cast(param); + auto* pcbparam = reinterpret_cast(param); const std::lock_guard lock(pcbparam->list_lock); pcbparam->notfound_list.push_back(s3fscurl->GetBasePath()); @@ -3243,7 +3243,7 @@ static int readdir_multi_head(const char* path, const S3ObjList& head, void* buf } // Make single head request(with max). 
- for(s3obj_list_t::iterator iter = headlist.begin(); headlist.end() != iter; ++iter){ + for(auto iter = headlist.cbegin(); headlist.cend() != iter; ++iter){ std::string disppath = path + (*iter); std::string etag = head.GetETag((*iter).c_str()); struct stat st; @@ -3310,7 +3310,7 @@ static int readdir_multi_head(const char* path, const S3ObjList& head, void* buf dummy_header["x-amz-meta-ctime"] = "0"; dummy_header["x-amz-meta-mtime"] = "0"; - for(s3obj_list_t::iterator reiter = notfound_param.notfound_list.begin(); reiter != notfound_param.notfound_list.end(); ++reiter){ + for(auto reiter = notfound_param.notfound_list.cbegin(); reiter != notfound_param.notfound_list.cend(); ++reiter){ int dir_result; const std::string& dirpath = *reiter; if(-ENOTEMPTY == (dir_result = directory_empty(dirpath.c_str()))){ @@ -3542,7 +3542,7 @@ static bool get_meta_xattr_value(const char* path, std::string& rawvalue) } headers_t::const_iterator iter; - if(meta.end() == (iter = meta.find("x-amz-meta-xattr"))){ + if(meta.cend() == (iter = meta.find("x-amz-meta-xattr"))){ return false; } rawvalue = iter->second; @@ -3586,7 +3586,7 @@ static bool get_xattr_posix_key_value(const char* path, std::string& xattrvalue, } xattrs_t::iterator iter; - if(xattrs.end() == (iter = xattrs.find(targetkey))){ + if(xattrs.cend() == (iter = xattrs.find(targetkey))){ return false; } @@ -3696,7 +3696,7 @@ static std::string raw_build_xattrs(const xattrs_t& xattrs) { std::string strxattrs; bool is_set = false; - for(xattrs_t::const_iterator iter = xattrs.begin(); iter != xattrs.end(); ++iter){ + for(auto iter = xattrs.cbegin(); iter != xattrs.cend(); ++iter){ if(is_set){ strxattrs += ','; }else{ @@ -3732,7 +3732,7 @@ static int set_xattrs_to_header(headers_t& meta, const char* name, const char* v xattrs_t xattrs; headers_t::iterator iter; - if(meta.end() == (iter = meta.find("x-amz-meta-xattr"))){ + if(meta.cend() == (iter = meta.find("x-amz-meta-xattr"))){ #if defined(XATTR_REPLACE) if(XATTR_REPLACE == 
(flags & XATTR_REPLACE)){ // there is no xattr header but flags is replace, so failure. @@ -3948,8 +3948,8 @@ static int s3fs_getxattr(const char* path, const char* name, char* value, size_t } // get xattrs - headers_t::iterator hiter = meta.find("x-amz-meta-xattr"); - if(meta.end() == hiter){ + auto hiter = meta.find("x-amz-meta-xattr"); + if(meta.cend() == hiter){ // object does not have xattrs return -ENOATTR; } @@ -3961,8 +3961,8 @@ static int s3fs_getxattr(const char* path, const char* name, char* value, size_t // search name std::string strname = name; - xattrs_t::iterator xiter = xattrs.find(strname); - if(xattrs.end() == xiter){ + auto xiter = xattrs.find(strname); + if(xattrs.cend() == xiter){ // not found name in xattrs return -ENOATTR; } @@ -4008,7 +4008,7 @@ static int s3fs_listxattr(const char* path, char* list, size_t size) // get xattrs headers_t::iterator iter; - if(meta.end() == (iter = meta.find("x-amz-meta-xattr"))){ + if(meta.cend() == (iter = meta.find("x-amz-meta-xattr"))){ // object does not have xattrs return 0; } @@ -4020,7 +4020,7 @@ static int s3fs_listxattr(const char* path, char* list, size_t size) // calculate total name length size_t total = 0; - for(xattrs_t::const_iterator xiter = xattrs.begin(); xiter != xattrs.end(); ++xiter){ + for(auto xiter = xattrs.cbegin(); xiter != xattrs.cend(); ++xiter){ if(!xiter->first.empty()){ total += xiter->first.length() + 1; } @@ -4040,7 +4040,7 @@ static int s3fs_listxattr(const char* path, char* list, size_t size) // copy to list char* setpos = list; - for(xattrs_t::const_iterator xiter = xattrs.begin(); xiter != xattrs.end(); ++xiter){ + for(auto xiter = xattrs.cbegin(); xiter != xattrs.cend(); ++xiter){ if(!xiter->first.empty()){ strcpy(setpos, xiter->first.c_str()); setpos = &setpos[strlen(setpos) + 1]; @@ -4090,8 +4090,8 @@ static int s3fs_removexattr(const char* path, const char* name) } // get xattrs - headers_t::iterator hiter = meta.find("x-amz-meta-xattr"); - if(meta.end() == hiter){ + 
auto hiter = meta.find("x-amz-meta-xattr"); + if(meta.cend() == hiter){ // object does not have xattrs return -ENOATTR; } @@ -4101,8 +4101,8 @@ static int s3fs_removexattr(const char* path, const char* name) // check name xattrs std::string strname = name; - xattrs_t::iterator xiter = xattrs.find(strname); - if(xattrs.end() == xiter){ + auto xiter = xattrs.find(strname); + if(xattrs.cend() == xiter){ return -ENOATTR; } @@ -5067,7 +5067,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar return 0; } else if(is_prefix(arg, "readwrite_timeout=")){ - time_t rwtimeout = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char), /*base=*/ 10)); + auto rwtimeout = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char), /*base=*/ 10)); S3fsCurl::SetReadwriteTimeout(rwtimeout); return 0; } @@ -5081,19 +5081,19 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar return 0; } else if(is_prefix(arg, "max_stat_cache_size=")){ - unsigned long cache_size = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char), 10)); + auto cache_size = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char), 10)); StatCache::getStatCacheData()->SetCacheSize(cache_size); return 0; } else if(is_prefix(arg, "stat_cache_expire=")){ - time_t expr_time = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char), 10)); + auto expr_time = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char), 10)); StatCache::getStatCacheData()->SetExpireTime(expr_time); return 0; } // [NOTE] // This option is for compatibility old version. 
else if(is_prefix(arg, "stat_cache_interval_expire=")){ - time_t expr_time = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char), /*base=*/ 10)); + auto expr_time = static_cast(cvt_strtoofft(strchr(arg, '=') + sizeof(char), /*base=*/ 10)); StatCache::getStatCacheData()->SetExpireTime(expr_time, true); return 0; } diff --git a/src/s3fs_cred.cpp b/src/s3fs_cred.cpp index ebe574a..0cc3e31 100644 --- a/src/s3fs_cred.cpp +++ b/src/s3fs_cred.cpp @@ -637,7 +637,6 @@ bool S3fsCred::ParseS3fsPasswdFile(bucketkvmap_t& resmap) std::string line; size_t first_pos; readline_t linelist; - readline_t::iterator iter; // open passwd file std::ifstream PF(passwd_file.c_str()); @@ -668,7 +667,7 @@ bool S3fsCred::ParseS3fsPasswdFile(bucketkvmap_t& resmap) // read '=' type kvmap_t kv; - for(iter = linelist.begin(); iter != linelist.end(); ++iter){ + for(auto iter = linelist.cbegin(); iter != linelist.cend(); ++iter){ first_pos = iter->find_first_of('='); if(first_pos == std::string::npos){ continue; @@ -679,7 +678,7 @@ bool S3fsCred::ParseS3fsPasswdFile(bucketkvmap_t& resmap) if(key.empty()){ continue; } - if(kv.end() != kv.find(key)){ + if(kv.cend() != kv.find(key)){ S3FS_PRN_WARN("same key name(%s) found in passwd file, skip this.", key.c_str()); continue; } @@ -689,7 +688,7 @@ bool S3fsCred::ParseS3fsPasswdFile(bucketkvmap_t& resmap) resmap[S3fsCred::KEYVAL_FIELDS_TYPE] = kv; // read ':' type - for(iter = linelist.begin(); iter != linelist.end(); ++iter){ + for(auto iter = linelist.cbegin(); iter != linelist.cend(); ++iter){ first_pos = iter->find_first_of(':'); size_t last_pos = iter->find_last_of(':'); if(first_pos == std::string::npos){ @@ -709,7 +708,7 @@ bool S3fsCred::ParseS3fsPasswdFile(bucketkvmap_t& resmap) accesskey = trim(iter->substr(0, first_pos)); secret = trim(iter->substr(first_pos + 1, std::string::npos)); } - if(resmap.end() != resmap.find(bucketname)){ + if(resmap.cend() != resmap.find(bucketname)){ S3FS_PRN_EXIT("there are multiple entries for the same 
bucket(%s) in the passwd file.", (bucketname.empty() ? "default" : bucketname.c_str())); return false; } @@ -759,8 +758,8 @@ bool S3fsCred::ReadS3fsPasswdFile() // // check key=value type format. // - bucketkvmap_t::iterator it = bucketmap.find(S3fsCred::KEYVAL_FIELDS_TYPE); - if(bucketmap.end() != it){ + auto it = bucketmap.find(S3fsCred::KEYVAL_FIELDS_TYPE); + if(bucketmap.cend() != it){ // aws format std::string access_key_id; std::string secret_access_key; @@ -778,19 +777,19 @@ bool S3fsCred::ReadS3fsPasswdFile() } std::string bucket_key = S3fsCred::ALLBUCKET_FIELDS_TYPE; - if(!S3fsCred::bucket_name.empty() && bucketmap.end() != bucketmap.find(S3fsCred::bucket_name)){ + if(!S3fsCred::bucket_name.empty() && bucketmap.cend() != bucketmap.find(S3fsCred::bucket_name)){ bucket_key = S3fsCred::bucket_name; } it = bucketmap.find(bucket_key); - if(bucketmap.end() == it){ + if(bucketmap.cend() == it){ S3FS_PRN_EXIT("Not found access key/secret key in passwd file."); return false; } keyval = it->second; - kvmap_t::iterator aws_accesskeyid_it = keyval.find(S3fsCred::AWS_ACCESSKEYID); - kvmap_t::iterator aws_secretkey_it = keyval.find(S3fsCred::AWS_SECRETKEY); - if(keyval.end() == aws_accesskeyid_it || keyval.end() == aws_secretkey_it){ + auto aws_accesskeyid_it = keyval.find(S3fsCred::AWS_ACCESSKEYID); + auto aws_secretkey_it = keyval.find(S3fsCred::AWS_SECRETKEY); + if(keyval.cend() == aws_accesskeyid_it || keyval.cend() == aws_secretkey_it){ S3FS_PRN_EXIT("Not found access key/secret key in passwd file."); return false; } @@ -815,12 +814,12 @@ int S3fsCred::CheckS3fsCredentialAwsFormat(const kvmap_t& kvmap, std::string& ac if(kvmap.empty()){ return 0; } - kvmap_t::const_iterator str1_it = kvmap.find(str1); - kvmap_t::const_iterator str2_it = kvmap.find(str2); - if(kvmap.end() == str1_it && kvmap.end() == str2_it){ + auto str1_it = kvmap.find(str1); + auto str2_it = kvmap.find(str2); + if(kvmap.cend() == str1_it && kvmap.cend() == str2_it){ return 0; } - if(kvmap.end() == 
str1_it || kvmap.end() == str2_it){ + if(kvmap.cend() == str1_it || kvmap.cend() == str2_it){ S3FS_PRN_EXIT("AWSAccesskey or AWSSecretkey is not specified."); return -1; } diff --git a/src/s3objlist.cpp b/src/s3objlist.cpp index 8df4e16..6ab1011 100644 --- a/src/s3objlist.cpp +++ b/src/s3objlist.cpp @@ -64,13 +64,13 @@ bool S3ObjList::insert(const char* name, const char* etag, bool is_dir) // Check derived name object. if(is_dir){ std::string chkname = newname.substr(0, newname.length() - 1); - if(objects.end() != (iter = objects.find(chkname))){ + if(objects.cend() != (iter = objects.find(chkname))){ // found "dir" object --> remove it. objects.erase(iter); } }else{ std::string chkname = newname + "/"; - if(objects.end() != (iter = objects.find(chkname))){ + if(objects.cend() != (iter = objects.find(chkname))){ // found "dir/" object --> not add new object. // and add normalization return insert_normalized(orgname.c_str(), chkname.c_str(), true); @@ -78,7 +78,7 @@ bool S3ObjList::insert(const char* name, const char* etag, bool is_dir) } // Add object - if(objects.end() != (iter = objects.find(newname))){ + if(objects.cend() != (iter = objects.find(newname))){ // Found same object --> update information. 
(*iter).second.normalname.clear(); (*iter).second.orgname = orgname; @@ -111,7 +111,7 @@ bool S3ObjList::insert_normalized(const char* name, const char* normalized, bool } s3obj_t::iterator iter; - if(objects.end() != (iter = objects.find(name))){ + if(objects.cend() != (iter = objects.find(name))){ // found name --> over write iter->second.orgname.clear(); iter->second.etag.clear(); @@ -134,7 +134,7 @@ const s3obj_entry* S3ObjList::GetS3Obj(const char* name) const if(!name || '\0' == name[0]){ return nullptr; } - if(objects.end() == (iter = objects.find(name))){ + if(objects.cend() == (iter = objects.find(name))){ return nullptr; } return &((*iter).second); @@ -196,7 +196,7 @@ bool S3ObjList::GetLastName(std::string& lastname) const { bool result = false; lastname = ""; - for(s3obj_t::const_iterator iter = objects.begin(); iter != objects.end(); ++iter){ + for(auto iter = objects.cbegin(); iter != objects.cend(); ++iter){ if(!iter->second.orgname.empty()){ if(lastname.compare(iter->second.orgname) < 0){ lastname = (*iter).second.orgname; @@ -216,7 +216,7 @@ bool S3ObjList::GetNameList(s3obj_list_t& list, bool OnlyNormalized, bool CutSla { s3obj_t::const_iterator iter; - for(iter = objects.begin(); objects.end() != iter; ++iter){ + for(iter = objects.cbegin(); objects.cend() != iter; ++iter){ if(OnlyNormalized && !iter->second.normalname.empty()){ continue; } @@ -235,10 +235,8 @@ typedef std::map s3obj_h_t; bool S3ObjList::MakeHierarchizedList(s3obj_list_t& list, bool haveSlash) { s3obj_h_t h_map; - s3obj_h_t::iterator hiter; - s3obj_list_t::const_iterator liter; - for(liter = list.begin(); list.end() != liter; ++liter){ + for(auto liter = list.cbegin(); list.cend() != liter; ++liter){ std::string strtmp = (*liter); if(1 < strtmp.length() && '/' == *strtmp.rbegin()){ strtmp.erase(strtmp.length() - 1); @@ -251,7 +249,7 @@ bool S3ObjList::MakeHierarchizedList(s3obj_list_t& list, bool haveSlash) if(strtmp.empty() || "/" == strtmp){ break; } - if(h_map.end() == 
h_map.find(strtmp)){ + if(h_map.cend() == h_map.find(strtmp)){ // not found h_map[strtmp] = false; } @@ -259,7 +257,7 @@ bool S3ObjList::MakeHierarchizedList(s3obj_list_t& list, bool haveSlash) } // check map and add lost hierarchized directory. - for(hiter = h_map.begin(); hiter != h_map.end(); ++hiter){ + for(auto hiter = h_map.cbegin(); hiter != h_map.cend(); ++hiter){ if(false == (*hiter).second){ // add hierarchized directory. std::string strtmp = (*hiter).first; diff --git a/src/s3objlist.h b/src/s3objlist.h index 80146e3..e980bc2 100644 --- a/src/s3objlist.h +++ b/src/s3objlist.h @@ -50,8 +50,8 @@ class S3ObjList bool insert_normalized(const char* name, const char* normalized, bool is_dir); const s3obj_entry* GetS3Obj(const char* name) const; - s3obj_t::const_iterator begin() const { return objects.begin(); } - s3obj_t::const_iterator end() const { return objects.end(); } + s3obj_t::const_iterator cbegin() const { return objects.cbegin(); } + s3obj_t::const_iterator cend() const { return objects.cend(); } public: bool IsEmpty() const { return objects.empty(); } diff --git a/src/string_util.cpp b/src/string_util.cpp index 8f8ab1c..765562f 100644 --- a/src/string_util.cpp +++ b/src/string_util.cpp @@ -126,7 +126,7 @@ std::string trim(std::string s, const char *t /* = SPACES */) std::string peeloff(const std::string& s) { - if(s.size() < 2 || *s.begin() != '"' || *s.rbegin() != '"'){ + if(s.size() < 2 || *s.cbegin() != '"' || *s.rbegin() != '"'){ return s; } return s.substr(1, s.size() - 2); diff --git a/src/types.h b/src/types.h index 5109677..bf0a0ed 100644 --- a/src/types.h +++ b/src/types.h @@ -311,7 +311,7 @@ typedef std::vector mp_part_list_t; inline off_t total_mp_part_list(const mp_part_list_t& mplist) { off_t size = 0; - for(mp_part_list_t::const_iterator iter = mplist.begin(); iter != mplist.end(); ++iter){ + for(auto iter = mplist.cbegin(); iter != mplist.cend(); ++iter){ size += iter->size; } return size; diff --git a/test/truncate_read_file.cc 
b/test/truncate_read_file.cc index 0dd890e..015a1b7 100644 --- a/test/truncate_read_file.cc +++ b/test/truncate_read_file.cc @@ -40,7 +40,7 @@ int main(int argc, const char *argv[]) } const char* filepath = argv[1]; - off_t size = static_cast(strtoull(argv[2], nullptr, 10)); + auto size = static_cast(strtoull(argv[2], nullptr, 10)); int fd; // open file diff --git a/test/write_multiblock.cc b/test/write_multiblock.cc index 15c2169..70c3d78 100644 --- a/test/write_multiblock.cc +++ b/test/write_multiblock.cc @@ -129,7 +129,7 @@ static bool parse_write_blocks(const char* pstr, wbpart_list_t& wbparts, off_t& return false; } - for(strlist_t::const_iterator iter = partlist.begin(); iter != partlist.end(); ++iter){ + for(auto iter = partlist.cbegin(); iter != partlist.cend(); ++iter){ strlist_t partpair; if(parse_string(iter->c_str(), ':', partpair) && 2 == partpair.size()){ write_block_part tmp_part; @@ -206,7 +206,7 @@ int main(int argc, char** argv) // make data and buffer std::unique_ptr pData = create_random_data(max_size); - for(strlist_t::const_iterator fiter = files.begin(); fiter != files.end(); ++fiter){ + for(auto fiter = files.cbegin(); fiter != files.cend(); ++fiter){ // open/create file int fd; struct stat st; @@ -227,7 +227,7 @@ int main(int argc, char** argv) } // write blocks - for(wbpart_list_t::const_iterator piter = wbparts.begin(); piter != wbparts.end(); ++piter){ + for(auto piter = wbparts.cbegin(); piter != wbparts.cend(); ++piter){ // write one block for(ssize_t writepos = 0, writecnt = 0; writepos < piter->size; writepos += writecnt){ if(-1 == (writecnt = pwrite(fd, &(pData[writepos]), static_cast(piter->size - writepos), (piter->start + writepos)))){