Configure cppcheck #224

Takeshi Nakatani 2015-08-12 15:04:16 +00:00
parent 2482aada43
commit 756d1e5e81
6 changed files with 41 additions and 32 deletions

View File

@@ -28,3 +28,13 @@ dist-hook:
release : dist ../utils/release.sh
../utils/release.sh $(DIST_ARCHIVES)
+cppcheck:
+	cppcheck --quiet --error-exitcode=1 \
+	-U CURLE_PEER_FAILED_VERIFICATION \
+	--enable=warning \
+	--enable=performance \
+	--enable=portability \
+	--enable=information \
+	--enable=missingInclude \
+	--suppress=missingIncludeSystem \
+	src/ test/

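The new target can be invoked as "make cppcheck". Reading the flags: --quiet prints only findings, --error-exitcode=1 makes the run exit non-zero when any enabled check reports something (so it can gate a build), the --enable options turn on the warning, performance, portability, information and missingInclude groups on top of the default error checks, --suppress=missingIncludeSystem silences reports about system headers cppcheck cannot locate, and -U CURLE_PEER_FAILED_VERIFICATION excludes preprocessor configurations in which that name is defined, so code guarded by a matching #ifdef is not analyzed. A minimal sketch of the -U behaviour, using a made-up macro rather than anything from this repository:

#include <cstdio>

int main(void)
{
#ifdef HAVE_FANCY_FEATURE
  // With "cppcheck -U HAVE_FANCY_FEATURE" this branch is excluded from
  // analysis instead of being checked in a configuration that assumes
  // the macro is defined.
  std::printf("fancy feature enabled\n");
#else
  std::printf("fancy feature disabled\n");
#endif
  return 0;
}
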
View File

@@ -330,7 +330,7 @@ bool StatCache::AddNoObjectCache(string& key)
bool StatCache::TruncateCache(void)
{
-if(0 == stat_cache.size()){
+if(stat_cache.empty()){
return true;
}
@@ -340,7 +340,7 @@ bool StatCache::TruncateCache(void)
stat_cache_t::iterator iter_to_delete = stat_cache.end();
stat_cache_t::iterator iter;
-for(iter = stat_cache.begin(); iter != stat_cache.end(); iter++) {
+for(iter = stat_cache.begin(); iter != stat_cache.end(); ++iter) {
if((*iter).second){
if(lowest_time > (*iter).second->cache_date){
lowest_time = (*iter).second->cache_date;
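
The size()-to-empty() rewrites here and in the later files correspond to cppcheck's stlSize performance check: empty() asks the question directly and is guaranteed constant time, whereas size() was only required to be constant time from C++11 onward and could be linear for some older list implementations. A minimal sketch with a made-up container, not one of the s3fs caches:

#include <list>
#include <string>

static bool has_pending(const std::list<std::string>& pending)
{
  // Reported by cppcheck (stlSize) when written as "0 < pending.size()":
  // possibly inefficient emptiness check.
  return !pending.empty();
}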

View File

@@ -879,7 +879,7 @@ bool S3fsCurl::LoadEnvSseKeys(void)
//
bool S3fsCurl::GetSseKey(string& md5, string& ssekey)
{
-for(sseckeylist_t::const_iterator iter = S3fsCurl::sseckeys.begin(); iter != S3fsCurl::sseckeys.end(); iter++){
+for(sseckeylist_t::const_iterator iter = S3fsCurl::sseckeys.begin(); iter != S3fsCurl::sseckeys.end(); ++iter){
if(0 == md5.length() || md5 == (*iter).begin()->first){
md5 = iter->begin()->first;
ssekey = iter->begin()->second;
@@ -898,7 +898,7 @@ bool S3fsCurl::GetSseKeyMd5(int pos, string& md5)
return false;
}
int cnt = 0;
-for(sseckeylist_t::const_iterator iter = S3fsCurl::sseckeys.begin(); iter != S3fsCurl::sseckeys.end(); iter++, cnt++){
+for(sseckeylist_t::const_iterator iter = S3fsCurl::sseckeys.begin(); iter != S3fsCurl::sseckeys.end(); ++iter, ++cnt){
if(pos == cnt){
md5 = iter->begin()->first;
return true;
@@ -1756,7 +1756,7 @@ int S3fsCurl::RequestPerform(void)
FPRNNN("This check can be over-ridden by using the -o ssl_verify_hostname=0");
FPRNNN("The certificate will still be checked but the hostname will not be verified.");
FPRNNN("A more secure method would be to use a bucket name without periods.");
-}else
+}else{
DPRNNN("my_curl_easy_perform: curlCode: %d -- %s", curlCode, curl_easy_strerror(curlCode));
+}
exit(EXIT_FAILURE);
@@ -3565,7 +3565,7 @@ int S3fsMultiCurl::Request(void)
// Send multi request loop( with retry )
// (When many request is sends, sometimes gets "Couldn't connect to server")
//
-while(0 < cMap_all.size()){
+while(!cMap_all.empty()){
// populate the multi interface with an initial set of requests
if(NULL == (hMulti = curl_multi_init())){
Clear();
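
The braces added around the DPRNNN() call also remove an easy misreading: with an unbraced else, only the first statement belongs to the branch, and a following line (here the unconditional exit) can look conditional when it is not. A minimal illustration with made-up helpers, not repository code:

#include <cstdio>
#include <cstdlib>

// Illustration only: check() and its messages are made up.
static void check(bool ok)
{
  if(ok){
    std::printf("request succeeded\n");
  }else
    std::printf("request failed\n");
    std::exit(EXIT_FAILURE);  // indented like the branch, but always runs
}

int main(void)
{
  check(true);  // prints "request succeeded", then still exits with failure
  return 0;
}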

View File

@@ -212,7 +212,7 @@ PageList::~PageList()
off_t PageList::Size(void) const
{
-if(0 == pages.size()){
+if(pages.empty()){
return 0;
}
fdpage_list_t::const_reverse_iterator riter = pages.rbegin();
@@ -246,7 +246,7 @@ int PageList::Resize(off_t size, bool is_init)
}
}else if(total > size){
-for(fdpage_list_t::reverse_iterator riter = pages.rbegin(); riter != pages.rend(); riter++){
+for(fdpage_list_t::reverse_iterator riter = pages.rbegin(); riter != pages.rend(); ++riter){
if((*riter)->offset < size){
(*riter)->bytes = static_cast<size_t>(size - (*riter)->offset);
break;
@@ -276,7 +276,7 @@ bool PageList::IsInit(off_t start, off_t size)
{
off_t next = start + size;
-if(0 == pages.size()){
+if(pages.empty()){
return false;
}
// check end
@@ -285,7 +285,7 @@ bool PageList::IsInit(off_t start, off_t size)
// size is over end of page list.
return false;
}
-for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); iter++){
+for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){
if(next <= (*iter)->offset){
break;
}
@@ -309,7 +309,7 @@ bool PageList::SetInit(off_t start, off_t size, bool is_init)
}
off_t next = start + size;
-for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); iter++){
+for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){
if((*iter)->end() < start){
// out of area
// iter:start < iter:end < start < end
@@ -333,7 +333,7 @@ bool PageList::SetInit(off_t start, off_t size, bool is_init)
bool PageList::FindUninitPage(off_t start, off_t& resstart, size_t& ressize)
{
-for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); iter++){
+for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){
if(start <= (*iter)->end()){
if(!(*iter)->init){
resstart = (*iter)->offset;
@@ -347,7 +347,7 @@ bool PageList::FindUninitPage(off_t start, off_t& resstart, size_t& ressize)
int PageList::GetUninitPages(fdpage_list_t& uninit_list, off_t start, off_t size)
{
-for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); iter++){
+for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){
if(start <= (*iter)->end()){
if((start + size) <= (*iter)->offset){
// reach to end
@@ -382,7 +382,7 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output)
stringstream ssall;
ssall << Size();
-for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); iter++){
+for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){
ssall << "\n" << (*iter)->offset << ":" << (*iter)->bytes << ":" << ((*iter)->init ? "1" : "0");
}
@@ -481,7 +481,7 @@ void PageList::Dump(void)
int cnt = 0;
DPRNINFO("pages = {");
-for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); iter++, cnt++){
+for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter, ++cnt){
DPRNINFO(" [%08d] -> {%014jd - %014zu : %s}", cnt, (intmax_t)((*iter)->offset), (*iter)->bytes, (*iter)->init ? "true" : "false");
}
DPRNINFO("}");
@@ -796,7 +796,7 @@ int FdEntity::Load(off_t start, off_t size)
// check loaded area & load
fdpage_list_t uninit_list;
if(0 < pagelist.GetUninitPages(uninit_list, start, size)){
-for(fdpage_list_t::iterator iter = uninit_list.begin(); iter != uninit_list.end(); iter++){
+for(fdpage_list_t::iterator iter = uninit_list.begin(); iter != uninit_list.end(); ++iter){
if(-1 != size && (start + size) <= (*iter)->offset){
break;
}
@@ -1136,7 +1136,7 @@ FdManager::FdManager()
FdManager::~FdManager()
{
if(this == FdManager::get()){
-for(fdent_map_t::iterator iter = fent.begin(); fent.end() != iter; iter++){
+for(fdent_map_t::iterator iter = fent.begin(); fent.end() != iter; ++iter){
FdEntity* ent = (*iter).second;
delete ent;
}
@@ -1170,7 +1170,7 @@ FdEntity* FdManager::GetFdEntity(const char* path, int existfd)
}
if(-1 != existfd){
-for(iter = fent.begin(); iter != fent.end(); iter++){
+for(iter = fent.begin(); iter != fent.end(); ++iter){
if((*iter).second && (*iter).second->GetFd() == existfd){
// found opend fd in map
if(0 == strcmp((*iter).second->GetPath(), path)){
@@ -1249,7 +1249,7 @@ FdEntity* FdManager::ExistOpen(const char* path, int existfd)
// search from all fdentity because of not using cache.
AutoLock auto_lock(&FdManager::fd_manager_lock);
-for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); iter++){
+for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){
if((*iter).second && (*iter).second->GetFd() == existfd && (*iter).second->IsOpen()){
// found opend fd in map
if(0 == strcmp((*iter).second->GetPath(), path)){
@@ -1288,7 +1288,7 @@ bool FdManager::Close(FdEntity* ent)
AutoLock auto_lock(&FdManager::fd_manager_lock);
-for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); iter++){
+for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){
if((*iter).second == ent){
ent->Close();
if(!ent->IsOpen()){
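
The iter++ to ++iter changes throughout this file match cppcheck's postfixOperator performance check: for class-type iterators, postfix increment has to construct and return a copy of the previous value even though a for-loop header never uses it, while prefix increment simply advances and returns the iterator itself. A rough sketch of where that copy comes from, using a made-up iterator rather than the fdpage_list_t iterators above:

// Stripped-down iterator over an int array, for illustration only.
struct IntIter {
  const int* p;

  // Prefix: advance in place and return *this; no temporary needed.
  IntIter& operator++() { ++p; return *this; }

  // Postfix: copy the old state so it can be returned, even when the
  // caller (as in "iter++" inside a for loop) immediately discards it.
  IntIter operator++(int) { IntIter old(*this); ++p; return old; }

  const int& operator*() const { return *p; }
  bool operator!=(const IntIter& other) const { return p != other.p; }
};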

View File

@@ -224,7 +224,6 @@ static bool is_special_name_folder_object(const char* path)
string strpath = path;
headers_t header;
-strpath = path;
if(string::npos == strpath.find("_$folder$", 0)){
if('/' == strpath[strpath.length() - 1]){
strpath = strpath.substr(0, strpath.length() - 1);
@@ -610,7 +609,7 @@ static int check_parent_object_access(const char* path, int mask)
return 0;
}
if(X_OK == (mask & X_OK)){
-for(parent = mydirname(path); 0 < parent.size(); parent = mydirname(parent.c_str())){
+for(parent = mydirname(path); 0 < parent.size(); parent = mydirname(parent)){
if(parent == "."){
parent = "/";
}
@@ -1281,7 +1280,7 @@ static int rename_directory(const char* from, const char* to)
S3ObjList::MakeHierarchizedList(headlist, false); // add hierarchized dir.
s3obj_list_t::const_iterator liter;
-for(liter = headlist.begin(); headlist.end() != liter; liter++){
+for(liter = headlist.begin(); headlist.end() != liter; ++liter){
// make "from" and "to" object name.
string from_name = basepath + (*liter);
string to_name = strto + (*liter);
@@ -2236,7 +2235,7 @@ static int readdir_multi_head(const char* path, S3ObjList& head, void* buf, fuse
curlmulti.SetRetryCallback(multi_head_retry_callback);
// Loop
-while(0 < headlist.size()){
+while(!headlist.empty()){
s3obj_list_t::iterator iter;
long cnt;
@@ -2282,7 +2281,7 @@ static int readdir_multi_head(const char* path, S3ObjList& head, void* buf, fuse
// populate fuse buffer
// here is best posision, because a case is cache size < files in directory
//
-for(iter = fillerlist.begin(); fillerlist.end() != iter; iter++){
+for(iter = fillerlist.begin(); fillerlist.end() != iter; ++iter){
struct stat st;
string bpath = mybasename((*iter));
if(StatCache::getStatCacheData()->GetStat((*iter), &st)){
@@ -3344,11 +3343,11 @@ static void print_uncomp_mp_list(uncomp_mp_list_t& list)
printf("Lists the parts that have been uploaded for a specific multipart upload.\n");
printf("\n");
-if(0 < list.size()){
+if(!list.empty()){
printf("---------------------------------------------------------------\n");
int cnt = 0;
-for(uncomp_mp_list_t::iterator iter = list.begin(); iter != list.end(); iter++, cnt++){
+for(uncomp_mp_list_t::iterator iter = list.begin(); iter != list.end(); ++iter, ++cnt){
printf(" Path : %s\n", (*iter).key.c_str());
printf(" UploadId : %s\n", (*iter).id.c_str());
printf(" Date : %s\n", (*iter).date.c_str());
@@ -3365,7 +3364,7 @@ static bool abort_uncomp_mp_list(uncomp_mp_list_t& list)
{
char buff[1024];
-if(0 >= list.size()){
+if(list.empty()){
return true;
}
memset(buff, 0, sizeof(buff));
@@ -3386,7 +3385,7 @@ static bool abort_uncomp_mp_list(uncomp_mp_list_t& list)
// do removing their.
S3fsCurl s3fscurl;
bool result = true;
-for(uncomp_mp_list_t::iterator iter = list.begin(); iter != list.end(); iter++){
+for(uncomp_mp_list_t::iterator iter = list.begin(); iter != list.end(); ++iter){
const char* tpath = (*iter).key.c_str();
string upload_id = (*iter).id;
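
Two of the hunks above are plain redundancy removals that cppcheck reports: the second "strpath = path;" repeated the initialization a couple of lines above it, and "mydirname(parent.c_str())" converted a std::string to a C string only for a std::string to be built again for the call (assuming mydirname() accepts a std::string here, which is the pattern cppcheck's redundant-c_str() performance check looks for). A minimal sketch with made-up names, not the real mydirname():

#include <string>

// Stand-in for any function taking a std::string parameter.
static std::string parent_of(const std::string& path)
{
  return path.substr(0, path.rfind('/'));
}

static std::string caller(const std::string& path)
{
  // Reported by cppcheck when written as parent_of(path.c_str()):
  // the c_str() result is only used to construct another std::string.
  return parent_of(path);
}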

View File

@@ -233,7 +233,7 @@ bool S3ObjList::GetLastName(std::string& lastname) const
{
bool result = false;
lastname = "";
-for(s3obj_t::const_iterator iter = objects.begin(); iter != objects.end(); iter++){
+for(s3obj_t::const_iterator iter = objects.begin(); iter != objects.end(); ++iter){
if((*iter).second.orgname.length()){
if(0 > strcmp(lastname.c_str(), (*iter).second.orgname.c_str())){
lastname = (*iter).second.orgname;
@@ -253,7 +253,7 @@ bool S3ObjList::GetNameList(s3obj_list_t& list, bool OnlyNormalized, bool CutSla
{
s3obj_t::const_iterator iter;
-for(iter = objects.begin(); objects.end() != iter; iter++){
+for(iter = objects.begin(); objects.end() != iter; ++iter){
if(OnlyNormalized && 0 != (*iter).second.normalname.length()){
continue;
}
@@ -275,7 +275,7 @@ bool S3ObjList::MakeHierarchizedList(s3obj_list_t& list, bool haveSlash)
s3obj_h_t::iterator hiter;
s3obj_list_t::const_iterator liter;
-for(liter = list.begin(); list.end() != liter; liter++){
+for(liter = list.begin(); list.end() != liter; ++liter){
string strtmp = (*liter);
if(1 < strtmp.length() && '/' == strtmp[strtmp.length() - 1]){
strtmp = strtmp.substr(0, strtmp.length() - 1);