Mirror of https://github.com/s3fs-fuse/s3fs-fuse.git (synced 2025-01-08 23:54:14 +00:00)
Return more specific errno when available (#1520)
Previously s3fs threw away some function return values and returned EIO instead. This was due to not trusting the mix of -1 and errno return codes. Correct the obviously incorrect ones via visual inspection. Stronger typing may find more occurrences. Fixes #1519.
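
The change follows the error-handling convention used throughout s3fs (and expected by FUSE): a helper returns 0 on success and a negative errno value on failure, and callers propagate that value unchanged instead of collapsing every failure into -EIO. Because FUSE reports a handler's negative return value as the errno seen by the calling application, the specific error (for example EINVAL or ENOTDIR) reaches the user instead of a blanket "Input/output error". A minimal sketch of the pattern, assuming hypothetical helpers check_args() and do_request() (not code from this patch):

#include <cerrno>

// Sketch only: report failures as negative errno values, never as a bare -1.
static int check_args(const char* path)
{
    if(!path){
        return -EINVAL;    // invalid argument, not a generic I/O error
    }
    return 0;
}

static int do_request(const char* path)
{
    int result;
    if(0 != (result = check_args(path))){
        return result;     // propagate the callee's errno unchanged
    }
    // ... perform the request; genuine transport failures still map to -EIO ...
    return 0;
}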
parent a6563211af
commit 4d833a4fb9
src/curl.cpp (60 lines changed)

@@ -1314,7 +1314,7 @@ int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta,
 S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath);
 close(fd2);
 delete s3fscurl_para;
-return -1;
+return -EIO;
 }
 remaining_bytes -= chunk;
 }
@@ -1432,7 +1432,7 @@ int S3fsCurl::ParallelMixMultipartUploadRequest(const char* tpath, headers_t& me
 S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath);
 close(fd2);
 delete s3fscurl_para;
-return -1;
+return -EIO;
 }
 }

@@ -1521,7 +1521,7 @@ int S3fsCurl::ParallelGetObjectRequest(const char* tpath, int fd, off_t start, o
 if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
 S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath);
 delete s3fscurl_para;
-return -1;
+return -EIO;
 }
 }

@@ -2672,10 +2672,10 @@ int S3fsCurl::DeleteRequest(const char* tpath)
 S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath));

 if(!tpath){
-return -1;
+return -EINVAL;
 }
 if(!CreateCurlHandle()){
-return -1;
+return -EIO;
 }
 std::string resource;
 std::string turl;
@@ -3013,10 +3013,10 @@ int S3fsCurl::PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy)
 S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath));

 if(!tpath){
-return -1;
+return -EINVAL;
 }
 if(!CreateCurlHandle()){
-return -1;
+return -EIO;
 }
 std::string resource;
 std::string turl;
@@ -3127,7 +3127,7 @@ int S3fsCurl::PutRequest(const char* tpath, headers_t& meta, int fd)
 S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath));

 if(!tpath){
-return -1;
+return -EINVAL;
 }
 if(-1 != fd){
 // duplicate fd
@@ -3149,7 +3149,7 @@ int S3fsCurl::PutRequest(const char* tpath, headers_t& meta, int fd)
 if(file){
 fclose(file);
 }
-return -1;
+return -EIO;
 }
 std::string resource;
 std::string turl;
@@ -3237,7 +3237,7 @@ int S3fsCurl::PreGetObjectRequest(const char* tpath, int fd, off_t start, off_t
 S3FS_PRN_INFO3("[tpath=%s][start=%lld][size=%lld]", SAFESTRPTR(tpath), static_cast<long long>(start), static_cast<long long>(size));

 if(!tpath || -1 == fd || 0 > start || 0 > size){
-return -1;
+return -EINVAL;
 }

 std::string resource;
@@ -3289,7 +3289,7 @@ int S3fsCurl::GetObjectRequest(const char* tpath, int fd, off_t start, off_t siz
 S3FS_PRN_INFO3("[tpath=%s][start=%lld][size=%lld]", SAFESTRPTR(tpath), static_cast<long long>(start), static_cast<long long>(size));

 if(!tpath){
-return -1;
+return -EINVAL;
 }
 sse_type_t ssetype = sse_type_t::SSE_DISABLE;
 std::string ssevalue;
@@ -3302,7 +3302,7 @@ int S3fsCurl::GetObjectRequest(const char* tpath, int fd, off_t start, off_t siz
 }
 if(!fpLazySetup || !fpLazySetup(this)){
 S3FS_PRN_ERR("Failed to lazy setup in single get object request.");
-return -1;
+return -EIO;
 }

 S3FS_PRN_INFO3("downloading... [path=%s][fd=%d]", tpath, fd);
@@ -3318,7 +3318,7 @@ int S3fsCurl::CheckBucket()
 S3FS_PRN_INFO3("check a bucket.");

 if(!CreateCurlHandle()){
-return -1;
+return -EIO;
 }
 std::string resource;
 std::string turl;
@@ -3351,10 +3351,10 @@ int S3fsCurl::ListBucketRequest(const char* tpath, const char* query)
 S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath));

 if(!tpath){
-return -1;
+return -EINVAL;
 }
 if(!CreateCurlHandle()){
-return -1;
+return -EIO;
 }
 std::string resource;
 std::string turl;
@@ -3400,10 +3400,10 @@ int S3fsCurl::PreMultipartPostRequest(const char* tpath, headers_t& meta, std::s
 S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath));

 if(!tpath){
-return -1;
+return -EINVAL;
 }
 if(!CreateCurlHandle()){
-return -1;
+return -EIO;
 }
 std::string resource;
 std::string turl;
@@ -3488,7 +3488,7 @@ int S3fsCurl::PreMultipartPostRequest(const char* tpath, headers_t& meta, std::s

 if(!simple_parse_xml(bodydata.str(), bodydata.size(), "UploadId", upload_id)){
 bodydata.Clear();
-return -1;
+return -EIO;
 }

 bodydata.Clear();
@@ -3500,7 +3500,7 @@ int S3fsCurl::CompleteMultipartPostRequest(const char* tpath, const std::string&
 S3FS_PRN_INFO3("[tpath=%s][parts=%zu]", SAFESTRPTR(tpath), parts.size());

 if(!tpath){
-return -1;
+return -EINVAL;
 }

 // make contents
@@ -3510,7 +3510,7 @@ int S3fsCurl::CompleteMultipartPostRequest(const char* tpath, const std::string&
 for(etaglist_t::iterator it = parts.begin(); it != parts.end(); ++it, ++cnt){
 if(it->empty()){
 S3FS_PRN_ERR("%d file part is not finished uploading.", cnt + 1);
-return -1;
+return -EIO;
 }
 postContent += "<Part>\n";
 postContent += " <PartNumber>" + str(cnt + 1) + "</PartNumber>\n";
@@ -3526,7 +3526,7 @@ int S3fsCurl::CompleteMultipartPostRequest(const char* tpath, const std::string&
 b_postdata_remaining = postdata_remaining;

 if(!CreateCurlHandle()){
-return -1;
+return -EIO;
 }
 std::string resource;
 std::string turl;
@@ -3573,7 +3573,7 @@ int S3fsCurl::MultipartListRequest(std::string& body)
 S3FS_PRN_INFO3("list request(multipart)");

 if(!CreateCurlHandle()){
-return -1;
+return -EIO;
 }
 std::string resource;
 std::string turl;
@@ -3614,10 +3614,10 @@ int S3fsCurl::AbortMultipartUpload(const char* tpath, const std::string& upload_
 S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath));

 if(!tpath){
-return -1;
+return -EINVAL;
 }
 if(!CreateCurlHandle()){
-return -1;
+return -EIO;
 }
 std::string resource;
 std::string turl;
@@ -3659,7 +3659,7 @@ int S3fsCurl::UploadMultipartPostSetup(const char* tpath, int part_num, const st
 S3FS_PRN_INFO3("[tpath=%s][start=%lld][size=%lld][part=%d]", SAFESTRPTR(tpath), static_cast<long long int>(partdata.startpos), static_cast<long long int>(partdata.size), part_num);

 if(-1 == partdata.fd || -1 == partdata.startpos || -1 == partdata.size){
-return -1;
+return -EINVAL;
 }

 requestHeaders = NULL;
@@ -3669,7 +3669,7 @@ int S3fsCurl::UploadMultipartPostSetup(const char* tpath, int part_num, const st
 unsigned char *md5raw = s3fs_md5_fd(partdata.fd, partdata.startpos, partdata.size);
 if(md5raw == NULL){
 S3FS_PRN_ERR("Could not make md5 for file(part %d)", part_num);
-return -1;
+return -EIO;
 }
 partdata.etag = s3fs_hex(md5raw, get_md5_digest_length());
 char* md5base64p = s3fs_base64(md5raw, get_md5_digest_length());
@@ -3724,7 +3724,7 @@ int S3fsCurl::UploadMultipartPostRequest(const char* tpath, int part_num, const

 if(!fpLazySetup || !fpLazySetup(this)){
 S3FS_PRN_ERR("Failed to lazy setup in multipart upload post request.");
-return -1;
+return -EIO;
 }

 // request
@@ -3745,7 +3745,7 @@ int S3fsCurl::CopyMultipartPostSetup(const char* from, const char* to, int part_
 S3FS_PRN_INFO3("[from=%s][to=%s][part=%d]", SAFESTRPTR(from), SAFESTRPTR(to), part_num);

 if(!from || !to){
-return -1;
+return -EINVAL;
 }
 query_string = "partNumber=" + str(part_num) + "&uploadId=" + upload_id;
 std::string urlargs = "?" + query_string;
@@ -3892,7 +3892,7 @@ int S3fsCurl::MultipartHeadRequest(const char* tpath, off_t size, headers_t& met
 if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
 S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath);
 delete s3fscurl_para;
-return -1;
+return -EIO;
 }
 }

@@ -4062,7 +4062,7 @@ int S3fsCurl::MultipartRenameRequest(const char* from, const char* to, headers_t
 if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
 S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", to);
 delete s3fscurl_para;
-return -1;
+return -EIO;
 }
 }

src/s3fs.cpp (59 lines changed)

@@ -1004,7 +1004,7 @@ static int create_directory_object(const char* path, mode_t mode, time_t atime,
 S3FS_PRN_INFO1("[path=%s][mode=%04o][atime=%lld][mtime=%lld][ctime=%lld][uid=%u][gid=%u]", path, mode, static_cast<long long>(atime), static_cast<long long>(ctime), static_cast<long long>(mtime), (unsigned int)uid, (unsigned int)gid);

 if(!path || '\0' == path[0]){
-return -1;
+return -EINVAL;
 }
 std::string tpath = path;
 if('/' != tpath[tpath.length() - 1]){
@@ -1456,7 +1456,7 @@ static int rename_directory(const char* from, const char* to)
 if(0 != (result = clone_directory_object(mn_cur->old_path, mn_cur->new_path, (strfrom == mn_cur->old_path)))){
 S3FS_PRN_ERR("clone_directory_object returned an error(%d)", result);
 free_mvnodes(mn_head);
-return -EIO;
+return result;
 }
 }
 }
@@ -1473,7 +1473,7 @@ static int rename_directory(const char* from, const char* to)
 if(0 != result){
 S3FS_PRN_ERR("rename_object returned an error(%d)", result);
 free_mvnodes(mn_head);
-return -EIO;
+return result;
 }
 }
 }
@@ -1485,7 +1485,7 @@ static int rename_directory(const char* from, const char* to)
 if(0 != (result = s3fs_rmdir(mn_cur->old_path))){
 S3FS_PRN_ERR("s3fs_rmdir returned an error(%d)", result);
 free_mvnodes(mn_head);
-return -EIO;
+return result;
 }
 }else{
 // cache clear.
@@ -1631,16 +1631,16 @@ static int s3fs_chmod(const char* _path, mode_t mode)
 }else{
 // allow to put header
 // updatemeta already merged the orgmeta of the opened files.
-if(0 != put_headers(strpath.c_str(), updatemeta, true)){
-return -EIO;
+if(0 != (result = put_headers(strpath.c_str(), updatemeta, true))){
+return result;
 }
 StatCache::getStatCacheData()->DelStat(nowcache);
 }
 }else{
 // not opened file, then put headers
 merge_headers(meta, updatemeta, true);
-if(0 != put_headers(strpath.c_str(), meta, true)){
-return -EIO;
+if(0 != (result = put_headers(strpath.c_str(), meta, true))){
+return result;
 }
 StatCache::getStatCacheData()->DelStat(nowcache);
 }
@@ -1807,16 +1807,16 @@ static int s3fs_chown(const char* _path, uid_t uid, gid_t gid)
 }else{
 // allow to put header
 // updatemeta already merged the orgmeta of the opened files.
-if(0 != put_headers(strpath.c_str(), updatemeta, true)){
-return -EIO;
+if(0 != (result = put_headers(strpath.c_str(), updatemeta, true))){
+return result;
 }
 StatCache::getStatCacheData()->DelStat(nowcache);
 }
 }else{
 // not opened file, then put headers
 merge_headers(meta, updatemeta, true);
-if(0 != put_headers(strpath.c_str(), meta, true)){
-return -EIO;
+if(0 != (result = put_headers(strpath.c_str(), meta, true))){
+return result;
 }
 StatCache::getStatCacheData()->DelStat(nowcache);
 }
@@ -1987,8 +1987,8 @@ static int s3fs_utimens(const char* _path, const struct timespec ts[2])
 }else{
 // allow to put header
 // updatemeta already merged the orgmeta of the opened files.
-if(0 != put_headers(strpath.c_str(), updatemeta, true)){
-return -EIO;
+if(0 != (result = put_headers(strpath.c_str(), updatemeta, true))){
+return result;
 }
 StatCache::getStatCacheData()->DelStat(nowcache);

@@ -2002,8 +2002,8 @@ static int s3fs_utimens(const char* _path, const struct timespec ts[2])
 }else{
 // not opened file, then put headers
 merge_headers(meta, updatemeta, true);
-if(0 != put_headers(strpath.c_str(), meta, true)){
-return -EIO;
+if(0 != (result = put_headers(strpath.c_str(), meta, true))){
+return result;
 }
 StatCache::getStatCacheData()->DelStat(nowcache);
 }
@@ -2665,12 +2665,12 @@ static int list_bucket(const char* path, S3ObjList& head, const char* delimiter,
 // xmlDocPtr
 if(NULL == (doc = xmlReadMemory(body->str(), static_cast<int>(body->size()), "", NULL, 0))){
 S3FS_PRN_ERR("xmlReadMemory returns with error.");
-return -1;
+return -EIO;
 }
 if(0 != append_objects_from_xml(path, doc, head)){
 S3FS_PRN_ERR("append_objects_from_xml returns with error.");
 xmlFreeDoc(doc);
-return -1;
+return -EIO;
 }
 if(true == (truncated = is_truncated(doc))){
 xmlChar* tmpch = get_next_marker(doc);
@@ -2711,15 +2711,16 @@ static int list_bucket(const char* path, S3ObjList& head, const char* delimiter,
 static int remote_mountpath_exists(const char* path)
 {
 struct stat stbuf;
+int result;

 S3FS_PRN_INFO1("[path=%s]", path);

 // getattr will prefix the path with the remote mountpoint
-if(0 != get_object_attribute("/", &stbuf, NULL)){
-return -1;
+if(0 != (result = get_object_attribute("/", &stbuf, NULL))){
+return result;
 }
 if(!S_ISDIR(stbuf.st_mode)){
-return -1;
+return -ENOTDIR;
 }
 return 0;
 }
@@ -2983,8 +2984,8 @@ static int s3fs_setxattr(const char* path, const char* name, const char* value,
 }else{
 // allow to put header
 // updatemeta already merged the orgmeta of the opened files.
-if(0 != put_headers(strpath.c_str(), updatemeta, true)){
-return -EIO;
+if(0 != (result = put_headers(strpath.c_str(), updatemeta, true))){
+return result;
 }
 StatCache::getStatCacheData()->DelStat(nowcache);
 }
@@ -2997,8 +2998,8 @@ static int s3fs_setxattr(const char* path, const char* name, const char* value,
 return result;
 }

-if(0 != put_headers(strpath.c_str(), meta, true)){
-return -EIO;
+if(0 != (result = put_headers(strpath.c_str(), meta, true))){
+return result;
 }
 StatCache::getStatCacheData()->DelStat(nowcache);
 }
@@ -3261,8 +3262,8 @@ static int s3fs_removexattr(const char* path, const char* name)
 if(updatemeta["x-amz-meta-xattr"].empty()){
 updatemeta.erase("x-amz-meta-xattr");
 }
-if(0 != put_headers(strpath.c_str(), updatemeta, true)){
-return -EIO;
+if(0 != (result = put_headers(strpath.c_str(), updatemeta, true))){
+return result;
 }
 StatCache::getStatCacheData()->DelStat(nowcache);
 }
@@ -3272,8 +3273,8 @@ static int s3fs_removexattr(const char* path, const char* name)
 updatemeta.erase("x-amz-meta-xattr");
 }
 merge_headers(meta, updatemeta, true);
-if(0 != put_headers(strpath.c_str(), meta, true)){
-return -EIO;
+if(0 != (result = put_headers(strpath.c_str(), meta, true))){
+return result;
 }
 StatCache::getStatCacheData()->DelStat(nowcache);
 }
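
The practical effect of the caller-side changes in s3fs.cpp (the put_headers() call sites) is that the error string the application finally sees matches the real failure. A small self-contained illustration, independent of s3fs, using a hypothetical stand-in for a callee such as put_headers():

#include <cerrno>
#include <cstdio>
#include <cstring>

// Hypothetical stand-in: reports a specific failure as a negative errno.
static int fake_put_headers()
{
    return -EACCES;    // e.g. the server rejected the metadata update
}

int main()
{
    int result;
    if(0 != (result = fake_put_headers())){
        // Old behaviour collapsed any failure to -EIO; the new behaviour
        // propagates the specific code via "return result;".
        std::printf("old: %s\n", std::strerror(EIO));       // "Input/output error"
        std::printf("new: %s\n", std::strerror(-result));   // "Permission denied"
    }
    return 0;
}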