Compare commits

...

172 Commits

Author SHA1 Message Date
Andrew Gaul 5c1932f702
Upgrade CI to Ubuntu 24.04 LTS (#2456) 2024-05-12 11:12:25 +09:00
LiuBingrun ccdcccd44c
Fix DeadLock in FdManager::ChangeEntityToTempPath (#2455)
Commit e3b50ad introduced a smart pointer to manage FdEntity.

But in ChangeEntityToTempPath, we should not destroy the entity.

We should move the entry to the temp key.

Signed-off-by: liubingrun <liubr1@chinatelecom.cn>
2024-05-11 11:29:40 +09:00
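A minimal sketch of the re-keying idea, assuming the manager holds a std::map of std::unique_ptr<FdEntity> keyed by path (the map name fent and the helper are illustrative, not the actual s3fs code):

```cpp
#include <map>
#include <memory>
#include <string>
#include <utility>

struct FdEntity { /* ... */ };

// Re-key the entry instead of destroying it: ownership is moved out of the
// map, the old key is erased, and the entity is re-inserted under the temp key.
void move_to_temp_key(std::map<std::string, std::unique_ptr<FdEntity>>& fent,
                      const std::string& path, const std::string& tmpkey)
{
    auto it = fent.find(path);
    if(it == fent.end()){
        return;
    }
    std::unique_ptr<FdEntity> ent = std::move(it->second);  // keep the entity alive
    fent.erase(it);                                         // removes only the old key
    fent[tmpkey] = std::move(ent);                          // re-insert under the temp key
}
```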
Andrew Gaul 3864f58c22
Upgrade CI to Fedora 40 (#2451) 2024-05-11 09:25:05 +09:00
Takeshi Nakatani c36827d1de
Fixed README.md for Github Action Badge URL (#2449) 2024-04-28 20:10:53 +09:00
Takeshi Nakatani e2cc36a37f
Updated COMPILATION.md about compilation on linux (#2445) 2024-04-28 14:31:01 +09:00
Takeshi Nakatani cf6102f91b
Changed due to s3fs-fuse logo change (#2448) 2024-04-28 14:28:24 +09:00
Sébastien Brochet dd6815b90f retry request on HTTP 429 error 2024-04-14 12:09:26 +09:00
Takeshi Nakatani 95026804e9 Support SSL client cert and added ssl_client_cert option 2024-04-14 10:21:48 +09:00
Takeshi Nakatani 9ab5a2ea73 Fixed configure error for GHA:sanitize_thread 2024-03-19 21:37:19 +09:00
Takeshi Nakatani a5cdd05c25 Added ipresolve option 2024-03-13 22:29:17 +09:00
Andrew Gaul 31676f6201
Convert thpoolman_param to value (#2430)
This simplifies memory management.
2024-03-13 21:27:12 +09:00
Andrew Gaul c97f7a2a13
Address clang-tidy 18 warnings (#2428) 2024-03-07 01:04:22 +09:00
Andrew Gaul be54c34ecb
Remove unneeded XML macros (#2427) 2024-03-07 00:45:34 +09:00
Andrew Gaul 79597c7960
Upgrade CI to Alpine 3.19 (#2429) 2024-03-07 00:23:00 +09:00
Andrew Gaul 70a30d6e26 Update ChangeLog and configure.ac for 1.94
Fixes #2420.
2024-02-25 13:08:43 +09:00
Takeshi Nakatani b97fd470a5 Abort when the SSE-KMS encryption type is specified without SSL/TLS 2024-02-23 13:11:56 +09:00
Andrew Gaul 4d7fd60305
Call abort instead of exit in tests (#2416)
This can give useful core dumps.
2024-02-23 12:28:29 +09:00
Takeshi Nakatani da38dc73ad Gentoo + libxml2-2.12 requires inclusion of parser.h 2024-02-20 08:28:42 +09:00
Takeshi Nakatani e89adf6633 Fixed a bug that caused mounting with kmsid specified to fail 2024-02-18 21:18:50 +09:00
Takeshi Nakatani fa2bcfc60d Fixed a bug in multi head request parameter 2024-02-12 17:37:03 +09:00
Takeshi Nakatani ed1d431a1f Improved output of error details when the bucket check fails 2024-02-12 17:36:47 +09:00
Takeshi Nakatani 67442cf054 Changed the level of messages by the get_base_exp function 2024-02-12 17:35:45 +09:00
Takeshi Nakatani a7186b6072 Updated actions/checkout from v3 to v4 2024-02-07 21:29:42 +09:00
Takeshi Nakatani 517574c40c Fixed a bug in fdatasync(fsync) 2024-02-06 14:11:37 +09:00
Jason Carpenter 5e6f21a9ff
fix: ListBucket edge cases (#2399) 2024-02-03 13:24:40 +09:00
Takeshi Nakatani 54aa278df0
Fixed errors reported by cppcheck 2.13.0 (#2400) 2024-01-25 00:46:45 +09:00
Takeshi Nakatani 2f9fb74a42
Corrected list_bucket to search the stat cache while creating a new file (#2376) 2024-01-24 22:10:14 +09:00
Andrew Gaul b82632547c
Replace miscellaneous pointers with unique_ptr (#2388) 2023-12-23 13:06:41 +09:00
Andrew Gaul e3b50ad3e1
Convert FdEntity to std::unique_ptr (#2383) 2023-12-07 23:56:35 +09:00
Andrew Gaul b139507ae6
Simplify locking with C++11 atomics (#2382) 2023-11-27 01:12:49 +09:00
Andrew Gaul feb0845103
Use JDK 21 for Ubuntu 23.10 (#2380) 2023-11-27 00:55:35 +09:00
Andrew Gaul f041812939
Revert "Call C++11 get_time and put_time (#2375)" (#2381)
This reverts commit 10a72bfd0f.  This
commit is incompatible with the older CentOS 7 libstdc++.
2023-11-27 00:51:17 +09:00
Andrew Gaul 2b57e74330
Use std::unique_ptr in threadpoolman (#2374) 2023-11-26 01:49:17 +09:00
Andrew Gaul b671fa7a9c
Pass std::unique_ptr by value (#2373)
This ensures that the parameter is moved.
2023-11-26 01:48:47 +09:00
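An illustrative sketch of the pass-by-value sink pattern, using a hypothetical Param type rather than the actual thpoolman parameter:

```cpp
#include <memory>
#include <utility>

struct Param { int value; };

// Taking the unique_ptr by value makes the transfer explicit: the caller
// must std::move, and the argument owns the object for the call's duration.
void consume(std::unique_ptr<Param> p)
{
    // p releases the object when the function returns.
}

int main()
{
    std::unique_ptr<Param> p(new Param{42});
    consume(std::move(p));   // ownership moved; p is now empty (nullptr)
    return 0;
}
```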
Andrew Gaul 691669749e
Remove obsolete C++11 #ifdef (#2377) 2023-11-21 00:37:42 +09:00
Andrew Gaul 10a72bfd0f
Call C++11 get_time and put_time (#2375)
This removes workarounds and fixed-length buffers.
2023-11-20 18:45:27 +09:00
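A small example of the C++11 manipulators adopted here (later reverted in f041812939 because CentOS 7's libstdc++ lacks them); the format string is illustrative:

```cpp
#include <ctime>
#include <iomanip>
#include <sstream>
#include <string>

std::string format_timestamp(std::tm tm)
{
    std::ostringstream os;
    os << std::put_time(&tm, "%Y-%m-%dT%H:%M:%SZ");   // no fixed-length buffer needed
    return os.str();
}

std::tm parse_timestamp(const std::string& value)
{
    std::tm tm = {};
    std::istringstream is(value);
    is >> std::get_time(&tm, "%Y-%m-%dT%H:%M:%SZ");   // replaces strptime workarounds
    return tm;
}
```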
Andrew Gaul 43f81b76af
Enable clang-tidy CERT warnings (#2371) 2023-11-19 10:00:42 +09:00
Andrew Gaul 68bbfee8ea
Address clang-tidy modernize-deprecated-headers (#2370) 2023-11-19 10:00:16 +09:00
Takeshi Nakatani ec8caf64b8 Reverted the macos CI process(using macos-fuse-t) 2023-11-17 21:08:34 +09:00
Eryu Guan bcacca6599 s3fs: make dir size not zero
Directories have size 0, which looks weird and may confuse users. So fake
the dir size as 4k.

Signed-off-by: Eryu Guan <eguan@linux.alibaba.com>
2023-11-17 17:55:57 +09:00
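A minimal sketch of the idea, assuming a getattr-style handler that fills a struct stat (the helper name is hypothetical):

```cpp
#include <sys/stat.h>

// Report a fixed 4 KiB size for directories instead of 0 so that listings
// do not show a confusing zero size.
void fake_directory_size(struct stat* st)
{
    if(st && S_ISDIR(st->st_mode) && 0 == st->st_size){
        st->st_size = 4096;
    }
}
```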
Takeshi Nakatani 4fdd2456d4 Fixed a script for a non-functioning environment in GitHub Actions 2023-11-15 23:17:26 +09:00
Takeshi Nakatani 3f6e8a8707 Fixed indent in integration-test-main.sh 2023-11-15 23:15:40 +09:00
Andrew Gaul 4845831f93
Convert some const to constexpr (#2342)
This guarantees that the function or value will resolve at compile-time.
2023-11-14 22:15:17 +09:00
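For example (values are illustrative, not taken from the s3fs sources):

```cpp
// const only promises immutability; constexpr additionally guarantees a
// compile-time constant usable in constant expressions.
const int     runtime_const = 4096;    // may be initialized at run time
constexpr int compile_const = 4096;    // must resolve at compile time

constexpr int square(int x) { return x * x; }          // single-return body: valid C++11
static_assert(square(8) == 64, "evaluated at compile time");
```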
Andrew Gaul 919575f312
Upgrade CI to Fedora 39 (#2365) 2023-11-12 16:01:09 +09:00
Andrew Gaul 0cd73e406d
Address clang-tidy 17 warnings (#2362) 2023-11-12 11:48:08 +09:00
Andrew Gaul 807ec1f6f7
Upgrade CI to Alpine 3.18 (#2332)
References #2328.
2023-11-12 11:08:44 +09:00
Takeshi Nakatani e2ac9b45e8 Re-improved updating of the temporary stat cache when creating a new file 2023-11-11 07:45:32 +09:00
Takeshi Nakatani b15ed13807 Force disk free space recovery in test(for only macos) 2023-11-10 15:51:22 +09:00
Takeshi Nakatani f9d3941d9d Fixed a bug in the re-upload part of Streamupload 2023-11-10 10:41:26 +09:00
Takeshi Nakatani 34c379babb Improved updating of temporary stat cache while creating a file 2023-11-10 10:23:15 +09:00
Takeshi Nakatani 7b5111c955 Suppress some message levels on macos 2023-11-09 14:16:43 +09:00
Andrew Gaul a3964b3fcd
Upgrade CI to Ubuntu 23.10 (#2355) 2023-10-22 22:18:29 +09:00
AdamQQQ 3856637cd2
s3fs: add option free_space_ratio to control cache size (#2351)
* Try to clean up the cache directory when initializing without enough disk space

Also optimize log messages to print detailed errors to the user.

Signed-off-by: Qinqi Qu <quqinqi@linux.alibaba.com>

* s3fs: add option free_space_ratio to control cache size

Since the ensure_diskfree option is not convenient enough, we have added
a new option "-o free_space_ratio" to control the space used by the s3fs
cache based on the current disk size.

The value of this option can be between 0 and 100. It will control the
size of the cache according to this ratio to ensure that the idle ratio
of the disk is greater than this value.

For example, when the value is 10 and the disk space is 50GB, it will
ensure that the disk will reserve at least 50GB * 10% = 5GB of remaining
space.

Signed-off-by: Qinqi Qu <quqinqi@linux.alibaba.com>

---------

Signed-off-by: Qinqi Qu <quqinqi@linux.alibaba.com>
2023-10-20 18:11:47 +09:00
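A worked sketch of the arithmetic behind the option (the function name is illustrative, not the s3fs implementation):

```cpp
#include <cstdint>

// Convert a free-space ratio (0-100) into the number of bytes that must
// remain free on the disk holding the cache directory.
std::int64_t reserved_bytes(std::int64_t disk_size_bytes, int free_space_ratio)
{
    return disk_size_bytes / 100 * free_space_ratio;
}

// reserved_bytes(50LL * 1024 * 1024 * 1024, 10) yields 5 GiB, matching the
// "50GB * 10% = 5GB" example above.
```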
Jan Stastny 2871975d1e Set SSE headers when checking bucket 2023-10-18 21:45:47 +09:00
Andrew Gaul d5dd17644d
Add a helper script to compile all targets (#2337)
This is useful to compile different SSL libraries and 32-bit targets.
2023-10-15 11:54:52 +09:00
Takeshi Nakatani 5e5b4f0a49 Fixed ETag parsing when completing the multipart upload part 2023-10-13 11:13:52 +09:00
Andrew Gaul e5b15bed7d
Pass by value to trim functions (#2345)
These already force a copy, so passing by value has the same
performance but is simpler.  It also allows the compiler to perform
copy elision on temporaries and the caller to explicitly std::move in
other cases.
2023-10-12 22:21:33 +09:00
Takeshi Nakatani 2e4a6928c3 Changed argument name in ParallelMultipartUploadAll 2023-10-09 13:17:25 +09:00
Qinqi Qu 1aa77f6cda s3fs_cred: print detailed error message when stat file fails
Signed-off-by: Qinqi Qu <quqinqi@linux.alibaba.com>
2023-10-09 13:11:47 +09:00
Takeshi Nakatani d0c4b5c763 Fixed a bug in exclusive control of pagelist in FdEntity class 2023-09-26 21:26:17 +09:00
Andrew Gaul 361e10d09c
Add scope_guard for ad-hoc resource management (#2313)
References #2261.  Suggested by:
https://stackoverflow.com/questions/10270328/the-simplest-and-neatest-c11-scopeguard
2023-09-26 07:52:55 +09:00
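A minimal sketch in the spirit of the linked answer (not necessarily the exact class added in #2313):

```cpp
#include <functional>
#include <utility>

// Runs an arbitrary cleanup callable when the guard leaves scope, giving
// RAII behavior to ad-hoc resources such as FILE* handles.
class scope_guard
{
    public:
        explicit scope_guard(std::function<void()> cleanup) : cleanup_(std::move(cleanup)) {}
        ~scope_guard() { if(cleanup_){ cleanup_(); } }
        scope_guard(const scope_guard&) = delete;
        scope_guard& operator=(const scope_guard&) = delete;
    private:
        std::function<void()> cleanup_;
};

// Usage: FILE* fp = fopen("file", "r"); scope_guard closer([fp]{ if(fp) fclose(fp); });
```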
Andrew Gaul 95cfbe30ed
Add error checking to test_concurrent_writes (#2299)
This reveals a situation where s3fs triggers an unexpected
EntityTooSmall error.
2023-09-26 07:32:02 +09:00
Andrew Gaul 87b8bafaea
Address unknown pragma warning with GCC (#2324) 2023-09-26 01:16:14 +09:00
Andrew Gaul 1a703e623a
Remove volatile qualifiers deprecated in C++23 (#2323)
These are protected by upload_list_lock.  Addresses warnings of the
form:

warning: ‘++’ expression of ‘volatile’-qualified type is deprecated
2023-09-26 00:15:05 +09:00
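An illustrative before/after, assuming the counter is already guarded by upload_list_lock (the member names here are hypothetical):

```cpp
#include <mutex>

struct upload_state
{
    std::mutex upload_list_lock;
    // volatile int retrycnt;      // deprecated: ++ on a volatile-qualified value
    int retrycnt = 0;              // plain int, protected by upload_list_lock

    void bump()
    {
        std::lock_guard<std::mutex> lock(upload_list_lock);
        ++retrycnt;                // the mutex, not volatile, provides the guarantee
    }
};
```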
Andrew Gaul 1ebefca029
Reorder $CXXFLAGS to the end (#2322)
This allows overriding flags like -std=c++11.
2023-09-26 00:05:54 +09:00
Andrew Gaul ffff26e165
Add stat helper for user and group (#2320) 2023-09-26 00:04:24 +09:00
Andrew Gaul 61df7bf42c
Use std::unique_ptr for fclose (#2318)
References #2261.
2023-09-25 23:55:11 +09:00
Andrew Gaul c5fb42ff10
Use std::unique_ptr in libxml functions (#2317)
References #2261.
2023-09-25 23:46:52 +09:00
Takeshi Nakatani cbc33cd7ae Fixed a bug in upload boundary calculation in StreamUpload 2023-09-25 09:28:37 +09:00
Takeshi Nakatani 645c10a3c3 Fixed test_not_existed_dir_obj test condition 2023-09-25 08:16:32 +09:00
Takeshi Nakatani 54293a66b3 Simplify the determination of the --cached option of the stat 2023-09-25 08:08:58 +09:00
Takeshi Nakatani 01b3caa38c Fixed errors of cppcheck 2.12.0 2023-09-24 19:55:02 +09:00
Andrew Gaul 64642e1d1b
Do not cache stat attributes (#2319)
This is a workaround for CI failures.
2023-09-24 18:32:07 +09:00
Iain Samuel McLean Elder 546cdd0d91 Improve docs on environment variables 2023-09-21 12:40:46 +09:00
Andrew Gaul a83f4a48d0
Add extra logging to debug test (#2316) 2023-09-15 21:50:01 +09:00
Andrew Gaul 99d3e68d59
Revert ls change (#2315)
echo does not split the words on newlines.
2023-09-15 21:48:04 +09:00
Andrew Gaul 01189e99fc
Store mvnode in vector instead of manual linked list (#2312)
This simplifies code and avoids manual memory management.  References #2261.
2023-09-13 22:32:15 +09:00
Andrew Gaul f493cb5846
Remove unnecessary uses of ls (#2311)
Other call sites need the call to readdir/getdents64.
2023-09-13 22:27:12 +09:00
Andrew Gaul e9814b4a4d Add Debian bookworm to CI 2023-09-11 00:59:23 +09:00
Andrew Gaul 3b12aaf2ab
Do not escape percent (#2310)
This addresses warnings of the form:

grep: warning: stray \ before %
2023-09-10 12:51:36 +09:00
Andrew Gaul 7e20278489
Address some Shellcheck SC2012 warnings (#2306) 2023-09-10 12:50:18 +09:00
Andrew Gaul 3d73d5a687
Delete unneeded constructors and assignment operators (#2309) 2023-09-06 23:52:10 +09:00
Andrew Gaul fa3a472c6b
Remove several calls to free (#2308) 2023-09-06 23:50:33 +09:00
Andrew Gaul 5f38301861
Emit unexpected file names in failed test_list (#2307) 2023-09-06 23:47:12 +09:00
Andrew Gaul 4d5632912a
Initialize variable before use (#2302)
clang-analyzer found a path where this could be used without
initialization.
2023-09-06 23:32:49 +09:00
Takeshi Nakatani a74034a012 Fixed a bug with setting the statvfs value 2023-09-05 09:03:11 -07:00
Andrew Gaul 3f64c72c24
Explicitly grep for ps args (#2301) 2023-09-03 22:03:45 +09:00
AdamQQQ 68c45ce791
s3fs: print unmounting hint when the mount point is stale (#2295)
When stat on the mount point returns the error code ENOTCONN, print an
unmount command hint so the user can fix it.

Signed-off-by: Qinqi Qu <quqinqi@linux.alibaba.com>
2023-09-03 10:50:09 +09:00
Andrew Gaul e8cb6d6d34
Abort after failed MPU (#2298)
This reclaims storage after a failed MPU which caused OutOfMemory
issues in #2291.
2023-08-29 23:29:16 +09:00
Andrew Gaul a2f2f72aaf
Enable Valgrind in CI (#2297)
Using HTTP instead of HTTPS and
82107f4b6c improve test run-time so that
this is now feasible.
2023-08-29 23:11:26 +09:00
Andrew Gaul 7bb9609827
Return errors from AutoFdEntity::Open (#2296)
Found via pjdfstest which creates a PATH_MAX path that should return
NAMETOOLONG.
2023-08-29 22:57:30 +09:00
Andrew Gaul 82107f4b6c
Skip is_uid_include_group when GID available (#2292)
This can avoid an expensive computation which is 20% of test runtime.
2023-08-27 15:24:33 +09:00
Eryu Guan ee49ca4abf
s3fs: print fuse context in s3fs fuse operations (#2274)
Print fuse context like pid in fuse operations, so we know which process
is triggering this operation.

Signed-off-by: Eryu Guan <eguan@linux.alibaba.com>
2023-08-27 14:00:57 +09:00
Andrew Gaul 48548f0896
Remove unused functions (#2289) 2023-08-22 23:17:43 +09:00
Andrew Gaul 981e97ee76
Use default move constructor (#2288)
This is identical to the explicit one.
2023-08-22 23:14:09 +09:00
Andrew Gaul a568aa70fd
Replace uses of emplace with operator=(&&) (#2287)
emplace does not overwrite the value if the key already exists.  C++17 has
insert_or_assign but C++11 only has operator= which invokes the default
constructor.  Follows on to 6781ef5bd1.
2023-08-22 23:12:12 +09:00
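For example, with a header map (the key and values are illustrative):

```cpp
#include <map>
#include <string>

int main()
{
    std::map<std::string, std::string> headers;
    headers.emplace("Content-Type", "text/plain");   // inserts the key
    headers.emplace("Content-Type", "text/html");    // no effect: key already exists
    headers["Content-Type"] = "text/html";           // C++11: operator= overwrites
    // C++17 could instead use headers.insert_or_assign("Content-Type", "text/html");
    return 0;
}
```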
Takeshi Nakatani 218adcb29b
Fixed errors in cache.cpp from cppcheck 2.11.1 (#2286) 2023-08-20 19:00:20 +09:00
Andrew Gaul 6c55bcfdd8
Own values in add_header (#2285)
Also fix up indentation.
2023-08-20 18:59:18 +09:00
Andrew Gaul 8d04ee3e01
Own values in stat_cache and symlink_cache (#2284)
This removes an unnecessary use of unique_ptr.
2023-08-20 12:10:47 +09:00
Takeshi Nakatani 6781ef5bd1 Reverted to direct array access instead of std::map emplace 2023-08-20 09:44:38 +09:00
Andrew Gaul 7e94b64ae7
Use unique_ptr in SSL functions (#2282)
References #2261.
2023-08-19 23:29:00 +09:00
Andrew Gaul 64a142723a
Document environment variable configuration (#2281) 2023-08-19 23:23:05 +09:00
Andrew Gaul 50f6c38c84
Replace xattr_value with std::string (#2280) 2023-08-19 11:12:43 +09:00
Andrew Gaul 9fb4c32c6a
Test filenames longer than POSIX maximum (#2277) 2023-08-18 08:58:44 +09:00
Takeshi Nakatani 280ed5d706
Additional fix for #2276(Convert BodyData to std::string) (#2278) 2023-08-18 00:35:50 +09:00
Andrew Gaul 2518ff3568
Convert BodyData to std::string (#2276)
This is simpler and avoids some copies.
2023-08-17 22:49:41 +09:00
Andrew Gaul c65ce8a42c
Add clang-tidy to CI (#2270) 2023-08-17 22:42:11 +09:00
Andrew Gaul e5986d0034
Run all tests with sanitizers (#2275) 2023-08-17 22:27:06 +09:00
Andrew Gaul b2bb12fd2c
Remove unneeded explicit std::string constructors (#2273)
std::string(const char*) implicitly constructs these.  The remaining call sites
require string literals from C++14.
2023-08-17 22:12:28 +09:00
Andrew Gaul 7f30353fb9
Return std::unique_ptr from S3fsCurl callbacks (#2272)
References #2261.
2023-08-17 22:08:56 +09:00
Takeshi Nakatani 235bccced5 Added make check for src directory for Linux OS 2023-08-16 08:07:02 +09:00
Andrew Gaul 67e6b9e495
Simplify xattr_value with owned values (#2262)
References #2261.
2023-08-15 22:54:46 +09:00
Andrew Gaul ea42911530
Build s3fs in parallel like in CI (#2267) 2023-08-15 22:37:39 +09:00
Andrew Gaul 6823c5a7ec
Enable clang-tidy cppcoreguidelines (#2269) 2023-08-15 22:12:33 +09:00
Andrew Gaul d1272d296a
Tighten up CLI argument handling (#2268)
This ensures that each option is only handled once.
2023-08-15 21:45:38 +09:00
Andrew Gaul d2a571a868
Set exit code for Valgrind (#2265)
Otherwise errors can be ignored for successful tests with memory
errors.
2023-08-15 21:33:34 +09:00
Andrew Gaul d120e54284
Improve illegal bucket name error message (#2263)
This may help users debug situations like:

https://stackoverflow.com/questions/76359564/why-does-mounting-s3fs-bucket-on-centos-7-using-fstab-fail-but-mount-a-works
2023-08-15 21:31:05 +09:00
Andrew Gaul 3a6af38582
Tighten up Content-Type checking (#2258) 2023-08-15 21:23:59 +09:00
Andrew Gaul e157d811cb
Use std::string::compare and operator== where possible (#2256) 2023-08-15 21:22:36 +09:00
Andrew Gaul 56a4e67009
Replace more raw pointers with std::unique_ptr (#2255) 2023-08-14 00:03:10 +09:00
Takeshi Nakatani 5b93765802 Fixed a warning when compiling C/C++ code 2023-08-13 20:49:39 +09:00
Takeshi Nakatani acea1d33f9 Fixed string test for s3fs_base64 2023-08-13 20:48:38 +09:00
Andrew Gaul 528a61718d
Convert manual memory allocations to std::unique_ptr (#2253) 2023-08-11 23:26:07 +09:00
Andrew Gaul c5a75a1fb2
Delete copy constructors and assignment operators (#2257)
One of these was buggy and others had the wrong parameters and return
types.
2023-08-11 13:12:03 +09:00
Andrew Gaul 3790a0f8b4
Calculate MD5 without using a temporary file (#2252)
This mirrors the SHA256 code.
2023-08-07 00:17:15 +09:00
Andrew Gaul 779afe5d62
Make help more consistent (#2251) 2023-08-06 22:25:10 +09:00
Andrew Gaul 26b5658d70
Wrap ps3fscred with std::unique_ptr (#2250)
This removes many manual memory deallocations.
2023-08-06 22:23:25 +09:00
Andrew Gaul c568a69452
Return std::string from base64 encoding function (#2248)
This avoids manual memory allocations.
2023-08-06 22:22:02 +09:00
Andrew Gaul 13ad53eef7
Convert most std::list to std::vector (#2247)
This tends to be more efficient due to fewer allocations.  Also fix std::sort
comparator which should be strictly less than.
2023-08-05 10:05:32 +09:00
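The strictly-less-than requirement, sketched with a hypothetical part list:

```cpp
#include <algorithm>
#include <vector>

struct part_info { int part_num; };

// std::sort requires a strict weak ordering: the comparator returns true only
// when lhs orders strictly before rhs. Using <= here would be undefined behavior.
void sort_parts(std::vector<part_info>& parts)
{
    std::sort(parts.begin(), parts.end(),
              [](const part_info& lhs, const part_info& rhs) { return lhs.part_num < rhs.part_num; });
}
```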
Andrew Gaul b14758baff
Fix junk_data for 32-bit platforms (#2245)
Previously this had a mismatch between size_t and unsigned long long.
2023-08-05 09:37:18 +09:00
Andrew Gaul b5c3fc0a08
Convert fixed-size allocations to C++11 std::array (#2242)
This is safer and more efficient.
2023-08-05 09:36:22 +09:00
Andrew Gaul b29f8d0f2b
Use C++ enum class for most enums (#2241)
This promotes type-safety.
2023-07-30 22:53:17 +09:00
Andrew Gaul 5699875e30
Use C++11 emplace where possible (#2240)
This is more concise and sometimes more efficient.
2023-07-30 22:51:20 +09:00
Andrew Gaul 3081e419e1
Simplify direct shellcheck download with jq (#2239) 2023-07-29 09:22:55 +09:00
Andrew Gaul a7b38a6940
Address stray warnings (#2237) 2023-07-29 09:19:18 +09:00
Andrew Gaul 1f04165a33
Convert most str callers to C++11 std::to_string (#2238)
Remaining ones handle timespec.
2023-07-28 18:21:55 +09:00
Andrew Gaul 36db898d01
Use C++11 std::map::erase return value (#2236) 2023-07-27 23:34:43 +09:00
Andrew Gaul 38a1ff42e5
Convert test binaries to C++ (#2235)
This ensures that they are compiled with the same compiler and set of flags
as the rest of s3fs.
2023-07-27 23:15:19 +09:00
Andrew Gaul a4a2841c05
Use C++11 nullptr instead of 0 or NULL (#2234)
This improves type-safety.
2023-07-27 21:56:58 +09:00
Andrew Gaul 0ece204393 Fix -Wshorten-64-to-32 warnings 2023-07-27 12:23:26 +09:00
Andrew Gaul 6344d74ae3
Replace some raw pointers with std::unique_ptr (#2195)
This simplifies code paths and makes memory leaks less likely.  It
also makes memory ownership more explicit by requiring std::move.
This commit requires C++11.  References #2179.
2023-07-27 09:12:28 +09:00
Takeshi Nakatani faec0d9d15 Refixed for cppcheck 2.1x 2023-07-26 07:55:33 +09:00
Andrew Gaul e14a2eb94b
Add AWS CLI config for use_sse=custom (#2230) 2023-07-25 23:31:20 +09:00
Takeshi Nakatani cb3dc28e6e Supported cppcheck 2.10(and changed std from c++03 to c++11 for RHEL7) 2023-07-25 08:08:06 +09:00
Takeshi Nakatani 38dc65180b Fixed checking cppcheck version in ci.yml 2023-07-24 11:07:31 +09:00
Andrew Gaul 2405706643
Insert SSE headers when appropriate (#2228)
References #2218.  References #2227.
2023-07-23 16:17:34 +09:00
Andrew Gaul 5371cd1468
Update ChangeLog and configure.ac for 1.93 (#2225)
Fixes #2213.
2023-07-19 22:31:43 +09:00
Eryu Guan 7978395083 Use smart pointer to manage pcfstat object
Previously pcfstat was a raw pointer, and it could be leaked if the
function returned before deleting it.

So use a smart pointer to automatically release the object.

Note that s3fs currently only uses C++03, so we use auto_ptr here, not
unique_ptr, which requires C++11.

Fixes: 6ca5a24a7f ("Fix two inconsistency issues between stat cache and cache file (#2152)")
Signed-off-by: Eryu Guan <eguan@linux.alibaba.com>
2023-07-19 20:15:55 +09:00
Takeshi Nakatani d0a944fcaa Fixed data race about fuse_fill_dir_t function and data pointer 2023-07-14 22:32:12 +09:00
Andrew Gaul 537384e9b5
Guard filler calls with filled check (#2215)
Follows on to e650d8c55c.
2023-07-13 22:46:19 +09:00
Andrew Gaul e650d8c55c
Explicitly handle CommonPrefixes with nocompat_dir (#2212)
Previously the test missed listing implicit directories and another
test was incorrect.  This fixes a regression from 1.91.
2023-07-13 21:15:34 +09:00
Takeshi Nakatani 9663215bb4 Fixed data race at OPENSSL_sk_dup/free in libcurl 2023-07-12 22:51:57 +09:00
Takeshi Nakatani b2537052ef Fixed data race in threads found by the thread sanitizer 2023-07-09 20:53:27 +09:00
Eryu Guan 2e51908bec Check FdEntity::Open() status correctly
FdEntity::Open() returns -errno on error, but FdManager::Open() only
checks whether its return value is -1. This may lead to using '-errno' as a
pseudo fd in the next read or write, which would fail because '-errno' is
not in the fent map.

Signed-off-by: Eryu Guan <eguan@linux.alibaba.com>
2023-07-05 08:50:12 +09:00
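The gist of the fix, using a hypothetical stand-in for FdEntity::Open():

```cpp
#include <cerrno>

// Stand-in: returns a pseudo fd (>= 0) on success or -errno on failure.
int open_entity(bool fail)
{
    return fail ? -ENAMETOOLONG : 7;
}

int use_entity(bool fail)
{
    int fd = open_entity(fail);
    if(fd < 0){          // correct: any negative value is an error code
        return fd;       // checking only for -1 would let e.g. -ENAMETOOLONG
    }                    // slip through as a "valid" pseudo fd
    // ... read/write using the pseudo fd ...
    return 0;
}
```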
LiuBingrun a8edbd8622
Fix streamupload not working (#2204)
fuse_main forks to daemonize. If the ThreadPool is created before
this, the worker threads will not exist in the child process.

Move ThreadPool init to s3fs_init and destroy to s3fs_destroy.

Signed-off-by: liubingrun <liubr1@chinatelecom.cn>
2023-07-02 10:56:59 +09:00
Andrew Gaul 06d0852e74
Use JDK 17 for Ubuntu CI (#2200)
Also remove stale 18.04 configuration.
2023-06-26 22:07:04 +09:00
Alex Fishman 03066ea29a
Fix extended attribute support when using FUSE-T (#2201)
* Add support for FUSE-T on macos

Signed-off-by: Alex Fishman <alex@fuse-t.org>
Signed-off-by: alex <alex@alex-NUC10.lan>

* Ignore value pointer when size is zero on setattrx

Signed-off-by: Alex Fishman <alex@fuse-t.org>

---------

Signed-off-by: Alex Fishman <alex@fuse-t.org>
Signed-off-by: alex <alex@alex-NUC10.lan>
2023-06-26 22:05:37 +09:00
Andrew Gaul e66c9a82a2
Add sanitize_thread to CI (#2199) 2023-06-25 23:13:49 +09:00
Andrew Gaul e86e6cf24f Add Ubuntu 23.04 and remove 18.04
18.04 is EOL:

https://ubuntu.com/blog/ubuntu-18-04-eol-for-devices

This removes a FUSE 3 blocker.  References #1159.
2023-06-25 22:53:59 +09:00
Andrew Gaul 7e8238abc0
Upgrade to JDK 17 (#2196)
Newer distros package this but some older ones do not.  Also remove
stale Ubuntu 16.04 configuration.
2023-06-25 18:05:38 +09:00
Andrew Gaul 6448c8f1a8
Protect FdEntity::physical_fd with fdent_lock (#2194)
* Protect FdEntity::physical_fd with fdent_lock
Found via ThreadSanitizer.
2023-06-25 16:43:15 +09:00
Andrew Gaul 3b6688253f
Address cppcheck 2.10 warnings (#2163)
Disable newer cppcheck until we can diagnose this further.
References #2162.
2023-06-25 16:04:16 +09:00
Alex Fishman 45e7cd085a Add support for FUSE-T on macos
Signed-off-by: Alex Fishman <alex@fuse-t.org>
Signed-off-by: alex <alex@alex-NUC10.lan>
2023-06-24 22:26:42 +09:00
Takeshi Nakatani 7c9cf84316 Added several memory checks to CI 2023-06-11 10:23:08 -07:00
Takeshi Nakatani 580775b47c Removed unnecessary debug output 2023-06-07 20:48:50 -07:00
Takeshi Nakatani eab26a1e01 Fixed test setting for macOS 2023-06-07 20:48:34 -07:00
Andrew Gaul 1910856c6c
Remove wait and check loop from mk_test_file (#2175)
This appears to be some kind of eventual consistency check.  This
should have no effect given S3Proxy and recent AWS strong consistency.
Also it is likely ineffective given the other test object creation
operations.
2023-06-07 23:24:31 +09:00
Andrew Gaul 4b3e715291
Always return nanoseconds from get_time helpers (#2174)
This makes Linux and macOS more consistent.
2023-05-30 18:52:55 +09:00
Eryu Guan 6ca5a24a7f
Fix two inconsistency issues between stat cache and cache file (#2152)
* Fix inconsistency between stat cache file and cache file

We unlock the stat cache file too early in FdEntity::Open(), and then
truncate the cache file and update the stat cache file, so there is a
window in which the stat cache doesn't reflect the cache file status.

Suggested-by: Takeshi Nakatani <ggtakec@gmail.com>
Signed-off-by: Eryu Guan <eguan@linux.alibaba.com>

* Mark pagelist as unloaded if cache file has been truncated

If cache file size doesn't match object size, the cache file might be
corrupted, so invalidate it and save new cache stat file.

Suggested-by: Takeshi Nakatani <ggtakec@gmail.com>
Signed-off-by: Eryu Guan <eguan@linux.alibaba.com>

---------

Signed-off-by: Eryu Guan <eguan@linux.alibaba.com>
2023-05-30 18:39:50 +09:00
Takeshi Nakatani 0d6b02090e
Revert "Update curl.cpp: reduce memory cache use (#2157)" (#2170)
This reverts commit 5b487f651a.
2023-05-27 20:33:43 +09:00
Tan Guofu 5b487f651a
Update curl.cpp: reduce memory cache use (#2157)
Flush the file and clean the page cache when the file part download is done.
2023-05-27 19:15:47 +09:00
Andrew Gaul e7a364d610
Specify _FORTIFY_SOURCE=3 (#2168)
This can find more kinds of buffer overflows:

https://developers.redhat.com/articles/2022/09/17/gccs-new-fortification-level
2023-05-27 17:20:29 +09:00
93 changed files with 4160 additions and 3670 deletions

View File

@ -1,3 +1,4 @@
WarningsAsErrors: '*'
Checks: '
-*,
bugprone-*,
@ -8,7 +9,29 @@ Checks: '
-bugprone-macro-parentheses,
-bugprone-narrowing-conversions,
-bugprone-unhandled-self-assignment,
cppcoreguidelines-pro-type-cstyle-cast,
cert-*,
-cert-dcl50-cpp,
-cert-env33-c,
-cert-err33-c,
-cert-err58-cpp,
cppcoreguidelines-*,
-cppcoreguidelines-avoid-c-arrays,
-cppcoreguidelines-avoid-do-while,
-cppcoreguidelines-avoid-magic-numbers,
-cppcoreguidelines-avoid-non-const-global-variables,
-cppcoreguidelines-init-variables,
-cppcoreguidelines-macro-usage,
-cppcoreguidelines-narrowing-conversions,
-cppcoreguidelines-no-malloc,
-cppcoreguidelines-owning-memory,
-cppcoreguidelines-pro-bounds-array-to-pointer-decay,
-cppcoreguidelines-pro-bounds-constant-array-index,
-cppcoreguidelines-pro-bounds-pointer-arithmetic,
-cppcoreguidelines-pro-type-const-cast,
-cppcoreguidelines-pro-type-member-init,
-cppcoreguidelines-pro-type-reinterpret-cast,
-cppcoreguidelines-pro-type-union-access,
-cppcoreguidelines-pro-type-vararg,
google-*,
-google-build-using-namespace,
-google-readability-casting,
@ -18,25 +41,27 @@ Checks: '
-google-runtime-references,
misc-*,
-misc-const-correctness,
-misc-include-cleaner,
-misc-no-recursion,
-misc-redundant-expression,
-misc-unused-parameters,
-misc-use-anonymous-namespace,
modernize-*,
-modernize-avoid-c-arrays,
-modernize-deprecated-headers,
-modernize-loop-convert,
-modernize-make-unique,
-modernize-raw-string-literal,
-modernize-return-braced-init-list,
-modernize-use-auto,
-modernize-use-emplace,
-modernize-use-nullptr,
-modernize-use-default-member-init,
-modernize-use-trailing-return-type,
-modernize-use-using,
performance-*,
-performance-avoid-endl,
-performance-no-int-to-ptr,
portability-*,
readability-*,
-readability-avoid-nested-conditional-operator,
-readability-braces-around-statements,
-readability-else-after-return,
-readability-function-cognitive-complexity,
@ -47,5 +72,6 @@ Checks: '
-readability-isolate-declaration,
-readability-magic-numbers,
-readability-named-parameter,
-readability-redundant-declaration,
-readability-simplify-boolean-expr,
-readability-suspicious-call-argument'

View File

@ -50,18 +50,19 @@ jobs:
#
matrix:
container:
- ubuntu:24.04
- ubuntu:22.04
- ubuntu:20.04
- ubuntu:18.04
- debian:bookworm
- debian:bullseye
- debian:buster
- rockylinux:9
- rockylinux:8
- centos:centos7
- fedora:38
- fedora:37
- fedora:40
- fedora:39
- opensuse/leap:15
- alpine:3.17
- alpine:3.19
container:
image: ${{ matrix.container }}
@ -82,7 +83,17 @@ jobs:
run: |
if [ "${{ matrix.container }}" = "opensuse/leap:15" ]; then zypper install -y tar gzip; fi
- name: Checkout source code
# [NOTE]
# actions/checkout@v3 uses nodejs v16 and will be deprecated.
# However, @v4 does not work on centos7 depending on the glibc version,
# so we will continue to use @v3.
#
- name: Checkout source code(other than centos7)
if: matrix.container != 'centos:centos7'
uses: actions/checkout@v4
- name: Checkout source code(only centos7)
if: matrix.container == 'centos:centos7'
uses: actions/checkout@v3
# [NOTE]
@ -99,52 +110,61 @@ jobs:
/bin/sh -c "./configure ${CONFIGURE_OPTIONS}"
make --jobs=$(nproc)
- name: clang-tidy
run: |
# skip if clang-tidy does not exist, e.g., CentOS 7
if command -v clang-tidy; then
make -C src/ clang-tidy
make -C test/ clang-tidy
fi
- name: Cppcheck
run: |
# work around resource leak false positives on older Linux distributions
if cppcheck --version | awk '{if ($2 <= 1.86) { exit(1) } }'; then
# specify the version range to run cppcheck (cppcheck version number is x.y or x.y.z)
if cppcheck --version | sed -e 's/\./ /g' | awk '{if (($2 * 1000 + $3) <= 2004) { exit(1) } }'; then
make cppcheck
fi
- name: Shellcheck
run: |
make shellcheck
if shellcheck --version | awk -F '[ .]' '/version:/ && ($2 * 1000 + $3 <= 7) { exit(1) }'; then
make shellcheck
fi
- name: Test suite
run: |
make check -C src
make ALL_TESTS=1 check -C test || (test/filter-suite-log.sh test/test-suite.log; exit 1)
# [NOTE]
# This Job does not work for macOS 11 and later because load_osxfuse returns exit code = 1.
# Apple states "You must be signed in as an administrator user to install new kernel
# extensions, and your Mac must be rebooted for the extensions to load.", so it needs
# to reboot OS.
# As of May 2023, GitHub Actions are no longer able to launch macos 10.15 as runner,
# so we can not run this Job.
# In the future, if it is found a solution, we will resume this Job execution.
# Using macos-fuse-t
# This product(package) is a workaround for osxfuse which required an OS reboot(macos 11 and later).
# see. https://github.com/macos-fuse-t/fuse-t
# About osxfuse
# This job doesn't work with Github Actions using macOS 11+ because "load_osxfuse" returns
# "exit code = 1".(requires OS reboot)
#
macos10:
if: false
runs-on: macos-10.15
macos12:
runs-on: macos-12
steps:
- name: Checkout source code
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Brew tap
run: |
TAPS="$(brew --repository)/Library/Taps";
if [ -e "$TAPS/caskroom/homebrew-cask" ]; then rm -rf "$TAPS/caskroom/homebrew-cask"; fi;
HOMEBREW_NO_AUTO_UPDATE=1 brew tap homebrew/homebrew-cask
HOMEBREW_NO_AUTO_UPDATE=1 brew tap macos-fuse-t/homebrew-cask
- name: Install osxfuse
- name: Install fuse-t
run: |
HOMEBREW_NO_AUTO_UPDATE=1 brew install osxfuse
HOMEBREW_NO_AUTO_UPDATE=1 brew install fuse-t
- name: Install brew other packages
run: |
S3FS_BREW_PACKAGES='automake cppcheck python3 coreutils gnu-sed shellcheck';
S3FS_BREW_PACKAGES='automake cppcheck python3 coreutils gnu-sed shellcheck jq';
for s3fs_brew_pkg in ${S3FS_BREW_PACKAGES}; do if brew list | grep -q ${s3fs_brew_pkg}; then if brew outdated | grep -q ${s3fs_brew_pkg}; then HOMEBREW_NO_AUTO_UPDATE=1 brew upgrade ${s3fs_brew_pkg}; fi; else HOMEBREW_NO_AUTO_UPDATE=1 brew install ${s3fs_brew_pkg}; fi; done;
- name: Install awscli2
@ -153,31 +173,120 @@ jobs:
curl "https://awscli.amazonaws.com/AWSCLIV2.pkg" -o "AWSCLIV2.pkg"
sudo installer -pkg AWSCLIV2.pkg -target /
- name: Check osxfuse permission
run: |
if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then sudo chmod +s /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then sudo chmod +s /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse; else exit 1; fi
- name: Build
run: |
./autogen.sh
PKG_CONFIG_PATH=/usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/openssl/lib/pkgconfig ./configure CXXFLAGS='-std=c++03 -DS3FS_PTHREAD_ERRORCHECK=1'
PKG_CONFIG_PATH=/usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/openssl/lib/pkgconfig ./configure CXXFLAGS='-std=c++11 -DS3FS_PTHREAD_ERRORCHECK=1'
make --jobs=$(sysctl -n hw.ncpu)
- name: Cppcheck
run: |
make cppcheck
# specify the version range to run cppcheck (cppcheck version number is x.y or x.y.z)
if cppcheck --version | sed -e 's/\./ /g' | awk '{if (($2 * 1000 + $3) <= 2004) { exit(1) } }'; then
make cppcheck
fi
- name: Shellcheck
run: |
make shellcheck
if shellcheck --version | awk -F '[ .]' '/version:/ && ($2 * 1000 + $3 <= 7) { exit(1) }'; then
make shellcheck
fi
- name: Test suite
run: |
make check -C src
echo "user_allow_other" | sudo tee -a /etc/fuse.conf >/dev/null
if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse; else exit 1; fi
make ALL_TESTS=1 check -C test || (test/filter-suite-log.sh test/test-suite.log; exit 1)
MemoryTest:
runs-on: ubuntu-latest
#
# build matrix for containers
#
strategy:
#
# do not stop jobs automatically if any of the jobs fail
#
fail-fast: false
#
# matrix for type of checking
#
# [NOTE]
# Currently following test is not supported:
# - sanitize_memory : Future support planned
#
matrix:
checktype:
- glibc_debug
- sanitize_address
- sanitize_others
- sanitize_thread
- valgrind
container:
image: fedora:40
options: "--privileged --cap-add SYS_ADMIN --device /dev/fuse"
steps:
- name: Checkout source code
uses: actions/checkout@v4
- name: Install packages
run: |
.github/workflows/linux-ci-helper.sh fedora:40
- name: Install clang
run: |
dnf install -y clang
if [ "${{ matrix.checktype }}" = "valgrind" ]; then
dnf install -y valgrind
fi
#
# Set CXX/CXXFLAGS and Variables for test
#
- name: Set variables
run: |
COMMON_CXXFLAGS='-g -Wno-cpp -DS3FS_PTHREAD_ERRORCHECK=1'
if [ "${{ matrix.checktype }}" = "glibc_debug" ]; then
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -D_GLIBCXX_DEBUG" >> $GITHUB_ENV
elif [ "${{ matrix.checktype }}" = "sanitize_address" ]; then
echo 'CXX=clang++' >> $GITHUB_ENV
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -fsanitize=address -fsanitize-address-use-after-scope" >> $GITHUB_ENV
echo 'ASAN_OPTIONS=detect_leaks=1,detect_stack_use_after_return=1' >> $GITHUB_ENV
elif [ "${{ matrix.checktype }}" = "sanitize_memory" ]; then
echo 'CXX=clang++' >> $GITHUB_ENV
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -fsanitize=memory" >> $GITHUB_ENV
elif [ "${{ matrix.checktype }}" = "sanitize_thread" ]; then
echo 'CXX=clang++' >> $GITHUB_ENV
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -fsanitize=thread" >> $GITHUB_ENV
echo 'TSAN_OPTIONS=halt_on_error=1' >> $GITHUB_ENV
# [NOTE]
# Set this to avoid following error when running configure.
# "FATAL: ThreadSanitizer: unexpected memory mapping"
sysctl vm.mmap_rnd_bits=28
elif [ "${{ matrix.checktype }}" = "sanitize_others" ]; then
echo 'CXX=clang++' >> $GITHUB_ENV
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O1 -fsanitize=undefined,implicit-conversion,local-bounds,unsigned-integer-overflow" >> $GITHUB_ENV
elif [ "${{ matrix.checktype }}" = "valgrind" ]; then
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O1" >> $GITHUB_ENV
echo 'VALGRIND=--leak-check=full' >> $GITHUB_ENV
echo 'RETRIES=100' >> $GITHUB_ENV
echo 'S3_URL=http://127.0.0.1:8081' >> $GITHUB_ENV
fi
- name: Build
run: |
./autogen.sh
/bin/sh -c "CXX=${CXX} CXXFLAGS=\"${CXXFLAGS}\" ./configure --prefix=/usr --with-openssl"
make
- name: Test suite
run: |
/bin/sh -c "ALL_TESTS=1 ASAN_OPTIONS=${ASAN_OPTIONS} TSAN_OPTIONS=${TSAN_OPTIONS} VALGRIND=${VALGRIND} RETRIES=${RETRIES} make check -C test || (test/filter-suite-log.sh test/test-suite.log; exit 1)"
#
# Local variables:
# tab-width: 4

View File

@ -66,8 +66,8 @@ AWSCLI_ZIP_FILE="awscliv2.zip"
#-----------------------------------------------------------
# Parameters for configure(set environments)
#-----------------------------------------------------------
# shellcheck disable=SC2089
CONFIGURE_OPTIONS="CXXFLAGS='-O -std=c++03 -DS3FS_PTHREAD_ERRORCHECK=1' --prefix=/usr --with-openssl"
CXXFLAGS="-O -DS3FS_PTHREAD_ERRORCHECK=1"
CONFIGURE_OPTIONS="--prefix=/usr --with-openssl"
#-----------------------------------------------------------
# OS dependent variables
@ -80,12 +80,21 @@ PACKAGE_INSTALL_ADDITIONAL_OPTIONS=""
SHELLCHECK_DIRECT_INSTALL=0
AWSCLI_DIRECT_INSTALL=1
if [ "${CONTAINER_FULLNAME}" = "ubuntu:22.04" ]; then
if [ "${CONTAINER_FULLNAME}" = "ubuntu:24.04" ]; then
PACKAGE_MANAGER_BIN="apt-get"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
INSTALL_PACKAGES="autoconf autotools-dev default-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip unzip"
INSTALL_PACKAGES="autoconf autotools-dev clang-tidy openjdk-21-jre-headless fuse jq libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mailcap libtool pkg-config libssl-dev attr curl python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""
elif [ "${CONTAINER_FULLNAME}" = "ubuntu:22.04" ]; then
PACKAGE_MANAGER_BIN="apt-get"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
INSTALL_PACKAGES="autoconf autotools-dev clang-tidy openjdk-17-jre-headless fuse jq libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""
@ -94,25 +103,16 @@ elif [ "${CONTAINER_FULLNAME}" = "ubuntu:20.04" ]; then
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
INSTALL_PACKAGES="autoconf autotools-dev default-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip unzip"
INSTALL_PACKAGES="autoconf autotools-dev clang-tidy openjdk-17-jre-headless fuse jq libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""
elif [ "${CONTAINER_FULLNAME}" = "ubuntu:18.04" ]; then
elif [ "${CONTAINER_FULLNAME}" = "debian:bookworm" ]; then
PACKAGE_MANAGER_BIN="apt-get"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
INSTALL_PACKAGES="autoconf autotools-dev default-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""
elif [ "${CONTAINER_FULLNAME}" = "ubuntu:16.04" ]; then
PACKAGE_MANAGER_BIN="apt-get"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
INSTALL_PACKAGES="autoconf autotools-dev default-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip unzip"
INSTALL_PACKAGES="autoconf autotools-dev clang-tidy openjdk-17-jre-headless fuse jq libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl procps python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""
@ -121,7 +121,7 @@ elif [ "${CONTAINER_FULLNAME}" = "debian:bullseye" ]; then
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
INSTALL_PACKAGES="autoconf autotools-dev default-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl procps python3-pip unzip"
INSTALL_PACKAGES="autoconf autotools-dev clang-tidy openjdk-17-jre-headless fuse jq libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl procps python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""
@ -130,7 +130,7 @@ elif [ "${CONTAINER_FULLNAME}" = "debian:buster" ]; then
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
INSTALL_PACKAGES="autoconf autotools-dev default-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl procps python3-pip unzip"
INSTALL_PACKAGES="autoconf autotools-dev clang-tidy default-jre-headless fuse jq libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl procps python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""
@ -146,7 +146,7 @@ elif [ "${CONTAINER_FULLNAME}" = "rockylinux:9" ]; then
#
PACKAGE_INSTALL_ADDITIONAL_OPTIONS="--allowerasing"
INSTALL_PACKAGES="curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-11-openjdk-headless libxml2-devel mailcap git automake make openssl-devel attr diffutils curl python3 procps unzip xz https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm"
INSTALL_PACKAGES="clang-tools-extra curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-17-openjdk-headless jq libxml2-devel mailcap git automake make openssl openssl-devel attr diffutils curl python3 procps unzip xz https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm"
INSTALL_CHECKER_PKGS="cppcheck"
INSTALL_CHECKER_PKG_OPTIONS="--enablerepo=epel"
@ -160,7 +160,7 @@ elif [ "${CONTAINER_FULLNAME}" = "rockylinux:8" ]; then
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
INSTALL_PACKAGES="curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-11-openjdk-headless libxml2-devel mailcap git automake make openssl-devel attr diffutils curl python3 unzip"
INSTALL_PACKAGES="clang-tools-extra curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-17-openjdk-headless jq libxml2-devel mailcap git automake make openssl openssl-devel attr diffutils curl python3 unzip"
INSTALL_CHECKER_PKGS="cppcheck"
INSTALL_CHECKER_PKG_OPTIONS="--enablerepo=powertools"
@ -179,27 +179,25 @@ elif [ "${CONTAINER_FULLNAME}" = "centos:centos7" ]; then
# And in this version, it cannot be passed due to following error.
# "shellcheck: ./test/integration-test-main.sh: hGetContents: invalid argument (invalid byte sequence)"
#
INSTALL_PACKAGES="curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-11-openjdk-headless libxml2-devel mailcap git automake make openssl-devel attr curl python3 epel-release unzip"
INSTALL_CHECKER_PKGS="cppcheck"
INSTALL_PACKAGES="curl-devel fuse fuse-devel gcc libstdc++-devel llvm-toolset-7-clang-tools-extra gcc-c++ glibc-langpack-en java-11-openjdk-headless libxml2-devel mailcap git automake make openssl openssl-devel attr curl python3 epel-release unzip"
INSTALL_CHECKER_PKGS="cppcheck jq"
INSTALL_CHECKER_PKG_OPTIONS="--enablerepo=epel"
elif [ "${CONTAINER_FULLNAME}" = "fedora:38" ]; then
elif [ "${CONTAINER_FULLNAME}" = "fedora:40" ]; then
PACKAGE_MANAGER_BIN="dnf"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
# TODO: Cannot use java-latest-openjdk (17) due to modules issue in S3Proxy/jclouds/Guice
INSTALL_PACKAGES="curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-11-openjdk-headless libxml2-devel mailcap git automake make openssl-devel curl attr diffutils procps python3-pip unzip"
INSTALL_PACKAGES="clang-tools-extra curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-latest-openjdk-headless jq libxml2-devel mailcap git automake make openssl openssl-devel curl attr diffutils procps python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck ShellCheck"
INSTALL_CHECKER_PKG_OPTIONS=""
elif [ "${CONTAINER_FULLNAME}" = "fedora:37" ]; then
elif [ "${CONTAINER_FULLNAME}" = "fedora:39" ]; then
PACKAGE_MANAGER_BIN="dnf"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
# TODO: Cannot use java-latest-openjdk (17) due to modules issue in S3Proxy/jclouds/Guice
INSTALL_PACKAGES="curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-11-openjdk-headless libxml2-devel mailcap git automake make openssl-devel curl attr diffutils procps python3-pip unzip"
INSTALL_PACKAGES="clang-tools-extra curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-latest-openjdk-headless jq libxml2-devel mailcap git automake make openssl openssl-devel curl attr diffutils procps python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck ShellCheck"
INSTALL_CHECKER_PKG_OPTIONS=""
@ -208,16 +206,16 @@ elif [ "${CONTAINER_FULLNAME}" = "opensuse/leap:15" ]; then
PACKAGE_UPDATE_OPTIONS="refresh"
PACKAGE_INSTALL_OPTIONS="install -y"
INSTALL_PACKAGES="automake curl-devel fuse fuse-devel gcc-c++ java-11-openjdk-headless libxml2-devel make openssl-devel python3-pip curl attr ShellCheck unzip"
INSTALL_PACKAGES="automake clang-tools curl-devel fuse fuse-devel gcc-c++ java-17-openjdk-headless jq libxml2-devel make openssl openssl-devel python3-pip curl attr ShellCheck unzip"
INSTALL_CHECKER_PKGS="cppcheck ShellCheck"
INSTALL_CHECKER_PKG_OPTIONS=""
elif [ "${CONTAINER_FULLNAME}" = "alpine:3.17" ]; then
elif [ "${CONTAINER_FULLNAME}" = "alpine:3.19" ]; then
PACKAGE_MANAGER_BIN="apk"
PACKAGE_UPDATE_OPTIONS="update --no-progress"
PACKAGE_INSTALL_OPTIONS="add --no-progress --no-cache"
INSTALL_PACKAGES="bash curl g++ make automake autoconf libtool git curl-dev fuse-dev libxml2-dev coreutils procps attr sed mailcap openjdk11 aws-cli"
INSTALL_PACKAGES="bash clang-extra-tools curl g++ make automake autoconf libtool git curl-dev fuse-dev jq libxml2-dev openssl coreutils procps attr sed mailcap openjdk17 aws-cli"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""
@ -252,7 +250,7 @@ echo "${PRGNAME} [INFO] Install cppcheck package."
if [ "${SHELLCHECK_DIRECT_INSTALL}" -eq 1 ]; then
echo "${PRGNAME} [INFO] Install shellcheck package from github archive."
if ! LATEST_SHELLCHECK_DOWNLOAD_URL=$(curl -s -S https://api.github.com/repos/koalaman/shellcheck/releases/latest | grep '"browser_download_url"' | grep 'linux.x86_64' | sed -e 's|"||g' -e 's|^.*browser_download_url:[[:space:]]*||g' | tr -d '\n'); then
if ! LATEST_SHELLCHECK_DOWNLOAD_URL=$(curl --silent --show-error https://api.github.com/repos/koalaman/shellcheck/releases/latest | jq -r '.assets[].browser_download_url | select(contains("linux.x86_64"))'); then
echo "Could not get shellcheck package url"
exit 1
fi
@ -292,8 +290,8 @@ fi
#-----------------------------------------------------------
echo "${PRGNAME} [INFO] Set environment for configure options"
# shellcheck disable=SC2090
export CONFIGURE_OPTIONS
echo "CXXFLAGS=${CXXFLAGS}" >> "${GITHUB_ENV}"
echo "CONFIGURE_OPTIONS=${CONFIGURE_OPTIONS}" >> "${GITHUB_ENV}"
echo "${PRGNAME} [INFO] Finish Linux helper for installing packages."

View File

@ -6,7 +6,9 @@ If you want specific instructions for some distributions, check the [wiki](https
Keep in mind using the pre-built packages when available.
1. Ensure your system satisfies build and runtime dependencies for:
## Compilation on Linux
### Ensure your system satisfies build and runtime dependencies for:
* fuse >= 2.8.4
* automake
@ -14,7 +16,10 @@ Keep in mind using the pre-built packages when available.
* make
* libcurl
* libxml2
* openssl
* openssl/gnutls/nss
* Please prepare the library according to the OS on which you will compile.
* It is necessary to match the library used by libcurl.
* Install the OpenSSL, GnuTLS or NSS devel package.
* mime.types (the package providing depends on the OS)
* s3fs tries to detect `/etc/mime.types` as default regardless of the OS
* Else s3fs tries to detect `/etc/apache2/mime.types` if OS is macOS
@ -22,16 +27,38 @@ Keep in mind using the pre-built packages when available.
* Alternatively, you can set mime.types file path with `mime` option without detecting these default files
* pkg-config (or your OS equivalent)
2. Then compile from master via the following commands:
* NOTE
If you have any trouble with the details of the required packages, see `INSTALL_PACKAGES` in [linux-ci-helper.sh](https://github.com/s3fs-fuse/s3fs-fuse/blob/master/.github/workflows/linux-ci-helper.sh).
### Then compile from master via the following commands:
1. Clone the source code:
```sh
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
```
2. Configuration:
```sh
cd s3fs-fuse
./autogen.sh
./configure
```
Depending on the TLS library (OpenSSL/GnuTLS/NSS), add `--with-openssl`, `--with-gnutls` or `--with-nss` when executing `configure`. (If omitted, it is equivalent to `--with-openssl`.)
3. Building:
```sh
make
```
4. Installing:
```sh
sudo make install
```
### NOTE - The libraries/components required to run s3fs are:
* fuse >= 2.8.4
* libcurl
* libxml2
* openssl/gnutls/nss
* mime.types (the package providing depends on the OS)
```
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
cd s3fs-fuse
./autogen.sh
./configure
make
sudo make install
```
## Compilation on Windows (using MSYS2)

View File

@ -1,6 +1,23 @@
ChangeLog for S3FS
------------------
Version 1.94 -- 23 Feb, 2024 (major changes only)
#2409 - Fixed a bug that caused mounting with kmsid specified to fail
#2404 - Fixed ordering problem between fdatasync and flush
#2399 - Fixed ListBucket/IAM edge cases
#2376 - Corrected list_bucket to search the stat cache while creating a new file
#2369 - Make dir size 4096 not 0
#2351 - Added option free_space_ratio to control cache size
#2325 - Fixed a bug in upload boundary calculation in StreamUpload
#2298 - Abort MPU when MPU fails to avoid litter
#2261 - Use explicit ownership for memory
#2179 - Require C++11
Version 1.93 -- 19 Jul, 2023 (major changes only)
#2212 - Allow listing implicit directories
#2194 - #2209 - #2211 - #2214 - #2215 - Fix thread safety issues
#2191 - #2201 - Add support for FUSE-T on macOS
Version 1.92 -- 21 May, 2023 (major changes only)
#1802 - #2104 - New option: streamupload
#1922 - Enable noobj_cache by default

View File

@ -34,7 +34,7 @@ release : dist ../utils/release.sh
cppcheck:
cppcheck --quiet --error-exitcode=1 \
--inline-suppr \
--std=c++03 \
--std=c++11 \
--xml \
-D HAVE_ATTR_XATTR_H \
-D HAVE_SYS_EXTATTR_H \
@ -44,6 +44,8 @@ cppcheck:
--enable=warning,style,information,missingInclude \
--suppress=missingIncludeSystem \
--suppress=unmatchedSuppression \
--suppress=useStlAlgorithm \
--suppress=checkLevelNormal \
src/ test/
#
@ -80,6 +82,6 @@ shellcheck:
# tab-width: 4
# c-basic-offset: 4
# End:
# vim600: expandtab sw=4 ts= fdm=marker
# vim600: expandtab sw=4 ts=4 fdm=marker
# vim<600: expandtab sw=4 ts=4
#

View File

@ -4,9 +4,11 @@ s3fs allows Linux, macOS, and FreeBSD to mount an S3 bucket via [FUSE(Filesystem
s3fs lets you operate on files and directories in an S3 bucket like a local file system.
s3fs preserves the native object format for files, allowing use of other tools like [AWS CLI](https://github.com/aws/aws-cli).
[![s3fs-fuse CI](https://github.com/s3fs-fuse/s3fs-fuse/workflows/s3fs-fuse%20CI/badge.svg)](https://github.com/s3fs-fuse/s3fs-fuse/actions)
[![s3fs-fuse CI](https://github.com/s3fs-fuse/s3fs-fuse/actions/workflows/ci.yml/badge.svg)](https://github.com/s3fs-fuse/s3fs-fuse/actions/workflows/ci.yml)
[![Twitter Follow](https://img.shields.io/twitter/follow/s3fsfuse.svg?style=social&label=Follow)](https://twitter.com/s3fsfuse)
![s3fs-fuse](https://github.com/ggtakec/s3fs-fuse-images/blob/master/images/s3fslogo.png)
## Features
* large subset of POSIX including reading/writing files, directories, symlinks, mode, uid/gid, and extended attributes
@ -93,6 +95,8 @@ Otherwise consult the [compilation instructions](COMPILATION.md).
s3fs supports the standard
[AWS credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-config-files.html)
stored in `${HOME}/.aws/credentials`. Alternatively, s3fs supports a custom passwd file.
Finally s3fs recognizes the `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_SESSION_TOKEN`
environment variables.
The default location for the s3fs password file can be created:

View File

@ -20,7 +20,7 @@
dnl Process this file with autoconf to produce a configure script.
AC_PREREQ([2.69])
AC_INIT([s3fs],[1.92])
AC_INIT([s3fs],[1.94])
AC_CONFIG_HEADER([config.h])
AC_CANONICAL_TARGET
@ -34,7 +34,7 @@ AC_CHECK_HEADERS([attr/xattr.h])
AC_CHECK_HEADERS([sys/extattr.h])
AC_CHECK_FUNCS([fallocate])
CXXFLAGS="$CXXFLAGS -Wall -fno-exceptions -D_FILE_OFFSET_BITS=64 -D_FORTIFY_SOURCE=2"
CXXFLAGS="-Wall -fno-exceptions -D_FILE_OFFSET_BITS=64 -D_FORTIFY_SOURCE=3 -std=c++11 $CXXFLAGS"
dnl ----------------------------------------------
dnl For macOS
@ -48,6 +48,7 @@ case "$target" in
*-darwin* )
# Do something specific for mac
min_fuse_version=2.7.3
min_fuse_t_version=1.0.20
;;
*)
# Default Case
@ -56,6 +57,18 @@ case "$target" in
;;
esac
dnl ----------------------------------------------
dnl Checking the FUSE library
dnl ----------------------------------------------
dnl Distinguish between Linux (libfuse) and macOS (FUSE-T).
dnl
found_fuse_t=no
PKG_CHECK_MODULES([FUSE_T], [fuse-t >= ${min_fuse_t_version}], [found_fuse_t=yes], [found_fuse_t=no])
AS_IF([test "$found_fuse_t" = "yes"],
[PKG_CHECK_MODULES([fuse_library_checking], [fuse-t >= ${min_fuse_t_version}])],
[PKG_CHECK_MODULES([fuse_library_checking], [fuse >= ${min_fuse_version}])])
dnl ----------------------------------------------
dnl Choice SSL library
dnl ----------------------------------------------
@ -181,15 +194,16 @@ AS_IF(
dnl
dnl For PKG_CONFIG before checking nss/gnutls.
dnl this is redundant checking, but we need checking before following.
dnl
PKG_CHECK_MODULES([common_lib_checking], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 ])
AC_MSG_CHECKING([compile s3fs with])
case "${auth_lib}" in
openssl)
AC_MSG_RESULT(OpenSSL)
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ])
AS_IF([test "$found_fuse_t" = "yes"],
[PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ])],
[PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ])])
AC_MSG_CHECKING([openssl 3.0 or later])
AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM([[#include <openssl/opensslv.h>
@ -206,7 +220,9 @@ gnutls)
AS_IF([test "$gnutls_nettle" = ""], [AC_CHECK_LIB(gcrypt, gcry_control, [gnutls_nettle=0])])
AS_IF([test $gnutls_nettle = 0],
[
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ])
AS_IF([test "$found_fuse_t" = "yes"],
[PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ])],
[PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ])])
LIBS="-lgnutls -lgcrypt $LIBS"
AC_MSG_CHECKING([gnutls is build with])
AC_MSG_RESULT(gcrypt)
@ -220,7 +236,9 @@ nettle)
AS_IF([test "$gnutls_nettle" = ""], [AC_CHECK_LIB(nettle, nettle_MD5Init, [gnutls_nettle=1])])
AS_IF([test $gnutls_nettle = 1],
[
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ])
AS_IF([test "$found_fuse_t" = "yes"],
[PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ])],
[PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ])])
LIBS="-lgnutls -lnettle $LIBS"
AC_MSG_CHECKING([gnutls is build with])
AC_MSG_RESULT(nettle)
@ -229,7 +247,9 @@ nettle)
;;
nss)
AC_MSG_RESULT(NSS)
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ])
AS_IF([test "$found_fuse_t" = "yes"],
[PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ])],
[PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ])])
;;
*)
AC_MSG_ERROR([unknown ssl library type.])

View File

@ -110,6 +110,7 @@ You can use "k" for short "kmsid".
If you specify the SSE-KMS type with your <kms id> in AWS KMS, you can set it after "kmsid:" (or "k:").
If you specify only "kmsid" ("k"), you need to set the AWSSSEKMSID environment variable whose value is <kms id>.
Be careful that you cannot use a KMS id that is not in the same region as the EC2 instance.
Additionally, if you specify SSE-KMS, your endpoints must use Secure Sockets Layer(SSL) or Transport Layer Security(TLS).
.TP
\fB\-o\fR load_sse_c - specify SSE-C keys
Specify the custom-provided encryption keys file path for decrypting at downloading.
@ -178,6 +179,18 @@ server certificate won't be checked against the available certificate authoritie
\fB\-o\fR ssl_verify_hostname (default="2")
When 0, do not verify the SSL certificate against the hostname.
.TP
\fB\-o\fR ssl_client_cert (default="")
Specify an SSL client certificate.
Specify this optional parameter in the following format:
"<SSL Cert>[:<Cert Type>[:<Private Key>[:<Key Type>
[:<Password>]]]]"
<SSL Cert>: Client certificate.
Specify the file path or NickName(for NSS, etc.).
<Cert Type>: Type of certificate, default is "PEM"(optional).
<Private Key>: Certificate's private key file(optional).
<Key Type>: Type of private key, default is "PEM"(optional).
<Password>: Passphrase of the private key(optional). It is also possible to omit this value and specify it using the environment variable "S3FS_SSL_PRIVKEY_PASSWORD".
.TP
\fB\-o\fR nodnscache - disable DNS cache.
s3fs always uses a DNS cache; this option disables it.
.TP
@ -227,6 +240,12 @@ sets MB to ensure disk free space. This option means the threshold of free space
s3fs creates files on disk for downloading, uploading and caching.
If the free disk space is smaller than this value, s3fs avoids using disk space as much as possible, in exchange for performance.
.TP
\fB\-o\fR free_space_ratio (default="10")
sets the minimum free space ratio of the disk, as a value between 0 and 100.
s3fs controls the size of its cache according to this ratio so that the free ratio of the disk stays above this value.
For example, for a disk of 50GB, the default value ensures that the disk keeps at least 50GB * 10%% = 5GB of free space.
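As a sketch with made-up numbers (bucket and mount point are placeholders), a stricter setting of 20 would aim to keep at least 20 percent of the disk free, i.e. 40GB on a 200GB disk, by shrinking the local cache:
  s3fs mybucket /path/to/mountpoint -o free_space_ratio=20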
.TP
\fB\-o\fR multipart_threshold (default="25")
threshold, in MB, to use multipart upload instead of
single-part. Must be at least 5 MB.
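As an illustration (bucket and mount point are placeholders), raising the threshold so that only files larger than 100 MB use multipart upload could look like:
  s3fs mybucket /path/to/mountpoint -o multipart_threshold=100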
@ -411,6 +430,12 @@ Username and passphrase are valid only for HTTP schema.
If the HTTP proxy does not require authentication, this option is not required.
Separate the username and passphrase with a ':' character and specify each as a URL-encoded string.
.TP
\fB\-o\fR ipresolve (default="whatever")
Select which type of IP addresses to use when establishing a connection.
The default ('whatever') allows addresses of any IP version (IPv4 and IPv6) that your system supports.
If 'IPv4' is specified, only IPv4 addresses are used; if 'IPv6' is specified, only IPv6 addresses are used.
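For instance, to restrict connections to IPv4 only (bucket and mount point are placeholders):
  s3fs mybucket /path/to/mountpoint -o ipresolve=IPv4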
.TP
\fB\-o\fR logfile - specify the log output file.
By default, s3fs writes its log to syslog. Alternatively, if s3fs is started with the "-f" option, the log is written to stdout/stderr.
You can use this option to specify a log file that s3fs writes to instead.
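For example, to write the log to a file instead of syslog (the path is illustrative):
  s3fs mybucket /path/to/mountpoint -o logfile=/var/log/s3fs.log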

Binary image file changed (not shown; previous size 5.3 KiB)

View File

@ -35,12 +35,10 @@ s3fs_SOURCES = \
s3fs_xml.cpp \
metaheader.cpp \
mpu_util.cpp \
mvnode.cpp \
curl.cpp \
curl_handlerpool.cpp \
curl_multi.cpp \
curl_util.cpp \
bodydata.cpp \
s3objlist.cpp \
cache.cpp \
string_util.cpp \

View File

@ -34,7 +34,7 @@
//-------------------------------------------------------------------
// Symbols
//-------------------------------------------------------------------
#define ADD_HEAD_REGEX "reg:"
static constexpr char ADD_HEAD_REGEX[] = "reg:";
//-------------------------------------------------------------------
// Class AdditionalHeader
@ -65,7 +65,7 @@ AdditionalHeader::~AdditionalHeader()
bool AdditionalHeader::Load(const char* file)
{
if(!file){
S3FS_PRN_WARN("file is NULL.");
S3FS_PRN_WARN("file is nullptr.");
return false;
}
Unload();
@ -78,7 +78,6 @@ bool AdditionalHeader::Load(const char* file)
// read file
std::string line;
ADDHEAD *paddhead;
while(getline(AH, line)){
if(line.empty()){
continue;
@ -111,49 +110,32 @@ bool AdditionalHeader::Load(const char* file)
return false;
}
paddhead = new ADDHEAD;
if(0 == strncasecmp(key.c_str(), ADD_HEAD_REGEX, strlen(ADD_HEAD_REGEX))){
// regex
if(key.size() <= strlen(ADD_HEAD_REGEX)){
S3FS_PRN_ERR("file format error: %s key(suffix) does not have key std::string.", key.c_str());
delete paddhead;
continue;
}
key.erase(0, strlen(ADD_HEAD_REGEX));
// compile
regex_t* preg = new regex_t;
int result;
if(0 != (result = regcomp(preg, key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info
char errbuf[256];
regerror(result, preg, errbuf, sizeof(errbuf));
S3FS_PRN_ERR("failed to compile regex from %s key by %s.", key.c_str(), errbuf);
delete preg;
delete paddhead;
continue;
}
// set
paddhead->pregex = preg;
paddhead->basestring = key;
paddhead->headkey = head;
paddhead->headvalue = value;
// compile
std::unique_ptr<regex_t> preg(new regex_t);
int result;
if(0 != (result = regcomp(preg.get(), key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info
char errbuf[256];
regerror(result, preg.get(), errbuf, sizeof(errbuf));
S3FS_PRN_ERR("failed to compile regex from %s key by %s.", key.c_str(), errbuf);
continue;
}
addheadlist.emplace_back(std::move(preg), key, head, value);
}else{
// not regex, directly comparing
paddhead->pregex = NULL;
paddhead->basestring = key;
paddhead->headkey = head;
paddhead->headvalue = value;
addheadlist.emplace_back(nullptr, key, head, value);
}
// add list
addheadlist.push_back(paddhead);
// set flag
if(!is_enable){
is_enable = true;
}
is_enable = true;
}
return true;
}
@ -162,16 +144,6 @@ void AdditionalHeader::Unload()
{
is_enable = false;
for(addheadlist_t::iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
ADDHEAD *paddhead = *iter;
if(paddhead){
if(paddhead->pregex){
regfree(paddhead->pregex);
delete paddhead->pregex;
}
delete paddhead;
}
}
addheadlist.clear();
}
@ -181,7 +153,7 @@ bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const
return true;
}
if(!path){
S3FS_PRN_WARN("path is NULL.");
S3FS_PRN_WARN("path is nullptr.");
return false;
}
@ -193,22 +165,19 @@ bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const
// Because to allow duplicate key, and then scanning the entire table.
//
for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
const ADDHEAD *paddhead = *iter;
if(!paddhead){
continue;
}
const add_header *paddhead = &*iter;
if(paddhead->pregex){
// regex
regmatch_t match; // not use
if(0 == regexec(paddhead->pregex, path, 1, &match, 0)){
if(0 == regexec(paddhead->pregex.get(), path, 1, &match, 0)){
// match -> adding header
meta[paddhead->headkey] = paddhead->headvalue;
}
}else{
// directly comparing
if(paddhead->basestring.length() < pathlength){
if(paddhead->basestring.empty() || 0 == strcmp(&path[pathlength - paddhead->basestring.length()], paddhead->basestring.c_str())){
if(paddhead->basestring.empty() || paddhead->basestring == &path[pathlength - paddhead->basestring.length()]){
// match -> adding header
meta[paddhead->headkey] = paddhead->headvalue;
}
@ -246,19 +215,17 @@ bool AdditionalHeader::Dump() const
ssdbg << "Additional Header list[" << addheadlist.size() << "] = {" << std::endl;
for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter, ++cnt){
const ADDHEAD *paddhead = *iter;
const add_header *paddhead = &*iter;
ssdbg << " [" << cnt << "] = {" << std::endl;
ssdbg << " [" << cnt << "] = {" << std::endl;
if(paddhead){
if(paddhead->pregex){
ssdbg << " type\t\t--->\tregex" << std::endl;
}else{
ssdbg << " type\t\t--->\tsuffix matching" << std::endl;
}
ssdbg << " base std::string\t--->\t" << paddhead->basestring << std::endl;
ssdbg << " add header\t--->\t" << paddhead->headkey << ": " << paddhead->headvalue << std::endl;
if(paddhead->pregex){
ssdbg << " type\t\t--->\tregex" << std::endl;
}else{
ssdbg << " type\t\t--->\tsuffix matching" << std::endl;
}
ssdbg << " base std::string\t--->\t" << paddhead->basestring << std::endl;
ssdbg << " add header\t--->\t" << paddhead->headkey << ": " << paddhead->headvalue << std::endl;
ssdbg << " }" << std::endl;
}

View File

@ -21,21 +21,40 @@
#ifndef S3FS_ADDHEAD_H_
#define S3FS_ADDHEAD_H_
#include <memory>
#include <regex.h>
#include <vector>
#include "metaheader.h"
//----------------------------------------------
// Structure / Typedef
//----------------------------------------------
typedef struct add_header{
regex_t* pregex; // not NULL means using regex, NULL means comparing suffix directly.
struct add_header{
add_header(std::unique_ptr<regex_t> pregex, std::string basestring, std::string headkey, std::string headvalue)
: pregex(std::move(pregex))
, basestring(std::move(basestring))
, headkey(std::move(headkey))
, headvalue(std::move(headvalue))
{}
~add_header() {
if(pregex){
regfree(pregex.get());
}
}
add_header(const add_header&) = delete;
add_header(add_header&& val) = default;
add_header& operator=(const add_header&) = delete;
add_header& operator=(add_header&&) = delete;
std::unique_ptr<regex_t> pregex; // not nullptr means using regex, nullptr means comparing suffix directly.
std::string basestring;
std::string headkey;
std::string headvalue;
}ADDHEAD;
};
typedef std::vector<ADDHEAD *> addheadlist_t;
typedef std::vector<add_header> addheadlist_t;
//----------------------------------------------
// Class AdditionalHeader
@ -50,6 +69,10 @@ class AdditionalHeader
protected:
AdditionalHeader();
~AdditionalHeader();
AdditionalHeader(const AdditionalHeader&) = delete;
AdditionalHeader(AdditionalHeader&&) = delete;
AdditionalHeader& operator=(const AdditionalHeader&) = delete;
AdditionalHeader& operator=(AdditionalHeader&&) = delete;
public:
// Reference singleton

View File

@ -40,7 +40,10 @@ class AutoLock
bool is_lock_acquired;
private:
AutoLock(const AutoLock&);
AutoLock(const AutoLock&) = delete;
AutoLock(AutoLock&&) = delete;
AutoLock& operator=(const AutoLock&) = delete;
AutoLock& operator=(AutoLock&&) = delete;
public:
explicit AutoLock(pthread_mutex_t* pmutex, Type type = NONE);

View File

@ -1,122 +0,0 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "s3fs_logger.h"
#include "bodydata.h"
//-------------------------------------------------------------------
// Variables
//-------------------------------------------------------------------
static const int BODYDATA_RESIZE_APPEND_MIN = 1024;
static const int BODYDATA_RESIZE_APPEND_MID = 1024 * 1024;
static const int BODYDATA_RESIZE_APPEND_MAX = 10 * 1024 * 1024;
//-------------------------------------------------------------------
// Utility Functions
//-------------------------------------------------------------------
static size_t adjust_block(size_t bytes, size_t block)
{
return ((bytes / block) + ((bytes % block) ? 1 : 0)) * block;
}
//-------------------------------------------------------------------
// Class BodyData
//-------------------------------------------------------------------
bool BodyData::Resize(size_t addbytes)
{
if(IsSafeSize(addbytes)){
return true;
}
// New size
size_t need_size = adjust_block((lastpos + addbytes + 1) - bufsize, sizeof(off_t));
if(BODYDATA_RESIZE_APPEND_MAX < bufsize){
need_size = (BODYDATA_RESIZE_APPEND_MAX < need_size ? need_size : BODYDATA_RESIZE_APPEND_MAX);
}else if(BODYDATA_RESIZE_APPEND_MID < bufsize){
need_size = (BODYDATA_RESIZE_APPEND_MID < need_size ? need_size : BODYDATA_RESIZE_APPEND_MID);
}else if(BODYDATA_RESIZE_APPEND_MIN < bufsize){
need_size = ((bufsize * 2) < need_size ? need_size : (bufsize * 2));
}else{
need_size = (BODYDATA_RESIZE_APPEND_MIN < need_size ? need_size : BODYDATA_RESIZE_APPEND_MIN);
}
// realloc
char* newtext;
if(NULL == (newtext = reinterpret_cast<char*>(realloc(text, (bufsize + need_size))))){
S3FS_PRN_CRIT("not enough memory (realloc returned NULL)");
free(text);
text = NULL;
return false;
}
text = newtext;
bufsize += need_size;
return true;
}
void BodyData::Clear()
{
if(text){
free(text);
text = NULL;
}
lastpos = 0;
bufsize = 0;
}
bool BodyData::Append(void* ptr, size_t bytes)
{
if(!ptr){
return false;
}
if(0 == bytes){
return true;
}
if(!Resize(bytes)){
return false;
}
memcpy(&text[lastpos], ptr, bytes);
lastpos += bytes;
text[lastpos] = '\0';
return true;
}
const char* BodyData::str() const
{
if(!text){
static const char strnull[] = "";
return strnull;
}
return text;
}
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/

View File

@ -1,72 +0,0 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_BODYDATA_H_
#define S3FS_BODYDATA_H_
//----------------------------------------------
// Class BodyData
//----------------------------------------------
// memory class for curl write memory callback
//
class BodyData
{
private:
char* text;
size_t lastpos;
size_t bufsize;
private:
bool IsSafeSize(size_t addbytes) const
{
return ((lastpos + addbytes + 1) > bufsize ? false : true);
}
bool Resize(size_t addbytes);
public:
BodyData() : text(NULL), lastpos(0), bufsize(0) {}
~BodyData()
{
Clear();
}
void Clear();
bool Append(void* ptr, size_t bytes);
bool Append(void* ptr, size_t blockSize, size_t numBlocks)
{
return Append(ptr, (blockSize * numBlocks));
}
const char* str() const;
size_t size() const
{
return lastpos;
}
};
#endif // S3FS_BODYDATA_H_
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/

View File

@ -84,9 +84,9 @@ struct sort_statiterlist{
// ascending order
bool operator()(const stat_cache_t::iterator& src1, const stat_cache_t::iterator& src2) const
{
int result = CompareStatCacheTime(src1->second->cache_date, src2->second->cache_date);
int result = CompareStatCacheTime(src1->second.cache_date, src2->second.cache_date);
if(0 == result){
if(src1->second->hit_count < src2->second->hit_count){
if(src1->second.hit_count < src2->second.hit_count){
result = -1;
}
}
@ -103,9 +103,9 @@ struct sort_symlinkiterlist{
// ascending order
bool operator()(const symlink_cache_t::iterator& src1, const symlink_cache_t::iterator& src2) const
{
int result = CompareStatCacheTime(src1->second->cache_date, src2->second->cache_date); // use the same as Stats
int result = CompareStatCacheTime(src1->second.cache_date, src2->second.cache_date); // use the same as Stats
if(0 == result){
if(src1->second->hit_count < src2->second->hit_count){
if(src1->second.hit_count < src2->second.hit_count){
result = -1;
}
}
@ -204,9 +204,6 @@ void StatCache::Clear()
{
AutoLock lock(&StatCache::stat_cache_lock);
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ++iter){
delete (*iter).second;
}
stat_cache.clear();
S3FS_MALLOCTRIM(0);
}
@ -228,8 +225,8 @@ bool StatCache::GetStat(const std::string& key, struct stat* pst, headers_t* met
iter = stat_cache.find(strpath);
}
if(iter != stat_cache.end() && (*iter).second){
stat_cache_entry* ent = (*iter).second;
if(iter != stat_cache.end()){
stat_cache_entry* ent = &iter->second;
if(0 < ent->notruncate || !IsExpireTime || !IsExpireStatCacheTime(ent->cache_date, ExpireTime)){
if(ent->noobjcache){
if(!IsCacheNoObject){
@ -248,7 +245,7 @@ bool StatCache::GetStat(const std::string& key, struct stat* pst, headers_t* met
std::string tag = lower(hiter->first);
if(tag == "etag"){
stretag = hiter->second;
if('\0' != petag[0] && 0 != strcmp(petag, stretag.c_str())){
if('\0' != petag[0] && petag != stretag){
is_delete_cache = true;
}
break;
@ -264,13 +261,13 @@ bool StatCache::GetStat(const std::string& key, struct stat* pst, headers_t* met
S3FS_PRN_DBG("stat cache hit [path=%s][time=%lld.%09ld][hit count=%lu]",
strpath.c_str(), static_cast<long long>(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count);
if(pst!= NULL){
if(pst!= nullptr){
*pst= ent->stbuf;
}
if(meta != NULL){
if(meta != nullptr){
*meta = ent->meta;
}
if(pisforce != NULL){
if(pisforce != nullptr){
(*pisforce) = ent->isforce;
}
ent->hit_count++;
@ -314,12 +311,12 @@ bool StatCache::IsNoObjectCache(const std::string& key, bool overcheck)
iter = stat_cache.find(strpath);
}
if(iter != stat_cache.end() && (*iter).second) {
stat_cache_entry* ent = (*iter).second;
if(0 < ent->notruncate || !IsExpireTime || !IsExpireStatCacheTime((*iter).second->cache_date, ExpireTime)){
if((*iter).second->noobjcache){
if(iter != stat_cache.end()) {
const stat_cache_entry* ent = &iter->second;
if(0 < ent->notruncate || !IsExpireTime || !IsExpireStatCacheTime(iter->second.cache_date, ExpireTime)){
if(iter->second.noobjcache){
// noobjcache = true means no object.
SetStatCacheTime((*iter).second->cache_date);
SetStatCacheTime((*iter).second.cache_date);
return true;
}
}else{
@ -341,69 +338,66 @@ bool StatCache::AddStat(const std::string& key, const headers_t& meta, bool forc
}
S3FS_PRN_INFO3("add stat cache entry[path=%s]", key.c_str());
bool found;
bool do_truncate;
{
AutoLock lock(&StatCache::stat_cache_lock);
found = stat_cache.end() != stat_cache.find(key);
do_truncate = stat_cache.size() > CacheSize;
}
AutoLock lock(&StatCache::stat_cache_lock);
if(found){
DelStat(key.c_str());
if(stat_cache.end() != stat_cache.find(key)){
// found cache
DelStat(key.c_str(), AutoLock::ALREADY_LOCKED);
}else{
if(do_truncate){
if(!TruncateCache()){
// check: need to truncate cache
if(stat_cache.size() > CacheSize){
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownConditionTrueFalse
if(!TruncateCache(AutoLock::ALREADY_LOCKED)){
return false;
}
}
}
// make new
stat_cache_entry* ent = new stat_cache_entry();
if(!convert_header_to_stat(key.c_str(), meta, &(ent->stbuf), forcedir)){
delete ent;
stat_cache_entry ent;
if(!convert_header_to_stat(key.c_str(), meta, &ent.stbuf, forcedir)){
return false;
}
ent->hit_count = 0;
ent->isforce = forcedir;
ent->noobjcache = false;
ent->notruncate = (no_truncate ? 1L : 0L);
ent->meta.clear();
SetStatCacheTime(ent->cache_date); // Set time.
ent.hit_count = 0;
ent.isforce = forcedir;
ent.noobjcache = false;
ent.notruncate = (no_truncate ? 1L : 0L);
ent.meta.clear();
SetStatCacheTime(ent.cache_date); // Set time.
//copy only some keys
for(headers_t::const_iterator iter = meta.begin(); iter != meta.end(); ++iter){
std::string tag = lower(iter->first);
std::string value = iter->second;
if(tag == "content-type"){
ent->meta[iter->first] = value;
ent.meta[iter->first] = value;
}else if(tag == "content-length"){
ent->meta[iter->first] = value;
ent.meta[iter->first] = value;
}else if(tag == "etag"){
ent->meta[iter->first] = value;
ent.meta[iter->first] = value;
}else if(tag == "last-modified"){
ent->meta[iter->first] = value;
ent.meta[iter->first] = value;
}else if(is_prefix(tag.c_str(), "x-amz")){
ent->meta[tag] = value; // key is lower case for "x-amz"
ent.meta[tag] = value; // key is lower case for "x-amz"
}
}
// add
AutoLock lock(&StatCache::stat_cache_lock);
std::pair<stat_cache_t::iterator, bool> pair = stat_cache.insert(std::make_pair(key, ent));
if(!pair.second){
delete pair.first->second;
pair.first->second = ent;
}
const auto& value = stat_cache[key] = std::move(ent);
// check symbolic link cache
if(!S_ISLNK(ent->stbuf.st_mode)){
if(!S_ISLNK(value.stbuf.st_mode)){
if(symlink_cache.end() != symlink_cache.find(key)){
// if symbolic link cache has key, thus remove it.
DelSymlink(key.c_str(), AutoLock::ALREADY_LOCKED);
}
}
// If no_truncate flag is set, set file name to notruncate_file_cache
//
if(no_truncate){
AddNotruncateCache(key);
}
return true;
}
@ -414,7 +408,7 @@ bool StatCache::AddStat(const std::string& key, const headers_t& meta, bool forc
// Since the file mode may change while the file is open, it is
// updated as well.
//
bool StatCache::UpdateMetaStats(const std::string& key, headers_t& meta)
bool StatCache::UpdateMetaStats(const std::string& key, const headers_t& meta)
{
if(CacheSize < 1){
return true;
@ -423,13 +417,13 @@ bool StatCache::UpdateMetaStats(const std::string& key, headers_t& meta)
AutoLock lock(&StatCache::stat_cache_lock);
stat_cache_t::iterator iter = stat_cache.find(key);
if(stat_cache.end() == iter || !(iter->second)){
if(stat_cache.end() == iter){
return true;
}
stat_cache_entry* ent = iter->second;
stat_cache_entry* ent = &iter->second;
// update only meta keys
for(headers_t::iterator metaiter = meta.begin(); metaiter != meta.end(); ++metaiter){
for(headers_t::const_iterator metaiter = meta.begin(); metaiter != meta.end(); ++metaiter){
std::string tag = lower(metaiter->first);
std::string value = metaiter->second;
if(tag == "content-type"){
@ -464,42 +458,33 @@ bool StatCache::AddNoObjectCache(const std::string& key)
}
S3FS_PRN_INFO3("add no object cache entry[path=%s]", key.c_str());
bool found;
bool do_truncate;
{
AutoLock lock(&StatCache::stat_cache_lock);
found = stat_cache.end() != stat_cache.find(key);
do_truncate = stat_cache.size() > CacheSize;
}
AutoLock lock(&StatCache::stat_cache_lock);
if(found){
DelStat(key.c_str());
if(stat_cache.end() != stat_cache.find(key)){
// found
DelStat(key.c_str(), AutoLock::ALREADY_LOCKED);
}else{
if(do_truncate){
if(!TruncateCache()){
// check: need to truncate cache
if(stat_cache.size() > CacheSize){
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownConditionTrueFalse
if(!TruncateCache(AutoLock::ALREADY_LOCKED)){
return false;
}
}
}
// make new
stat_cache_entry* ent = new stat_cache_entry();
memset(&(ent->stbuf), 0, sizeof(struct stat));
ent->hit_count = 0;
ent->isforce = false;
ent->noobjcache = true;
ent->notruncate = 0L;
ent->meta.clear();
SetStatCacheTime(ent->cache_date); // Set time.
stat_cache_entry ent;
memset(&ent.stbuf, 0, sizeof(struct stat));
ent.hit_count = 0;
ent.isforce = false;
ent.noobjcache = true;
ent.notruncate = 0L;
ent.meta.clear();
SetStatCacheTime(ent.cache_date); // Set time.
// add
AutoLock lock(&StatCache::stat_cache_lock);
std::pair<stat_cache_t::iterator, bool> pair = stat_cache.insert(std::make_pair(key, ent));
if(!pair.second){
delete pair.first->second;
pair.first->second = ent;
}
stat_cache[key] = std::move(ent);
// check symbolic link cache
if(symlink_cache.end() != symlink_cache.find(key)){
@ -515,22 +500,28 @@ void StatCache::ChangeNoTruncateFlag(const std::string& key, bool no_truncate)
stat_cache_t::iterator iter = stat_cache.find(key);
if(stat_cache.end() != iter){
stat_cache_entry* ent = iter->second;
if(ent){
if(no_truncate){
++(ent->notruncate);
}else{
if(0L < ent->notruncate){
--(ent->notruncate);
stat_cache_entry* ent = &iter->second;
if(no_truncate){
if(0L == ent->notruncate){
// need to add no truncate cache.
AddNotruncateCache(key);
}
++(ent->notruncate);
}else{
if(0L < ent->notruncate){
--(ent->notruncate);
if(0L == ent->notruncate){
// need to delete from no truncate cache.
DelNotruncateCache(key);
}
}
}
}
}
bool StatCache::TruncateCache()
bool StatCache::TruncateCache(AutoLock::Type locktype)
{
AutoLock lock(&StatCache::stat_cache_lock);
AutoLock lock(&StatCache::stat_cache_lock, locktype);
if(stat_cache.empty()){
return true;
@ -539,10 +530,9 @@ bool StatCache::TruncateCache()
// 1) erase over expire time
if(IsExpireTime){
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ){
stat_cache_entry* entry = iter->second;
if(!entry || (0L == entry->notruncate && IsExpireStatCacheTime(entry->cache_date, ExpireTime))){
delete entry;
stat_cache.erase(iter++);
const stat_cache_entry* entry = &iter->second;
if(0L == entry->notruncate && IsExpireStatCacheTime(entry->cache_date, ExpireTime)){
iter = stat_cache.erase(iter);
}else{
++iter;
}
@ -559,8 +549,8 @@ bool StatCache::TruncateCache()
statiterlist_t erase_iters;
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end() && 0 < erase_count; ++iter){
// check no truncate
stat_cache_entry* ent = iter->second;
if(ent && 0L < ent->notruncate){
const stat_cache_entry* ent = &iter->second;
if(0L < ent->notruncate){
// skip for no truncate entry and keep extra counts for this entity.
if(0 < erase_count){
--erase_count; // decrement
@ -580,7 +570,6 @@ bool StatCache::TruncateCache()
stat_cache_t::iterator siter = *iiter;
S3FS_PRN_DBG("truncate stat cache[path=%s]", siter->first.c_str());
delete siter->second;
stat_cache.erase(siter);
}
S3FS_MALLOCTRIM(0);
@ -598,9 +587,9 @@ bool StatCache::DelStat(const char* key, AutoLock::Type locktype)
AutoLock lock(&StatCache::stat_cache_lock, locktype);
stat_cache_t::iterator iter;
if(stat_cache.end() != (iter = stat_cache.find(std::string(key)))){
delete (*iter).second;
if(stat_cache.end() != (iter = stat_cache.find(key))){
stat_cache.erase(iter);
DelNotruncateCache(key);
}
if(0 < strlen(key) && 0 != strcmp(key, "/")){
std::string strpath = key;
@ -612,8 +601,8 @@ bool StatCache::DelStat(const char* key, AutoLock::Type locktype)
strpath += "/";
}
if(stat_cache.end() != (iter = stat_cache.find(strpath))){
delete (*iter).second;
stat_cache.erase(iter);
DelNotruncateCache(strpath);
}
}
S3FS_MALLOCTRIM(0);
@ -629,8 +618,8 @@ bool StatCache::GetSymlink(const std::string& key, std::string& value)
AutoLock lock(&StatCache::stat_cache_lock);
symlink_cache_t::iterator iter = symlink_cache.find(strpath);
if(iter != symlink_cache.end() && iter->second){
symlink_cache_entry* ent = iter->second;
if(iter != symlink_cache.end()){
symlink_cache_entry* ent = &iter->second;
if(!IsExpireTime || !IsExpireStatCacheTime(ent->cache_date, ExpireTime)){ // use the same as Stats
// found
S3FS_PRN_DBG("symbolic link cache hit [path=%s][time=%lld.%09ld][hit count=%lu]",
@ -662,45 +651,36 @@ bool StatCache::AddSymlink(const std::string& key, const std::string& value)
}
S3FS_PRN_INFO3("add symbolic link cache entry[path=%s, value=%s]", key.c_str(), value.c_str());
bool found;
bool do_truncate;
{
AutoLock lock(&StatCache::stat_cache_lock);
found = symlink_cache.end() != symlink_cache.find(key);
do_truncate = symlink_cache.size() > CacheSize;
}
AutoLock lock(&StatCache::stat_cache_lock);
if(found){
DelSymlink(key.c_str());
if(symlink_cache.end() != symlink_cache.find(key)){
// found
DelSymlink(key.c_str(), AutoLock::ALREADY_LOCKED);
}else{
if(do_truncate){
if(!TruncateSymlink()){
// check: need to truncate cache
if(symlink_cache.size() > CacheSize){
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownConditionTrueFalse
if(!TruncateSymlink(AutoLock::ALREADY_LOCKED)){
return false;
}
}
}
// make new
symlink_cache_entry* ent = new symlink_cache_entry();
ent->link = value;
ent->hit_count = 0;
SetStatCacheTime(ent->cache_date); // Set time(use the same as Stats).
symlink_cache_entry ent;
ent.link = value;
ent.hit_count = 0;
SetStatCacheTime(ent.cache_date); // Set time(use the same as Stats).
// add
AutoLock lock(&StatCache::stat_cache_lock);
std::pair<symlink_cache_t::iterator, bool> pair = symlink_cache.insert(std::make_pair(key, ent));
if(!pair.second){
delete pair.first->second;
pair.first->second = ent;
}
symlink_cache[key] = std::move(ent);
return true;
}
bool StatCache::TruncateSymlink()
bool StatCache::TruncateSymlink(AutoLock::Type locktype)
{
AutoLock lock(&StatCache::stat_cache_lock);
AutoLock lock(&StatCache::stat_cache_lock, locktype);
if(symlink_cache.empty()){
return true;
@ -709,10 +689,9 @@ bool StatCache::TruncateSymlink()
// 1) erase over expire time
if(IsExpireTime){
for(symlink_cache_t::iterator iter = symlink_cache.begin(); iter != symlink_cache.end(); ){
symlink_cache_entry* entry = iter->second;
if(!entry || IsExpireStatCacheTime(entry->cache_date, ExpireTime)){ // use the same as Stats
delete entry;
symlink_cache.erase(iter++);
const symlink_cache_entry* entry = &iter->second;
if(IsExpireStatCacheTime(entry->cache_date, ExpireTime)){ // use the same as Stats
iter = symlink_cache.erase(iter);
}else{
++iter;
}
@ -738,7 +717,6 @@ bool StatCache::TruncateSymlink()
symlink_cache_t::iterator siter = *iiter;
S3FS_PRN_DBG("truncate symbolic link cache[path=%s]", siter->first.c_str());
delete siter->second;
symlink_cache.erase(siter);
}
S3FS_MALLOCTRIM(0);
@ -756,8 +734,7 @@ bool StatCache::DelSymlink(const char* key, AutoLock::Type locktype)
AutoLock lock(&StatCache::stat_cache_lock, locktype);
symlink_cache_t::iterator iter;
if(symlink_cache.end() != (iter = symlink_cache.find(std::string(key)))){
delete iter->second;
if(symlink_cache.end() != (iter = symlink_cache.find(key))){
symlink_cache.erase(iter);
}
S3FS_MALLOCTRIM(0);
@ -765,6 +742,116 @@ bool StatCache::DelSymlink(const char* key, AutoLock::Type locktype)
return true;
}
// [NOTE]
// Need to lock StatCache::stat_cache_lock before calling this method.
//
bool StatCache::AddNotruncateCache(const std::string& key)
{
if(key.empty() || '/' == *key.rbegin()){
return false;
}
std::string parentdir = mydirname(key);
std::string filename = mybasename(key);
if(parentdir.empty() || filename.empty()){
return false;
}
parentdir += '/'; // directory path must be '/' termination.
notruncate_dir_map_t::iterator iter = notruncate_file_cache.find(parentdir);
if(iter == notruncate_file_cache.end()){
// add new list
notruncate_filelist_t list;
list.push_back(filename);
notruncate_file_cache[parentdir] = list;
}else{
// add filename to existed list
notruncate_filelist_t& filelist = iter->second;
notruncate_filelist_t::const_iterator fiter = std::find(filelist.begin(), filelist.end(), filename);
if(fiter == filelist.end()){
filelist.push_back(filename);
}
}
return true;
}
// [NOTE]
// Need to lock StatCache::stat_cache_lock before calling this method.
//
bool StatCache::DelNotruncateCache(const std::string& key)
{
if(key.empty() || '/' == *key.rbegin()){
return false;
}
std::string parentdir = mydirname(key);
std::string filename = mybasename(key);
if(parentdir.empty() || filename.empty()){
return false;
}
parentdir += '/'; // directory path must be '/' termination.
notruncate_dir_map_t::iterator iter = notruncate_file_cache.find(parentdir);
if(iter != notruncate_file_cache.end()){
// found directory in map
notruncate_filelist_t& filelist = iter->second;
notruncate_filelist_t::iterator fiter = std::find(filelist.begin(), filelist.end(), filename);
if(fiter != filelist.end()){
// found filename in directory file list
filelist.erase(fiter);
if(filelist.empty()){
notruncate_file_cache.erase(parentdir);
}
}
}
return true;
}
// [Background]
// When s3fs creates a new file, the file does not exist until the file contents
// are uploaded.(because it doesn't create a 0 byte file)
// From the time this file is created(opened) until it is uploaded(flush), it
// will have a Stat cache with the No truncate flag added.
// This avoids file not existing errors in operations such as chmod and utimens
// that occur in the short period before file upload.
// Besides this, we also need to support readdir(list_bucket), this method is
// called to maintain the cache for readdir and return its value.
//
// [NOTE]
// Add the file names under parentdir to the list.
// However, if the same file name exists in the list, it will not be added.
// parentdir must be terminated with a '/'.
//
bool StatCache::GetNotruncateCache(const std::string& parentdir, notruncate_filelist_t& list)
{
if(parentdir.empty()){
return false;
}
std::string dirpath = parentdir;
if('/' != *dirpath.rbegin()){
dirpath += '/';
}
AutoLock lock(&StatCache::stat_cache_lock);
notruncate_dir_map_t::iterator iter = notruncate_file_cache.find(dirpath);
if(iter == notruncate_file_cache.end()){
// not found directory map
return true;
}
// found directory in map
const notruncate_filelist_t& filelist = iter->second;
for(notruncate_filelist_t::const_iterator fiter = filelist.begin(); fiter != filelist.end(); ++fiter){
if(list.end() == std::find(list.begin(), list.end(), *fiter)){
// found notuncate file that does not exist in the list, so add it.
list.push_back(*fiter);
}
}
return true;
}
//-------------------------------------------------------------------
// Functions
//-------------------------------------------------------------------
@ -795,7 +882,7 @@ bool convert_header_to_stat(const char* path, const headers_t& meta, struct stat
mtime.tv_sec = 0;
mtime.tv_nsec = 0;
}
set_timespec_to_stat(*pst, ST_TYPE_MTIME, mtime);
set_timespec_to_stat(*pst, stat_time_type::MTIME, mtime);
}
// ctime
@ -807,7 +894,7 @@ bool convert_header_to_stat(const char* path, const headers_t& meta, struct stat
ctime.tv_sec = 0;
ctime.tv_nsec = 0;
}
set_timespec_to_stat(*pst, ST_TYPE_CTIME, ctime);
set_timespec_to_stat(*pst, stat_time_type::CTIME, ctime);
}
// atime
@ -819,11 +906,15 @@ bool convert_header_to_stat(const char* path, const headers_t& meta, struct stat
atime.tv_sec = 0;
atime.tv_nsec = 0;
}
set_timespec_to_stat(*pst, ST_TYPE_ATIME, atime);
set_timespec_to_stat(*pst, stat_time_type::ATIME, atime);
}
// size
pst->st_size = get_size(meta);
if(S_ISDIR(pst->st_mode)){
pst->st_size = 4096;
}else{
pst->st_size = get_size(meta);
}
// uid/gid
pst->st_uid = get_uid(meta);

View File

@ -50,7 +50,7 @@ struct stat_cache_entry {
}
};
typedef std::map<std::string, stat_cache_entry*> stat_cache_t; // key=path
typedef std::map<std::string, stat_cache_entry> stat_cache_t; // key=path
//
// Struct for symbolic link cache
@ -67,7 +67,13 @@ struct symlink_cache_entry {
}
};
typedef std::map<std::string, symlink_cache_entry*> symlink_cache_t;
typedef std::map<std::string, symlink_cache_entry> symlink_cache_t;
//
// Typedefs for No truncate file name cache
//
typedef std::vector<std::string> notruncate_filelist_t; // untruncated file name list in dir
typedef std::map<std::string, notruncate_filelist_t> notruncate_dir_map_t; // key is parent dir path
//-------------------------------------------------------------------
// Class StatCache
@ -93,6 +99,7 @@ class StatCache
unsigned long CacheSize;
bool IsCacheNoObject;
symlink_cache_t symlink_cache;
notruncate_dir_map_t notruncate_file_cache;
private:
StatCache();
@ -101,9 +108,12 @@ class StatCache
void Clear();
bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce);
// Truncate stat cache
bool TruncateCache();
bool TruncateCache(AutoLock::Type locktype = AutoLock::NONE);
// Truncate symbolic link cache
bool TruncateSymlink();
bool TruncateSymlink(AutoLock::Type locktype = AutoLock::NONE);
bool AddNotruncateCache(const std::string& key);
bool DelNotruncateCache(const std::string& key);
public:
// Reference singleton
@ -133,29 +143,29 @@ class StatCache
}
// Get stat cache
bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck = true, bool* pisforce = NULL)
bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck = true, bool* pisforce = nullptr)
{
return GetStat(key, pst, meta, overcheck, NULL, pisforce);
return GetStat(key, pst, meta, overcheck, nullptr, pisforce);
}
bool GetStat(const std::string& key, struct stat* pst, bool overcheck = true)
{
return GetStat(key, pst, NULL, overcheck, NULL, NULL);
return GetStat(key, pst, nullptr, overcheck, nullptr, nullptr);
}
bool GetStat(const std::string& key, headers_t* meta, bool overcheck = true)
{
return GetStat(key, NULL, meta, overcheck, NULL, NULL);
return GetStat(key, nullptr, meta, overcheck, nullptr, nullptr);
}
bool HasStat(const std::string& key, bool overcheck = true)
{
return GetStat(key, NULL, NULL, overcheck, NULL, NULL);
return GetStat(key, nullptr, nullptr, overcheck, nullptr, nullptr);
}
bool HasStat(const std::string& key, const char* etag, bool overcheck = true)
{
return GetStat(key, NULL, NULL, overcheck, etag, NULL);
return GetStat(key, nullptr, nullptr, overcheck, etag, nullptr);
}
bool HasStat(const std::string& key, struct stat* pst, const char* etag)
{
return GetStat(key, pst, NULL, true, etag, NULL);
return GetStat(key, pst, nullptr, true, etag, nullptr);
}
// Cache For no object
@ -166,7 +176,7 @@ class StatCache
bool AddStat(const std::string& key, const headers_t& meta, bool forcedir = false, bool no_truncate = false);
// Update meta stats
bool UpdateMetaStats(const std::string& key, headers_t& meta);
bool UpdateMetaStats(const std::string& key, const headers_t& meta);
// Change no truncate flag
void ChangeNoTruncateFlag(const std::string& key, bool no_truncate);
@ -182,6 +192,9 @@ class StatCache
bool GetSymlink(const std::string& key, std::string& value);
bool AddSymlink(const std::string& key, const std::string& value);
bool DelSymlink(const char* key, AutoLock::Type locktype = AutoLock::NONE);
// Cache for Notruncate file
bool GetNotruncateCache(const std::string& parentdir, notruncate_filelist_t& list);
};
//-------------------------------------------------------------------

View File

@ -30,8 +30,8 @@
// Global variables
//-------------------------------------------------------------------
// TODO: namespace these
static const int64_t FIVE_GB = 5LL * 1024LL * 1024LL * 1024LL;
static const off_t MIN_MULTIPART_SIZE = 5 * 1024 * 1024;
static constexpr int64_t FIVE_GB = 5LL * 1024LL * 1024LL * 1024LL;
static constexpr off_t MIN_MULTIPART_SIZE = 5 * 1024 * 1024;
extern bool foreground;
extern bool nomultipart;

View File

@ -29,36 +29,24 @@
//-------------------------------------------------------------------
std::string s3fs_get_content_md5(int fd)
{
unsigned char* md5;
char* base64;
std::string Signature;
if(NULL == (md5 = s3fs_md5_fd(fd, 0, -1))){
return std::string("");
md5_t md5;
if(!s3fs_md5_fd(fd, 0, -1, &md5)){
// TODO: better return value?
return "";
}
if(NULL == (base64 = s3fs_base64(md5, get_md5_digest_length()))){
delete[] md5;
return std::string(""); // ENOMEM
}
delete[] md5;
Signature = base64;
delete[] base64;
return Signature;
return s3fs_base64(md5.data(), md5.size());
}
std::string s3fs_sha256_hex_fd(int fd, off_t start, off_t size)
{
size_t digestlen = get_sha256_digest_length();
unsigned char* sha256;
sha256_t sha256;
if(NULL == (sha256 = s3fs_sha256_fd(fd, start, size))){
return std::string("");
if(!s3fs_sha256_fd(fd, start, size, &sha256)){
// TODO: better return value?
return "";
}
std::string sha256hex = s3fs_hex_lower(sha256, digestlen);
delete[] sha256;
std::string sha256hex = s3fs_hex_lower(sha256.data(), sha256.size());
return sha256hex;
}

File diff suppressed because it is too large.

View File

@ -22,12 +22,11 @@
#define S3FS_CURL_H_
#include <curl/curl.h>
#include <list>
#include <map>
#include <memory>
#include <vector>
#include "autolock.h"
#include "bodydata.h"
#include "metaheader.h"
#include "fdcache_page.h"
@ -83,7 +82,7 @@ class Semaphore;
typedef bool (*s3fscurl_lazy_setup)(S3fsCurl* s3fscurl);
typedef std::map<std::string, std::string> sseckeymap_t;
typedef std::list<sseckeymap_t> sseckeylist_t;
typedef std::vector<sseckeymap_t> sseckeylist_t;
// Class for lapping curl
//
@ -92,25 +91,28 @@ class S3fsCurl
friend class S3fsMultiCurl;
private:
enum REQTYPE {
REQTYPE_UNSET = -1,
REQTYPE_DELETE = 0,
REQTYPE_HEAD,
REQTYPE_PUTHEAD,
REQTYPE_PUT,
REQTYPE_GET,
REQTYPE_CHKBUCKET,
REQTYPE_LISTBUCKET,
REQTYPE_PREMULTIPOST,
REQTYPE_COMPLETEMULTIPOST,
REQTYPE_UPLOADMULTIPOST,
REQTYPE_COPYMULTIPOST,
REQTYPE_MULTILIST,
REQTYPE_IAMCRED,
REQTYPE_ABORTMULTIUPLOAD,
REQTYPE_IAMROLE
enum class REQTYPE {
UNSET = -1,
DELETE = 0,
HEAD,
PUTHEAD,
PUT,
GET,
CHKBUCKET,
LISTBUCKET,
PREMULTIPOST,
COMPLETEMULTIPOST,
UPLOADMULTIPOST,
COPYMULTIPOST,
MULTILIST,
IAMCRED,
ABORTMULTIUPLOAD,
IAMROLE
};
// Environment name
static constexpr char S3FS_SSL_PRIVKEY_PASSWORD[] = "S3FS_SSL_PRIVKEY_PASSWORD";
// class variables
static pthread_mutex_t curl_warnings_lock;
static bool curl_warnings_once; // emit older curl warnings only once
@ -140,6 +142,11 @@ class S3fsCurl
static bool is_dump_body;
static S3fsCred* ps3fscred;
static long ssl_verify_hostname;
static std::string client_cert;
static std::string client_cert_type;
static std::string client_priv_key;
static std::string client_priv_key_type;
static std::string client_key_password;
static curltime_t curl_times;
static curlprogress_t curl_progress;
static std::string curl_ca_bundle;
@ -157,6 +164,7 @@ class S3fsCurl
static std::string proxy_url;
static bool proxy_http;
static std::string proxy_userpwd; // load from file(<username>:<passphrase>)
static long ipresolve_type; // this value is a libcurl symbol.
// variables
CURL* hCurl;
@ -167,8 +175,8 @@ class S3fsCurl
std::string url; // target object path(url)
struct curl_slist* requestHeaders;
headers_t responseHeaders; // header data by HeaderCallback
BodyData bodydata; // body data by WriteMemoryCallback
BodyData headdata; // header data by WriteMemoryCallback
std::string bodydata; // body data by WriteMemoryCallback
std::string headdata; // header data by WriteMemoryCallback
long LastResponseCode;
const unsigned char* postdata; // use by post method and read callback function.
off_t postdata_remaining; // use by post method and read callback function.
@ -194,14 +202,18 @@ class S3fsCurl
CURLcode curlCode; // handle curl return
public:
static const long S3FSCURL_RESPONSECODE_NOTSET = -1;
static const long S3FSCURL_RESPONSECODE_FATAL_ERROR = -2;
static const int S3FSCURL_PERFORM_RESULT_NOTSET = 1;
static constexpr long S3FSCURL_RESPONSECODE_NOTSET = -1;
static constexpr long S3FSCURL_RESPONSECODE_FATAL_ERROR = -2;
static constexpr int S3FSCURL_PERFORM_RESULT_NOTSET = 1;
public:
// constructor/destructor
explicit S3fsCurl(bool ahbe = false);
~S3fsCurl();
S3fsCurl(const S3fsCurl&) = delete;
S3fsCurl(S3fsCurl&&) = delete;
S3fsCurl& operator=(const S3fsCurl&) = delete;
S3fsCurl& operator=(S3fsCurl&&) = delete;
private:
// class methods
@ -225,10 +237,10 @@ class S3fsCurl
static bool UploadMultipartPostCallback(S3fsCurl* s3fscurl, void* param);
static bool CopyMultipartPostCallback(S3fsCurl* s3fscurl, void* param);
static bool MixMultipartPostCallback(S3fsCurl* s3fscurl, void* param);
static S3fsCurl* UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl);
static S3fsCurl* CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl);
static S3fsCurl* MixMultipartPostRetryCallback(S3fsCurl* s3fscurl);
static S3fsCurl* ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl);
static std::unique_ptr<S3fsCurl> UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl);
static std::unique_ptr<S3fsCurl> CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl);
static std::unique_ptr<S3fsCurl> MixMultipartPostRetryCallback(S3fsCurl* s3fscurl);
static std::unique_ptr<S3fsCurl> ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl);
// lazy functions for set curl options
static bool CopyMultipartPostSetCurlOpts(S3fsCurl* s3fscurl);
@ -268,7 +280,7 @@ class S3fsCurl
static bool InitCredentialObject(S3fsCred* pcredobj);
static bool InitMimeType(const std::string& strFile);
static bool DestroyS3fsCurl();
static S3fsCurl* CreateParallelS3fsCurl(const char* tpath, int fd, off_t start, off_t size, int part_num, bool is_copy, etagpair* petag, const std::string& upload_id, int& result);
static std::unique_ptr<S3fsCurl> CreateParallelS3fsCurl(const char* tpath, int fd, off_t start, off_t size, int part_num, bool is_copy, etagpair* petag, const std::string& upload_id, int& result);
static int ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, int fd);
static int ParallelMixMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, const fdpage_list_t& mixuppages);
static int ParallelGetObjectRequest(const char* tpath, int fd, off_t start, off_t size);
@ -313,6 +325,7 @@ class S3fsCurl
static bool IsDumpBody() { return S3fsCurl::is_dump_body; }
static long SetSslVerifyHostname(long value);
static long GetSslVerifyHostname() { return S3fsCurl::ssl_verify_hostname; }
static bool SetSSLClientCertOptions(const std::string& values);
static void ResetOffset(S3fsCurl* pCurl);
// maximum parallel GET and PUT requests
static int SetMaxParallelCount(int value);
@ -337,6 +350,7 @@ class S3fsCurl
static bool IsRequesterPays() { return S3fsCurl::requester_pays; }
static bool SetProxy(const char* url);
static bool SetProxyUserPwd(const char* userpwd);
static bool SetIPResolveType(const char* value);
// methods
bool CreateCurlHandle(bool only_pool = false, bool remake = false);
@ -348,7 +362,7 @@ class S3fsCurl
int RequestPerform(bool dontAddAuthHeaders=false);
int DeleteRequest(const char* tpath);
int GetIAMv2ApiToken(const char* token_url, int token_ttl, const char* token_ttl_hdr, std::string& response);
bool PreHeadRequest(const char* tpath, const char* bpath = NULL, const char* savedpath = NULL, size_t ssekey_pos = -1);
bool PreHeadRequest(const char* tpath, const char* bpath = nullptr, const char* savedpath = nullptr, size_t ssekey_pos = -1);
bool PreHeadRequest(const std::string& tpath, const std::string& bpath, const std::string& savedpath, size_t ssekey_pos = -1) {
return PreHeadRequest(tpath.c_str(), bpath.c_str(), savedpath.c_str(), ssekey_pos);
}
@ -357,7 +371,7 @@ class S3fsCurl
int PutRequest(const char* tpath, headers_t& meta, int fd);
int PreGetObjectRequest(const char* tpath, int fd, off_t start, off_t size, sse_type_t ssetype, const std::string& ssevalue);
int GetObjectRequest(const char* tpath, int fd, off_t start = -1, off_t size = -1);
int CheckBucket(const char* check_path, bool compat_dir);
int CheckBucket(const char* check_path, bool compat_dir, bool force_no_sse);
int ListBucketRequest(const char* tpath, const char* query);
int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy);
int CompleteMultipartPostRequest(const char* tpath, const std::string& upload_id, etaglist_t& parts);
@ -377,8 +391,8 @@ class S3fsCurl
std::string GetUrl() const { return url; }
std::string GetOp() const { return op; }
const headers_t* GetResponseHeaders() const { return &responseHeaders; }
const BodyData* GetBodyData() const { return &bodydata; }
const BodyData* GetHeadData() const { return &headdata; }
const std::string* GetBodyData() const { return &bodydata; }
const std::string* GetHeadData() const { return &headdata; }
CURLcode GetCurlCode() const { return curlCode; }
long GetLastResponseCode() const { return LastResponseCode; }
bool SetUseAhbe(bool ahbe);

View File

@ -53,11 +53,15 @@ bool CurlHandlerPool::Init()
bool CurlHandlerPool::Destroy()
{
while(!mPool.empty()){
CURL* hCurl = mPool.back();
mPool.pop_back();
if(hCurl){
curl_easy_cleanup(hCurl);
{
AutoLock lock(&mLock);
while(!mPool.empty()){
CURL* hCurl = mPool.back();
mPool.pop_back();
if(hCurl){
curl_easy_cleanup(hCurl);
}
}
}
if (0 != pthread_mutex_destroy(&mLock)) {
@ -69,15 +73,14 @@ bool CurlHandlerPool::Destroy()
CURL* CurlHandlerPool::GetHandler(bool only_pool)
{
CURL* hCurl = NULL;
{
AutoLock lock(&mLock);
if(!mPool.empty()){
hCurl = mPool.back();
mPool.pop_back();
S3FS_PRN_DBG("Get handler from pool: rest = %d", static_cast<int>(mPool.size()));
}
AutoLock lock(&mLock);
CURL* hCurl = nullptr;
if(!mPool.empty()){
hCurl = mPool.back();
mPool.pop_back();
S3FS_PRN_DBG("Get handler from pool: rest = %d", static_cast<int>(mPool.size()));
}
if(only_pool){
return hCurl;
@ -94,10 +97,9 @@ void CurlHandlerPool::ReturnHandler(CURL* hCurl, bool restore_pool)
if(!hCurl){
return;
}
AutoLock lock(&mLock);
if(restore_pool){
AutoLock lock(&mLock);
S3FS_PRN_DBG("Return handler to pool");
mPool.push_back(hCurl);
@ -115,6 +117,16 @@ void CurlHandlerPool::ReturnHandler(CURL* hCurl, bool restore_pool)
}
}
void CurlHandlerPool::ResetHandler(CURL* hCurl)
{
if(!hCurl){
return;
}
AutoLock lock(&mLock);
curl_easy_reset(hCurl);
}
/*
* Local variables:
* tab-width: 4

View File

@ -23,6 +23,7 @@
#include <cassert>
#include <curl/curl.h>
#include <list>
//----------------------------------------------
// Typedefs
@ -39,12 +40,17 @@ class CurlHandlerPool
{
assert(maxHandlers > 0);
}
CurlHandlerPool(const CurlHandlerPool&) = delete;
CurlHandlerPool(CurlHandlerPool&&) = delete;
CurlHandlerPool& operator=(const CurlHandlerPool&) = delete;
CurlHandlerPool& operator=(CurlHandlerPool&&) = delete;
bool Init();
bool Destroy();
CURL* GetHandler(bool only_pool);
void ReturnHandler(CURL* hCurl, bool restore_pool);
void ResetHandler(CURL* hCurl);
private:
int mMaxHandlers;

View File

@ -33,7 +33,7 @@
//-------------------------------------------------------------------
// Class S3fsMultiCurl
//-------------------------------------------------------------------
S3fsMultiCurl::S3fsMultiCurl(int maxParallelism) : maxParallelism(maxParallelism), SuccessCallback(NULL), NotFoundCallback(NULL), RetryCallback(NULL), pSuccessCallbackParam(NULL), pNotFoundCallbackParam(NULL)
S3fsMultiCurl::S3fsMultiCurl(int maxParallelism, bool not_abort) : maxParallelism(maxParallelism), not_abort(not_abort), SuccessCallback(nullptr), NotFoundCallback(nullptr), RetryCallback(nullptr), pSuccessCallbackParam(nullptr), pNotFoundCallbackParam(nullptr)
{
int result;
pthread_mutexattr_t attr;
@ -60,19 +60,17 @@ bool S3fsMultiCurl::ClearEx(bool is_all)
{
s3fscurllist_t::iterator iter;
for(iter = clist_req.begin(); iter != clist_req.end(); ++iter){
S3fsCurl* s3fscurl = *iter;
S3fsCurl* s3fscurl = iter->get();
if(s3fscurl){
s3fscurl->DestroyCurlHandle();
delete s3fscurl; // with destroy curl handle.
}
}
clist_req.clear();
if(is_all){
for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){
S3fsCurl* s3fscurl = *iter;
S3fsCurl* s3fscurl = iter->get();
s3fscurl->DestroyCurlHandle();
delete s3fscurl;
}
clist_all.clear();
}
@ -117,12 +115,12 @@ void* S3fsMultiCurl::SetNotFoundCallbackParam(void* param)
return old;
}
bool S3fsMultiCurl::SetS3fsCurlObject(S3fsCurl* s3fscurl)
bool S3fsMultiCurl::SetS3fsCurlObject(std::unique_ptr<S3fsCurl> s3fscurl)
{
if(!s3fscurl){
return false;
}
clist_all.push_back(s3fscurl);
clist_all.push_back(std::move(s3fscurl));
return true;
}
@ -137,7 +135,7 @@ int S3fsMultiCurl::MultiPerform()
for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ++iter) {
pthread_t thread;
S3fsCurl* s3fscurl = *iter;
S3fsCurl* s3fscurl = iter->get();
if(!s3fscurl){
continue;
}
@ -168,7 +166,7 @@ int S3fsMultiCurl::MultiPerform()
isMultiHead |= s3fscurl->GetOp() == "HEAD";
rc = pthread_create(&thread, NULL, S3fsMultiCurl::RequestPerformWrapper, static_cast<void*>(s3fscurl));
rc = pthread_create(&thread, nullptr, S3fsMultiCurl::RequestPerformWrapper, static_cast<void*>(s3fscurl));
if (rc != 0) {
success = false;
S3FS_PRN_ERR("failed pthread_create - rc(%d)", rc);
@ -206,7 +204,7 @@ int S3fsMultiCurl::MultiRead()
int result = 0;
for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ){
S3fsCurl* s3fscurl = *iter;
std::unique_ptr<S3fsCurl> s3fscurl(std::move(*iter));
bool isRetry = false;
bool isPostpone = false;
@ -220,7 +218,9 @@ int S3fsMultiCurl::MultiRead()
isPostpone = true;
}else if(400 > responseCode){
// add into stat cache
if(SuccessCallback && !SuccessCallback(s3fscurl, pSuccessCallbackParam)){
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownPointerToBool
if(SuccessCallback && !SuccessCallback(s3fscurl.get(), pSuccessCallbackParam)){
S3FS_PRN_WARN("error from success callback function(%s).", s3fscurl->url.c_str());
}
}else if(400 == responseCode){
@ -234,7 +234,9 @@ int S3fsMultiCurl::MultiRead()
S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
}
// Call callback function
if(NotFoundCallback && !NotFoundCallback(s3fscurl, pNotFoundCallbackParam)){
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownPointerToBool
if(NotFoundCallback && !NotFoundCallback(s3fscurl.get(), pNotFoundCallbackParam)){
S3FS_PRN_WARN("error from not found callback function(%s).", s3fscurl->url.c_str());
}
}else if(500 == responseCode){
@ -271,35 +273,34 @@ int S3fsMultiCurl::MultiRead()
if(isPostpone){
clist_req.erase(iter);
clist_req.push_back(s3fscurl); // Re-evaluate at the end
clist_req.push_back(std::move(s3fscurl)); // Re-evaluate at the end
iter = clist_req.begin();
}else{
if(!isRetry || 0 != result){
if(!isRetry || (!not_abort && 0 != result)){
// If an EIO error has already occurred, it will be terminated
// immediately even if retry processing is required.
s3fscurl->DestroyCurlHandle();
delete s3fscurl;
}else{
S3fsCurl* retrycurl = NULL;
// Reset offset
if(isNeedResetOffset){
S3fsCurl::ResetOffset(s3fscurl);
S3fsCurl::ResetOffset(s3fscurl.get());
}
// For retry
std::unique_ptr<S3fsCurl> retrycurl;
const S3fsCurl* retrycurl_ptr = retrycurl.get(); // save this due to std::move below
if(RetryCallback){
retrycurl = RetryCallback(s3fscurl);
if(NULL != retrycurl){
clist_all.push_back(retrycurl);
retrycurl = RetryCallback(s3fscurl.get());
if(nullptr != retrycurl){
clist_all.push_back(std::move(retrycurl));
}else{
// set EIO and wait for other parts.
result = -EIO;
}
}
if(s3fscurl != retrycurl){
// cppcheck-suppress mismatchingContainers
if(s3fscurl.get() != retrycurl_ptr){
s3fscurl->DestroyCurlHandle();
delete s3fscurl;
}
}
iter = clist_req.erase(iter);
@ -307,12 +308,11 @@ int S3fsMultiCurl::MultiRead()
}
clist_req.clear();
if(0 != result){
if(!not_abort && 0 != result){
// If an EIO error has already occurred, clear all retry objects.
for(s3fscurllist_t::iterator iter = clist_all.begin(); iter != clist_all.end(); ++iter){
S3fsCurl* s3fscurl = *iter;
S3fsCurl* s3fscurl = iter->get();
s3fscurl->DestroyCurlHandle();
delete s3fscurl;
}
clist_all.clear();
}
@ -333,8 +333,7 @@ int S3fsMultiCurl::Request()
int result;
s3fscurllist_t::iterator iter;
for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){
S3fsCurl* s3fscurl = *iter;
clist_req.push_back(s3fscurl);
clist_req.push_back(std::move(*iter));
}
clist_all.clear();
@ -362,7 +361,7 @@ int S3fsMultiCurl::Request()
void* S3fsMultiCurl::RequestPerformWrapper(void* arg)
{
S3fsCurl* s3fscurl= static_cast<S3fsCurl*>(arg);
void* result = NULL;
void* result = nullptr;
if(!s3fscurl){
return reinterpret_cast<void*>(static_cast<intptr_t>(-EIO));
}

View File

@ -21,15 +21,18 @@
#ifndef S3FS_CURL_MULTI_H_
#define S3FS_CURL_MULTI_H_
#include <memory>
#include <vector>
//----------------------------------------------
// Typedef
//----------------------------------------------
class S3fsCurl;
typedef std::vector<S3fsCurl*> s3fscurllist_t;
typedef std::vector<std::unique_ptr<S3fsCurl>> s3fscurllist_t;
typedef bool (*S3fsMultiSuccessCallback)(S3fsCurl* s3fscurl, void* param); // callback for succeed multi request
typedef bool (*S3fsMultiNotFoundCallback)(S3fsCurl* s3fscurl, void* param); // callback for succeed multi request
typedef S3fsCurl* (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failure and retrying
typedef std::unique_ptr<S3fsCurl> (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failure and retrying
//----------------------------------------------
// class S3fsMultiCurl
@ -41,6 +44,7 @@ class S3fsMultiCurl
s3fscurllist_t clist_all; // all of curl requests
s3fscurllist_t clist_req; // curl requests are sent
bool not_abort; // complete all requests without aborting on errors
S3fsMultiSuccessCallback SuccessCallback;
S3fsMultiNotFoundCallback NotFoundCallback;
@ -59,7 +63,7 @@ class S3fsMultiCurl
static void* RequestPerformWrapper(void* arg);
public:
explicit S3fsMultiCurl(int maxParallelism);
explicit S3fsMultiCurl(int maxParallelism, bool not_abort = false);
~S3fsMultiCurl();
int GetMaxParallelism() const { return maxParallelism; }
@ -70,7 +74,7 @@ class S3fsMultiCurl
void* SetSuccessCallbackParam(void* param);
void* SetNotFoundCallbackParam(void* param);
bool Clear() { return ClearEx(true); }
bool SetS3fsCurlObject(S3fsCurl* s3fscurl);
bool SetS3fsCurlObject(std::unique_ptr<S3fsCurl> s3fscurl);
int Request();
};

View File

@ -37,23 +37,6 @@
// This function is like curl_slist_append function, but this adds data by a-sorting.
// Because AWS signature needs sorted header.
//
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data)
{
if(!data){
return list;
}
std::string strkey = data;
std::string strval;
std::string::size_type pos = strkey.find(':', 0);
if(std::string::npos != pos){
strval = strkey.substr(pos + 1);
strkey.erase(pos);
}
return curl_slist_sort_insert(list, strkey.c_str(), strval.c_str());
}
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value)
{
if(!key){
@ -61,11 +44,11 @@ struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* k
}
// key & value are trimmed and lower (only key)
std::string strkey = trim(std::string(key));
std::string strval = value ? trim(std::string(value)) : "";
std::string strkey = trim(key);
std::string strval = value ? trim(value) : "";
std::string strnew = key + std::string(": ") + strval;
char* data;
if(NULL == (data = strdup(strnew.c_str()))){
if(nullptr == (data = strdup(strnew.c_str()))){
return list;
}
@ -88,7 +71,8 @@ struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* k
}
struct curl_slist* new_item;
if(NULL == (new_item = static_cast<struct curl_slist*>(malloc(sizeof(*new_item))))){
// Must use malloc since curl_slist_free_all calls free.
if(nullptr == (new_item = static_cast<struct curl_slist*>(malloc(sizeof(*new_item))))){
free(data);
return list;
}
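// For context, AWS Signature V4 canonicalization is why the list above is kept
// sorted: headers must be lower-cased, trimmed and ordered by key. This hedged
// sketch uses std::map instead of curl_slist, so it is only an analogy to
// curl_slist_sort_insert/get_canonical_headers (canonical_headers_example is a
// hypothetical name).
#include <map>
#include <string>

std::string canonical_headers_example(const std::map<std::string, std::string>& headers)
{
    // std::map iterates in key order, mirroring the sorted insertion that
    // curl_slist_sort_insert performs on the raw curl_slist. Keys are assumed
    // to be already lower-cased and trimmed.
    std::string canonical;
    for(const auto& kv : headers){
        if(kv.second.empty()){
            continue;                                // empty values are skipped, as libcurl discards them
        }
        canonical += kv.first + ":" + kv.second + "\n";
    }
    return canonical;
}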
@ -107,7 +91,7 @@ struct curl_slist* curl_slist_remove(struct curl_slist* list, const char* key)
return list;
}
std::string strkey = trim(std::string(key));
std::string strkey = trim(key);
struct curl_slist **p = &list;
while(*p){
std::string strcur = (*p)->data;
@ -176,37 +160,6 @@ std::string get_header_value(const struct curl_slist* list, const std::string &k
return "";
}
std::string get_canonical_headers(const struct curl_slist* list)
{
std::string canonical_headers;
if(!list){
canonical_headers = "\n";
return canonical_headers;
}
for( ; list; list = list->next){
std::string strhead = list->data;
size_t pos;
if(std::string::npos != (pos = strhead.find(':', 0))){
std::string strkey = trim(lower(strhead.substr(0, pos)));
std::string strval = trim(strhead.substr(pos + 1));
if (strval.empty()) {
// skip empty-value headers (as they are discarded by libcurl)
continue;
}
strhead = strkey;
strhead += ":";
strhead += strval;
}else{
strhead = trim(lower(strhead));
}
canonical_headers += strhead;
canonical_headers += "\n";
}
return canonical_headers;
}
std::string get_canonical_headers(const struct curl_slist* list, bool only_amz)
{
std::string canonical_headers;
@ -259,8 +212,8 @@ std::string prepare_url(const char* url)
std::string uri;
std::string hostname;
std::string path;
std::string url_str = std::string(url);
std::string token = std::string("/") + S3fsCred::GetBucket();
std::string url_str = url;
std::string token = "/" + S3fsCred::GetBucket();
size_t bucket_pos;
size_t bucket_length = token.size();
size_t uri_length = 0;
@ -292,40 +245,18 @@ std::string prepare_url(const char* url)
return url_str;
}
// [TODO]
// This function uses temporary file, but should not use it.
// For not using it, we implement function in each auth file(openssl, nss. gnutls).
//
bool make_md5_from_binary(const char* pstr, size_t length, std::string& md5)
{
if(!pstr || '\0' == pstr[0]){
S3FS_PRN_ERR("Parameter is wrong.");
return false;
}
FILE* fp;
if(NULL == (fp = tmpfile())){
S3FS_PRN_ERR("Could not make tmpfile.");
md5_t binary;
if(!s3fs_md5(reinterpret_cast<const unsigned char*>(pstr), length, &binary)){
return false;
}
if(length != fwrite(pstr, sizeof(char), length, fp)){
S3FS_PRN_ERR("Failed to write tmpfile.");
fclose(fp);
return false;
}
int fd;
if(0 != fflush(fp) || 0 != fseek(fp, 0L, SEEK_SET) || -1 == (fd = fileno(fp))){
S3FS_PRN_ERR("Failed to make MD5.");
fclose(fp);
return false;
}
// base64 md5
md5 = s3fs_get_content_md5(fd);
if(md5.empty()){
S3FS_PRN_ERR("Failed to make MD5.");
fclose(fp);
return false;
}
fclose(fp);
md5 = s3fs_base64(binary.data(), binary.size());
return true;
}
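// The rewritten make_md5_from_binary hashes the buffer in memory instead of
// round-tripping through a temporary file. A hedged sketch of the same idea
// using OpenSSL directly (the real code goes through the s3fs auth wrappers
// s3fs_md5/s3fs_base64; MD5() is deprecated in OpenSSL 3.0 but still available):
#include <openssl/evp.h>
#include <openssl/md5.h>
#include <cstddef>
#include <string>

static bool md5_base64_example(const unsigned char* buf, size_t len, std::string& out)
{
    unsigned char digest[MD5_DIGEST_LENGTH];
    if(nullptr == MD5(buf, len, digest)){            // MD5 of the in-memory buffer
        return false;
    }
    unsigned char encoded[((MD5_DIGEST_LENGTH + 2) / 3) * 4 + 1];
    EVP_EncodeBlock(encoded, digest, MD5_DIGEST_LENGTH);   // base64, NUL-terminated
    out = reinterpret_cast<char*>(encoded);
    return true;
}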
@ -333,8 +264,8 @@ std::string url_to_host(const std::string &url)
{
S3FS_PRN_INFO3("url is %s", url.c_str());
static const char HTTP[] = "http://";
static const char HTTPS[] = "https://";
static constexpr char HTTP[] = "http://";
static constexpr char HTTPS[] = "https://";
std::string hostname;
if (is_prefix(url.c_str(), HTTP)) {
@ -388,17 +319,9 @@ const char* getCurlDebugHead(curl_infotype type)
//
// compare ETag ignoring quotes and case
//
bool etag_equals(std::string s1, std::string s2)
bool etag_equals(const std::string& s1, const std::string& s2)
{
if(s1.length() > 1 && s1[0] == '\"' && *s1.rbegin() == '\"'){
s1.erase(s1.size() - 1);
s1.erase(0, 1);
}
if(s2.length() > 1 && s2[0] == '\"' && *s2.rbegin() == '\"'){
s2.erase(s2.size() - 1);
s2.erase(0, 1);
}
return 0 == strcasecmp(s1.c_str(), s2.c_str());
return 0 == strcasecmp(peeloff(s1).c_str(), peeloff(s2).c_str());
}
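// peeloff is assumed to be an s3fs string helper that strips one surrounding
// pair of double quotes. A hedged, standalone equivalent of the new
// etag_equals logic (strip_quotes/etag_equals_example are illustrative names):
#include <string>
#include <strings.h>                                 // strcasecmp

static std::string strip_quotes(const std::string& s)
{
    if(s.length() > 1 && '"' == s.front() && '"' == s.back()){
        return s.substr(1, s.length() - 2);
    }
    return s;
}

static bool etag_equals_example(const std::string& s1, const std::string& s2)
{
    // compare ETags ignoring surrounding quotes and case
    return 0 == strcasecmp(strip_quotes(s1).c_str(), strip_quotes(s2).c_str());
}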
/*

View File

@ -23,12 +23,11 @@
#include <curl/curl.h>
class sse_type_t;
enum class sse_type_t;
//----------------------------------------------
// Functions
//----------------------------------------------
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data);
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value);
struct curl_slist* curl_slist_remove(struct curl_slist* list, const char* key);
std::string get_sorted_header_keys(const struct curl_slist* list);
@ -43,7 +42,7 @@ std::string url_to_host(const std::string &url);
std::string get_bucket_host();
const char* getCurlDebugHead(curl_infotype type);
bool etag_equals(std::string s1, std::string s2);
bool etag_equals(const std::string& s1, const std::string& s2);
#endif // S3FS_CURL_UTIL_H_

View File

@ -38,7 +38,7 @@
//
// The following symbols are used by FdManager::RawCheckAllCache().
//
#define CACHEDBG_FMT_DIR_PROB "Directory: %s"
// These must be #defines due to string literal concatenation.
#define CACHEDBG_FMT_HEAD "---------------------------------------------------------------------------\n" \
"Check cache file and its stats file consistency at %s\n" \
"---------------------------------------------------------------------------"
@ -70,7 +70,7 @@
// This process may not be complete, but it is an easy way to realize it.
//
#define NOCACHE_PATH_PREFIX_FORM " __S3FS_UNEXISTED_PATH_%lx__ / " // the leading/trailing spaces are important for simple handling
static constexpr char NOCACHE_PATH_PREFIX_FORM[] = " __S3FS_UNEXISTED_PATH_%lx__ / "; // the leading/trailing spaces are important for simple handling
//------------------------------------------------
// FdManager class variable
@ -119,7 +119,7 @@ bool FdManager::DeleteCacheDirectory()
}
std::string cache_path;
if(!FdManager::MakeCachePath(NULL, cache_path, false)){
if(!FdManager::MakeCachePath(nullptr, cache_path, false)){
return false;
}
if(!delete_files_in_dir(cache_path.c_str(), true)){
@ -155,18 +155,13 @@ int FdManager::DeleteCacheFile(const char* path)
}else{
S3FS_PRN_ERR("failed to delete file(%s): errno=%d", path, errno);
}
result = -errno;
return -errno;
}
if(!CacheFileStat::DeleteCacheFileStat(path)){
if(ENOENT == errno){
S3FS_PRN_DBG("failed to delete stat file(%s): errno=%d", path, errno);
if(0 != (result = CacheFileStat::DeleteCacheFileStat(path))){
if(-ENOENT == result){
S3FS_PRN_DBG("failed to delete stat file(%s): errno=%d", path, result);
}else{
S3FS_PRN_ERR("failed to delete stat file(%s): errno=%d", path, errno);
}
if(0 != errno){
result = -errno;
}else{
result = -EIO;
S3FS_PRN_ERR("failed to delete stat file(%s): errno=%d", path, result);
}
}
return result;
@ -218,7 +213,8 @@ bool FdManager::MakeRandomTempPath(const char* path, std::string& tmppath)
{
char szBuff[64];
sprintf(szBuff, NOCACHE_PATH_PREFIX_FORM, random()); // worry for performance, but maybe don't worry.
snprintf(szBuff, sizeof(szBuff), NOCACHE_PATH_PREFIX_FORM, random()); // worry for performance, but maybe don't worry.
szBuff[sizeof(szBuff) - 1] = '\0'; // for safety
tmppath = szBuff;
tmppath += path ? path : "";
return true;
@ -260,7 +256,7 @@ bool FdManager::InitFakeUsedDiskSize(off_t fake_freesize)
{
FdManager::fake_used_disk_space = 0; // At first, clear this value because this value is used in GetFreeDiskSpace.
off_t actual_freesize = FdManager::GetFreeDiskSpace(NULL);
off_t actual_freesize = FdManager::GetFreeDiskSpace(nullptr);
if(fake_freesize < actual_freesize){
FdManager::fake_used_disk_space = actual_freesize - fake_freesize;
@ -270,9 +266,38 @@ bool FdManager::InitFakeUsedDiskSize(off_t fake_freesize)
return true;
}
off_t FdManager::GetTotalDiskSpaceByRatio(int ratio)
{
return FdManager::GetTotalDiskSpace(nullptr) * ratio / 100;
}
off_t FdManager::GetTotalDiskSpace(const char* path)
{
struct statvfs vfsbuf;
int result = FdManager::GetVfsStat(path, &vfsbuf);
if(result == -1){
return 0;
}
off_t actual_totalsize = vfsbuf.f_blocks * vfsbuf.f_frsize;
return actual_totalsize;
}
off_t FdManager::GetFreeDiskSpace(const char* path)
{
struct statvfs vfsbuf;
int result = FdManager::GetVfsStat(path, &vfsbuf);
if(result == -1){
return 0;
}
off_t actual_freesize = vfsbuf.f_bavail * vfsbuf.f_frsize;
return (FdManager::fake_used_disk_space < actual_freesize ? (actual_freesize - FdManager::fake_used_disk_space) : 0);
}
int FdManager::GetVfsStat(const char* path, struct statvfs* vfsbuf){
std::string ctoppath;
if(!FdManager::cache_dir.empty()){
ctoppath = FdManager::cache_dir + "/";
@ -288,14 +313,12 @@ off_t FdManager::GetFreeDiskSpace(const char* path)
}else{
ctoppath += ".";
}
if(-1 == statvfs(ctoppath.c_str(), &vfsbuf)){
if(-1 == statvfs(ctoppath.c_str(), vfsbuf)){
S3FS_PRN_ERR("could not get vfs stat by errno(%d)", errno);
return 0;
return -1;
}
off_t actual_freesize = vfsbuf.f_bavail * vfsbuf.f_frsize;
return (FdManager::fake_used_disk_space < actual_freesize ? (actual_freesize - FdManager::fake_used_disk_space) : 0);
return 0;
}
bool FdManager::IsSafeDiskSpace(const char* path, off_t size)
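// A minimal sketch of the statvfs arithmetic that the new GetVfsStat/
// GetTotalDiskSpace/GetFreeDiskSpace helpers share. It omits the cache-dir/
// tmp-dir path resolution and the fake_used_disk_space adjustment; the names
// are illustrative only.
#include <sys/statvfs.h>
#include <sys/types.h>

static bool disk_space_example(const char* dir, off_t& total, off_t& avail)
{
    struct statvfs vfsbuf;
    if(-1 == statvfs(dir, &vfsbuf)){
        return false;                                // caller decides how to report errno
    }
    total = static_cast<off_t>(vfsbuf.f_blocks) * static_cast<off_t>(vfsbuf.f_frsize);  // total size of the filesystem
    avail = static_cast<off_t>(vfsbuf.f_bavail) * static_cast<off_t>(vfsbuf.f_frsize);  // space available to unprivileged users
    return true;
}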
@ -304,6 +327,18 @@ bool FdManager::IsSafeDiskSpace(const char* path, off_t size)
return size + FdManager::GetEnsureFreeDiskSpace() <= fsize;
}
bool FdManager::IsSafeDiskSpaceWithLog(const char* path, off_t size)
{
off_t fsize = FdManager::GetFreeDiskSpace(path);
off_t needsize = size + FdManager::GetEnsureFreeDiskSpace();
if(needsize <= fsize){
return true;
} else {
S3FS_PRN_EXIT("There is no enough disk space for used as cache(or temporary) directory by s3fs. Requires %.3f MB, already has %.3f MB.", static_cast<double>(needsize) / 1024 / 1024, static_cast<double>(fsize) / 1024 / 1024);
return false;
}
}
bool FdManager::HaveLseekHole()
{
if(FdManager::checked_lseek){
@ -311,16 +346,13 @@ bool FdManager::HaveLseekHole()
}
// create temporary file
FILE* ptmpfp;
int fd;
if(NULL == (ptmpfp = MakeTempFile()) || -1 == (fd = fileno(ptmpfp))){
int fd;
std::unique_ptr<FILE, decltype(&s3fs_fclose)> ptmpfp(MakeTempFile(), &s3fs_fclose);
if(nullptr == ptmpfp || -1 == (fd = fileno(ptmpfp.get()))){
S3FS_PRN_ERR("failed to open temporary file by errno(%d)", errno);
if(ptmpfp){
fclose(ptmpfp);
}
FdManager::checked_lseek = true;
FdManager::have_lseek_hole = false;
return FdManager::have_lseek_hole;
return false;
}
// check SEEK_DATA/SEEK_HOLE options
@ -337,7 +369,6 @@ bool FdManager::HaveLseekHole()
result = false;
}
}
fclose(ptmpfp);
FdManager::checked_lseek = true;
FdManager::have_lseek_hole = result;
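// Sketch of the RAII pattern introduced above for temporary FILE* handles:
// std::unique_ptr with a custom deleter closes the stream on every return
// path. close_file stands in for s3fs_fclose, which is assumed to behave like
// fclose plus a null check.
#include <cstdio>
#include <memory>

static void close_file(FILE* fp)
{
    if(fp){
        fclose(fp);
    }
}

static bool temp_file_example()
{
    std::unique_ptr<FILE, decltype(&close_file)> fp(tmpfile(), &close_file);
    if(nullptr == fp){
        return false;                                // nothing to clean up
    }
    // ... use fileno(fp.get()) here ...
    return true;                                     // the deleter closes the stream automatically
}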
@ -387,11 +418,11 @@ FILE* FdManager::MakeTempFile() {
fd = mkstemp(cfn);
if (-1 == fd) {
S3FS_PRN_ERR("failed to create tmp file. errno(%d)", errno);
return NULL;
return nullptr;
}
if (-1 == unlink(cfn)) {
S3FS_PRN_ERR("failed to delete tmp file. errno(%d)", errno);
return NULL;
return nullptr;
}
return fdopen(fd, "rb+");
}
@ -400,9 +431,9 @@ bool FdManager::HasOpenEntityFd(const char* path)
{
AutoLock auto_lock(&FdManager::fd_manager_lock);
FdEntity* ent;
const FdEntity* ent;
int fd = -1;
if(NULL == (ent = FdManager::singleton.GetFdEntity(path, fd, false, AutoLock::ALREADY_LOCKED))){
if(nullptr == (ent = FdManager::singleton.GetFdEntity(path, fd, false, AutoLock::ALREADY_LOCKED))){
return false;
}
return (0 < ent->GetOpenCount());
@ -415,7 +446,7 @@ int FdManager::GetOpenFdCount(const char* path)
{
AutoLock auto_lock(&FdManager::fd_manager_lock);
return FdManager::singleton.GetPseudoFdCount(path);
return FdManager::singleton.GetPseudoFdCount(path);
}
//------------------------------------------------
@ -452,9 +483,8 @@ FdManager::~FdManager()
{
if(this == FdManager::get()){
for(fdent_map_t::iterator iter = fent.begin(); fent.end() != iter; ++iter){
FdEntity* ent = (*iter).second;
S3FS_PRN_WARN("To exit with the cache file opened: path=%s, refcnt=%d", ent->GetPath(), ent->GetOpenCount());
delete ent;
FdEntity* ent = (*iter).second.get();
S3FS_PRN_WARN("To exit with the cache file opened: path=%s, refcnt=%d", ent->GetPath().c_str(), ent->GetOpenCount());
}
fent.clear();
@ -484,22 +514,22 @@ FdEntity* FdManager::GetFdEntity(const char* path, int& existfd, bool newfd, Aut
S3FS_PRN_INFO3("[path=%s][pseudo_fd=%d]", SAFESTRPTR(path), existfd);
if(!path || '\0' == path[0]){
return NULL;
return nullptr;
}
AutoLock auto_lock(&FdManager::fd_manager_lock, locktype);
fdent_map_t::iterator iter = fent.find(std::string(path));
fdent_map_t::iterator iter = fent.find(path);
if(fent.end() != iter && iter->second){
if(-1 == existfd){
if(newfd){
existfd = iter->second->OpenPseudoFd(O_RDWR); // [NOTE] O_RDWR flags
}
return iter->second;
return iter->second.get();
}else if(iter->second->FindPseudoFd(existfd)){
if(newfd){
existfd = iter->second->Dup(existfd);
}
return iter->second;
return iter->second.get();
}
}
@ -507,14 +537,14 @@ FdEntity* FdManager::GetFdEntity(const char* path, int& existfd, bool newfd, Aut
for(iter = fent.begin(); iter != fent.end(); ++iter){
if(iter->second && iter->second->FindPseudoFd(existfd)){
// found opened fd in map
if(0 == strcmp(iter->second->GetPath(), path)){
if(iter->second->GetPath() == path){
if(newfd){
existfd = iter->second->Dup(existfd);
}
return iter->second;
return iter->second.get();
}
// found fd, but it is used by another file (the file descriptor was recycled),
// so returns NULL.
// so returns nullptr.
break;
}
}
@ -524,26 +554,26 @@ FdEntity* FdManager::GetFdEntity(const char* path, int& existfd, bool newfd, Aut
// when the file is opened.
if(!FdManager::IsCacheDir()){
for(iter = fent.begin(); iter != fent.end(); ++iter){
if(iter->second && iter->second->IsOpen() && 0 == strcmp(iter->second->GetPath(), path)){
return iter->second;
if(iter->second && iter->second->IsOpen() && iter->second->GetPath() == path){
return iter->second.get();
}
}
}
return NULL;
return nullptr;
}
FdEntity* FdManager::Open(int& fd, const char* path, headers_t* pmeta, off_t size, const struct timespec& ts_mctime, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type)
FdEntity* FdManager::Open(int& fd, const char* path, const headers_t* pmeta, off_t size, const struct timespec& ts_mctime, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type)
{
S3FS_PRN_DBG("[path=%s][size=%lld][ts_mctime=%s][flags=0x%x][force_tmpfile=%s][create=%s][ignore_modify=%s]", SAFESTRPTR(path), static_cast<long long>(size), str(ts_mctime).c_str(), flags, (force_tmpfile ? "yes" : "no"), (is_create ? "yes" : "no"), (ignore_modify ? "yes" : "no"));
if(!path || '\0' == path[0]){
return NULL;
return nullptr;
}
AutoLock auto_lock(&FdManager::fd_manager_lock);
// search in mapping by key(path)
fdent_map_t::iterator iter = fent.find(std::string(path));
fdent_map_t::iterator iter = fent.find(path);
if(fent.end() == iter && !force_tmpfile && !FdManager::IsCacheDir()){
// If the cache directory is not specified, s3fs opens a temporary file
// when the file is opened.
@ -551,16 +581,15 @@ FdEntity* FdManager::Open(int& fd, const char* path, headers_t* pmeta, off_t siz
// search a entity in all which opened the temporary file.
//
for(iter = fent.begin(); iter != fent.end(); ++iter){
if(iter->second && iter->second->IsOpen() && 0 == strcmp(iter->second->GetPath(), path)){
if(iter->second && iter->second->IsOpen() && iter->second->GetPath() == path){
break; // found opened fd in mapping
}
}
}
FdEntity* ent;
if(fent.end() != iter){
// found
ent = iter->second;
FdEntity* ent = iter->second.get();
// [NOTE]
// If the file is being modified and ignore_modify flag is false,
@ -577,30 +606,31 @@ FdEntity* FdManager::Open(int& fd, const char* path, headers_t* pmeta, off_t siz
}
// (re)open
if(-1 == (fd = ent->Open(pmeta, size, ts_mctime, flags, type))){
if(0 > (fd = ent->Open(pmeta, size, ts_mctime, flags, type))){
S3FS_PRN_ERR("failed to (re)open and create new pseudo fd for path(%s).", path);
return NULL;
return nullptr;
}
return ent;
}else if(is_create){
// not found
std::string cache_path;
if(!force_tmpfile && !FdManager::MakeCachePath(path, cache_path, true)){
S3FS_PRN_ERR("failed to make cache path for object(%s).", path);
return NULL;
return nullptr;
}
// make new obj
ent = new FdEntity(path, cache_path.c_str());
std::unique_ptr<FdEntity> ent(new FdEntity(path, cache_path.c_str()));
// open
if(-1 == (fd = ent->Open(pmeta, size, ts_mctime, flags, type))){
delete ent;
return NULL;
if(0 > (fd = ent->Open(pmeta, size, ts_mctime, flags, type))){
S3FS_PRN_ERR("failed to open and create new pseudo fd for path(%s) errno:%d.", path, fd);
return nullptr;
}
if(!cache_path.empty()){
// using cache
fent[std::string(path)] = ent;
return (fent[path] = std::move(ent)).get();
}else{
// not using cache, so the key of the fdentity is set to a path that does not really exist
// (but is not a strictly non-existent path).
@ -611,12 +641,11 @@ FdEntity* FdManager::Open(int& fd, const char* path, headers_t* pmeta, off_t siz
//
std::string tmppath;
FdManager::MakeRandomTempPath(path, tmppath);
fent[tmppath] = ent;
return (fent[tmppath] = std::move(ent)).get();
}
}else{
return NULL;
return nullptr;
}
return ent;
}
// [NOTE]
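// Hedged sketch of the map-ownership idiom used in Open above: the map owns
// each entity through std::unique_ptr and callers receive a non-owning raw
// pointer from .get(). Entity/insert_and_observe are placeholder names.
#include <map>
#include <memory>
#include <string>
#include <utility>

struct Entity
{
    explicit Entity(std::string p) : path(std::move(p)) {}
    std::string path;
};

static std::map<std::string, std::unique_ptr<Entity>> entities;

static Entity* insert_and_observe(const std::string& key)
{
    std::unique_ptr<Entity> ent(new Entity(key));
    // move ownership into the map, then hand back a raw, non-owning pointer
    return (entities[key] = std::move(ent)).get();
}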
@ -633,11 +662,11 @@ FdEntity* FdManager::GetExistFdEntity(const char* path, int existfd)
for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){
if(iter->second && iter->second->FindPseudoFd(existfd)){
// found existfd in entity
return iter->second;
return iter->second.get();
}
}
// not found entity
return NULL;
return nullptr;
}
FdEntity* FdManager::OpenExistFdEntity(const char* path, int& fd, int flags)
@ -645,10 +674,10 @@ FdEntity* FdManager::OpenExistFdEntity(const char* path, int& fd, int flags)
S3FS_PRN_DBG("[path=%s][flags=0x%x]", SAFESTRPTR(path), flags);
// search entity by path, and create pseudo fd
FdEntity* ent = Open(fd, path, NULL, -1, S3FS_OMIT_TS, flags, false, false, false, AutoLock::NONE);
FdEntity* ent = Open(fd, path, nullptr, -1, S3FS_OMIT_TS, flags, false, false, false, AutoLock::NONE);
if(!ent){
// Not found entity
return NULL;
return nullptr;
}
return ent;
}
@ -667,7 +696,7 @@ int FdManager::GetPseudoFdCount(const char* path)
// search from all entity.
for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){
if(iter->second && 0 == strcmp(iter->second->GetPath(), path)){
if(iter->second && iter->second->GetPath() == path){
// found the entity for the path
return iter->second->GetOpenCount();
}
@ -688,7 +717,7 @@ void FdManager::Rename(const std::string &from, const std::string &to)
// search a entity in all which opened the temporary file.
//
for(iter = fent.begin(); iter != fent.end(); ++iter){
if(iter->second && iter->second->IsOpen() && 0 == strcmp(iter->second->GetPath(), from.c_str())){
if(iter->second && iter->second->IsOpen() && iter->second->GetPath() == from){
break; // found opened fd in mapping
}
}
@ -698,7 +727,7 @@ void FdManager::Rename(const std::string &from, const std::string &to)
// found
S3FS_PRN_DBG("[from=%s][to=%s]", from.c_str(), to.c_str());
FdEntity* ent = iter->second;
std::unique_ptr<FdEntity> ent(std::move(iter->second));
// retrieve old fd entity from map
fent.erase(iter);
@ -711,13 +740,13 @@ void FdManager::Rename(const std::string &from, const std::string &to)
}
// set new fd entity to map
fent[fentmapkey] = ent;
fent[fentmapkey] = std::move(ent);
}
}
bool FdManager::Close(FdEntity* ent, int fd)
{
S3FS_PRN_DBG("[ent->file=%s][pseudo_fd=%d]", ent ? ent->GetPath() : "", fd);
S3FS_PRN_DBG("[ent->file=%s][pseudo_fd=%d]", ent ? ent->GetPath().c_str() : "", fd);
if(!ent || -1 == fd){
return true; // returns success
@ -725,21 +754,20 @@ bool FdManager::Close(FdEntity* ent, int fd)
AutoLock auto_lock(&FdManager::fd_manager_lock);
for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){
if(iter->second == ent){
if(iter->second.get() == ent){
ent->Close(fd);
if(!ent->IsOpen()){
// remove found entity from map.
fent.erase(iter++);
iter = fent.erase(iter);
// check another key name for entity value to be on the safe side
for(; iter != fent.end(); ){
if(iter->second == ent){
fent.erase(iter++);
if(iter->second.get() == ent){
iter = fent.erase(iter);
}else{
++iter;
}
}
delete ent;
}
return true;
}
@ -747,17 +775,18 @@ bool FdManager::Close(FdEntity* ent, int fd)
return false;
}
bool FdManager::ChangeEntityToTempPath(FdEntity* ent, const char* path)
bool FdManager::ChangeEntityToTempPath(const FdEntity* ent, const char* path)
{
AutoLock auto_lock(&FdManager::fd_manager_lock);
for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ){
if(iter->second == ent){
fent.erase(iter++);
if(iter->second.get() == ent){
std::string tmppath;
FdManager::MakeRandomTempPath(path, tmppath);
fent[tmppath] = ent;
// Move the entry to the new key
fent[tmppath] = std::move(iter->second);
iter = fent.erase(iter);
return true;
}else{
++iter;
}
@ -791,7 +820,7 @@ void FdManager::CleanupCacheDirInternal(const std::string &path)
struct dirent* dent;
std::string abs_path = cache_dir + "/" + S3fsCred::GetBucket() + path;
if(NULL == (dp = opendir(abs_path.c_str()))){
if(nullptr == (dp = opendir(abs_path.c_str()))){
S3FS_PRN_ERR("could not open cache dir(%s) - errno(%d)", abs_path.c_str(), errno);
return;
}
@ -830,7 +859,7 @@ void FdManager::CleanupCacheDirInternal(const std::string &path)
bool FdManager::ReserveDiskSpace(off_t size)
{
if(IsSafeDiskSpace(NULL, size)){
if(IsSafeDiskSpace(nullptr, size)){
AutoLock auto_lock(&FdManager::reserved_diskspace_lock);
free_disk_space += size;
return true;
@ -883,14 +912,14 @@ bool FdManager::RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const
DIR* statsdir;
std::string target_dir = cache_stat_top_dir;
target_dir += sub_path;
if(NULL == (statsdir = opendir(target_dir.c_str()))){
if(nullptr == (statsdir = opendir(target_dir.c_str()))){
S3FS_PRN_ERR("Could not open directory(%s) by errno(%d)", target_dir.c_str(), errno);
return false;
}
// loop in directory of cache file's stats
struct dirent* pdirent = NULL;
while(NULL != (pdirent = readdir(statsdir))){
const struct dirent* pdirent = nullptr;
while(nullptr != (pdirent = readdir(statsdir))){
if(DT_DIR == pdirent->d_type){
// found directory
if(0 == strcmp(pdirent->d_name, ".") || 0 == strcmp(pdirent->d_name, "..")){
@ -942,6 +971,7 @@ bool FdManager::RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD, "Could not open cache file");
continue;
}
scope_guard guard([&]() { close(cache_file_fd); });
// get inode number for cache file
struct stat st;
@ -950,7 +980,6 @@ bool FdManager::RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str());
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD, "Could not get file inode number for cache file");
close(cache_file_fd);
continue;
}
ino_t cache_file_inode = st.st_ino;
@ -963,7 +992,6 @@ bool FdManager::RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str());
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD, "Could not load cache file stats information");
close(cache_file_fd);
continue;
}
cfstat.Release();
@ -974,7 +1002,6 @@ bool FdManager::RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str());
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD2 "The cache file size(%lld) and the value(%lld) from cache file stats are different", static_cast<long long int>(st.st_size), static_cast<long long int>(pagelist.Size()));
close(cache_file_fd);
continue;
}
@ -1006,7 +1033,6 @@ bool FdManager::RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const
}
err_area_list.clear();
warn_area_list.clear();
close(cache_file_fd);
}
}
closedir(statsdir);
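// The scope_guard used above is assumed to be a small helper in the s3fs tree;
// this is a generic sketch of the idea it relies on: run a callable when the
// guard leaves scope, so every early `continue` releases the descriptor
// without repeating close() calls.
#include <functional>
#include <utility>

class simple_scope_guard
{
  public:
    explicit simple_scope_guard(std::function<void()> fn) : fn_(std::move(fn)) {}
    ~simple_scope_guard() { if(fn_){ fn_(); } }
    simple_scope_guard(const simple_scope_guard&) = delete;
    simple_scope_guard& operator=(const simple_scope_guard&) = delete;
  private:
    std::function<void()> fn_;
};

// usage, analogous to the cache_file_fd case above:
//   int fd = open(path, O_RDONLY);
//   simple_scope_guard guard([&]() { close(fd); });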
@ -1025,7 +1051,7 @@ bool FdManager::CheckAllCache()
if(FdManager::check_cache_output.empty()){
fp = stdout;
}else{
if(NULL == (fp = fopen(FdManager::check_cache_output.c_str(), "a+"))){
if(nullptr == (fp = fopen(FdManager::check_cache_output.c_str(), "a+"))){
S3FS_PRN_ERR("Could not open(create) output file(%s) for checking all cache by errno(%d)", FdManager::check_cache_output.c_str(), errno);
return false;
}

View File

@ -47,7 +47,9 @@ class FdManager
private:
static off_t GetFreeDiskSpace(const char* path);
static off_t GetTotalDiskSpace(const char* path);
static bool IsDir(const std::string* dir);
static int GetVfsStat(const char* path, struct statvfs* vfsbuf);
int GetPseudoFdCount(const char* path);
void CleanupCacheDirInternal(const std::string &path = "");
@ -78,21 +80,23 @@ class FdManager
static off_t SetEnsureFreeDiskSpace(off_t size);
static bool InitFakeUsedDiskSize(off_t fake_freesize);
static bool IsSafeDiskSpace(const char* path, off_t size);
static bool IsSafeDiskSpaceWithLog(const char* path, off_t size);
static void FreeReservedDiskSpace(off_t size);
static bool ReserveDiskSpace(off_t size);
static bool HaveLseekHole();
static bool SetTmpDir(const char* dir);
static bool CheckTmpDirExist();
static FILE* MakeTempFile();
static off_t GetTotalDiskSpaceByRatio(int ratio);
// Return FdEntity associated with path, returning NULL on error. This operation increments the reference count; callers must decrement via Close after use.
// Return FdEntity associated with path, returning nullptr on error. This operation increments the reference count; callers must decrement via Close after use.
FdEntity* GetFdEntity(const char* path, int& existfd, bool newfd = true, AutoLock::Type locktype = AutoLock::NONE);
FdEntity* Open(int& fd, const char* path, headers_t* pmeta, off_t size, const struct timespec& ts_mctime, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type);
FdEntity* Open(int& fd, const char* path, const headers_t* pmeta, off_t size, const struct timespec& ts_mctime, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type);
FdEntity* GetExistFdEntity(const char* path, int existfd = -1);
FdEntity* OpenExistFdEntity(const char* path, int& fd, int flags = O_RDONLY);
void Rename(const std::string &from, const std::string &to);
bool Close(FdEntity* ent, int fd);
bool ChangeEntityToTempPath(FdEntity* ent, const char* path);
bool ChangeEntityToTempPath(const FdEntity* ent, const char* path);
void CleanupCacheDir();
bool CheckAllCache();

View File

@ -27,28 +27,10 @@
//------------------------------------------------
// AutoFdEntity methods
//------------------------------------------------
AutoFdEntity::AutoFdEntity() : pFdEntity(NULL), pseudo_fd(-1)
AutoFdEntity::AutoFdEntity() : pFdEntity(nullptr), pseudo_fd(-1)
{
}
// [NOTE]
// The copy constructor should not be called, then this is private method.
// Even if it is called, the consistency of the number of
// references can be maintained, but this case is not assumed.
//
AutoFdEntity::AutoFdEntity(AutoFdEntity& other) : pFdEntity(NULL), pseudo_fd(-1)
{
S3FS_PRN_WARN("This method should not be called. Please check the caller.");
if(other.pFdEntity){
if(-1 != (pseudo_fd = other.pFdEntity->Dup(other.pseudo_fd))){
pFdEntity = other.pFdEntity;
}else{
S3FS_PRN_ERR("Failed duplicating fd in AutoFdEntity.");
}
}
}
AutoFdEntity::~AutoFdEntity()
{
Close();
@ -61,7 +43,7 @@ bool AutoFdEntity::Close()
S3FS_PRN_ERR("Failed to close fdentity.");
return false;
}
pFdEntity = NULL;
pFdEntity = nullptr;
pseudo_fd = -1;
}
return true;
@ -79,7 +61,7 @@ int AutoFdEntity::Detach()
}
int fd = pseudo_fd;
pseudo_fd = -1;
pFdEntity = NULL;
pFdEntity = nullptr;
return fd;
}
@ -88,21 +70,24 @@ FdEntity* AutoFdEntity::Attach(const char* path, int existfd)
{
Close();
if(NULL == (pFdEntity = FdManager::get()->GetFdEntity(path, existfd, false))){
if(nullptr == (pFdEntity = FdManager::get()->GetFdEntity(path, existfd, false))){
S3FS_PRN_DBG("Could not find fd entity object(file=%s, pseudo_fd=%d)", path, existfd);
return NULL;
return nullptr;
}
pseudo_fd = existfd;
return pFdEntity;
}
FdEntity* AutoFdEntity::Open(const char* path, headers_t* pmeta, off_t size, const struct timespec& ts_mctime, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type)
FdEntity* AutoFdEntity::Open(const char* path, const headers_t* pmeta, off_t size, const struct timespec& ts_mctime, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type, int* error)
{
Close();
if(NULL == (pFdEntity = FdManager::get()->Open(pseudo_fd, path, pmeta, size, ts_mctime, flags, force_tmpfile, is_create, ignore_modify, type))){
if(nullptr == (pFdEntity = FdManager::get()->Open(pseudo_fd, path, pmeta, size, ts_mctime, flags, force_tmpfile, is_create, ignore_modify, type))){
if(error){
*error = pseudo_fd;
}
pseudo_fd = -1;
return NULL;
return nullptr;
}
return pFdEntity;
}
@ -115,8 +100,8 @@ FdEntity* AutoFdEntity::GetExistFdEntity(const char* path, int existfd)
Close();
FdEntity* ent;
if(NULL == (ent = FdManager::get()->GetExistFdEntity(path, existfd))){
return NULL;
if(nullptr == (ent = FdManager::get()->GetExistFdEntity(path, existfd))){
return nullptr;
}
return ent;
}
@ -125,34 +110,12 @@ FdEntity* AutoFdEntity::OpenExistFdEntity(const char* path, int flags)
{
Close();
if(NULL == (pFdEntity = FdManager::get()->OpenExistFdEntity(path, pseudo_fd, flags))){
return NULL;
if(nullptr == (pFdEntity = FdManager::get()->OpenExistFdEntity(path, pseudo_fd, flags))){
return nullptr;
}
return pFdEntity;
}
// [NOTE]
// This operator should not be called, then this is private method.
// Even if it is called, the consistency of the number of
// references can be maintained, but this case is not assumed.
//
bool AutoFdEntity::operator=(AutoFdEntity& other)
{
S3FS_PRN_WARN("This method should not be called. Please check the caller.");
Close();
if(other.pFdEntity){
if(-1 != (pseudo_fd = other.pFdEntity->Dup(other.pseudo_fd))){
pFdEntity = other.pFdEntity;
}else{
S3FS_PRN_ERR("Failed duplicating fd in AutoFdEntity.");
return false;
}
}
return true;
}
/*
* Local variables:
* tab-width: 4

View File

@ -43,8 +43,10 @@ class AutoFdEntity
int pseudo_fd;
private:
AutoFdEntity(AutoFdEntity& other);
bool operator=(AutoFdEntity& other);
AutoFdEntity(const AutoFdEntity&) = delete;
AutoFdEntity(AutoFdEntity&&) = delete;
AutoFdEntity& operator=(const AutoFdEntity&) = delete;
AutoFdEntity& operator=(AutoFdEntity&&) = delete;
public:
AutoFdEntity();
@ -55,7 +57,7 @@ class AutoFdEntity
FdEntity* Attach(const char* path, int existfd);
int GetPseudoFd() const { return pseudo_fd; }
FdEntity* Open(const char* path, headers_t* pmeta, off_t size, const struct timespec& ts_mctime, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type);
FdEntity* Open(const char* path, const headers_t* pmeta, off_t size, const struct timespec& ts_mctime, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type, int* error = nullptr);
FdEntity* GetExistFdEntity(const char* path, int existfd = -1);
FdEntity* OpenExistFdEntity(const char* path, int flags = O_RDONLY);
};

View File

@ -18,11 +18,12 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cerrno>
#include <climits>
#include <memory>
#include <unistd.h>
#include <limits.h>
#include <sys/stat.h>
#include "common.h"
@ -40,7 +41,7 @@
//------------------------------------------------
// Symbols
//------------------------------------------------
static const int MAX_MULTIPART_CNT = 10 * 1000; // S3 multipart max count
static constexpr int MAX_MULTIPART_CNT = 10 * 1000; // S3 multipart max count
//------------------------------------------------
// FdEntity class variables
@ -106,8 +107,8 @@ ino_t FdEntity::GetInode(int fd)
//------------------------------------------------
FdEntity::FdEntity(const char* tpath, const char* cpath) :
is_lock_init(false), path(SAFESTRPTR(tpath)),
physical_fd(-1), pfile(NULL), inode(0), size_orgmeta(0),
cachepath(SAFESTRPTR(cpath)), pending_status(NO_UPDATE_PENDING)
physical_fd(-1), pfile(nullptr), inode(0), size_orgmeta(0),
cachepath(SAFESTRPTR(cpath)), pending_status(pending_status_t::NO_UPDATE_PENDING)
{
holding_mtime.tv_sec = -1;
holding_mtime.tv_nsec = 0;
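// Several plain enums in this diff become scoped enums (enum class), e.g.
// pending_status_t and stat_time_type, so every use is qualified with the
// enum name. A small sketch of the pattern with hypothetical names:
enum class pending_status_example
{
    NO_UPDATE_PENDING,
    UPDATE_META_PENDING
};

inline bool is_update_pending(pending_status_example s)
{
    // enumerators must be qualified, which avoids collisions with other enums
    return pending_status_example::NO_UPDATE_PENDING != s;
}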
@ -151,10 +152,6 @@ void FdEntity::Clear()
AutoLock auto_lock(&fdent_lock);
AutoLock auto_data_lock(&fdent_data_lock);
for(fdinfo_map_t::iterator iter = pseudo_fd_map.begin(); iter != pseudo_fd_map.end(); ++iter){
PseudoFdInfo* ppseudofdinfo = iter->second;
delete ppseudofdinfo;
}
pseudo_fd_map.clear();
if(-1 != physical_fd){
@ -174,7 +171,7 @@ void FdEntity::Clear()
}
if(pfile){
fclose(pfile);
pfile = NULL;
pfile = nullptr;
}
physical_fd = -1;
inode = 0;
@ -220,9 +217,7 @@ void FdEntity::Close(int fd)
// search pseudo fd and close it.
fdinfo_map_t::iterator iter = pseudo_fd_map.find(fd);
if(pseudo_fd_map.end() != iter){
PseudoFdInfo* ppseudoinfo = iter->second;
pseudo_fd_map.erase(iter);
delete ppseudoinfo;
}else{
S3FS_PRN_WARN("Not found pseudo_fd(%d) in entity object(%s)", fd, path.c_str());
}
@ -246,7 +241,7 @@ void FdEntity::Close(int fd)
}
if(pfile){
fclose(pfile);
pfile = NULL;
pfile = nullptr;
}
physical_fd = -1;
inode = 0;
@ -274,10 +269,10 @@ int FdEntity::Dup(int fd, AutoLock::Type locktype)
S3FS_PRN_ERR("Not found pseudo_fd(%d) in entity object(%s) for physical_fd(%d)", fd, path.c_str(), physical_fd);
return -1;
}
PseudoFdInfo* org_pseudoinfo = iter->second;
PseudoFdInfo* ppseudoinfo = new PseudoFdInfo(physical_fd, (org_pseudoinfo ? org_pseudoinfo->GetFlags() : 0));
const PseudoFdInfo* org_pseudoinfo = iter->second.get();
std::unique_ptr<PseudoFdInfo> ppseudoinfo(new PseudoFdInfo(physical_fd, (org_pseudoinfo ? org_pseudoinfo->GetFlags() : 0)));
int pseudo_fd = ppseudoinfo->GetPseudoFd();
pseudo_fd_map[pseudo_fd] = ppseudoinfo;
pseudo_fd_map[pseudo_fd] = std::move(ppseudoinfo);
return pseudo_fd;
}
@ -291,9 +286,9 @@ int FdEntity::OpenPseudoFd(int flags, AutoLock::Type locktype)
if(-1 == physical_fd){
return -1;
}
PseudoFdInfo* ppseudoinfo = new PseudoFdInfo(physical_fd, flags);
std::unique_ptr<PseudoFdInfo> ppseudoinfo(new PseudoFdInfo(physical_fd, flags));
int pseudo_fd = ppseudoinfo->GetPseudoFd();
pseudo_fd_map[pseudo_fd] = ppseudoinfo;
pseudo_fd_map[pseudo_fd] = std::move(ppseudoinfo);
return pseudo_fd;
}
@ -317,13 +312,13 @@ int FdEntity::OpenMirrorFile()
// make temporary directory
std::string bupdir;
if(!FdManager::MakeCachePath(NULL, bupdir, true, true)){
if(!FdManager::MakeCachePath(nullptr, bupdir, true, true)){
S3FS_PRN_ERR("could not make bup cache directory path or create it.");
return -EIO;
}
// create seed generating mirror file name
unsigned int seed = static_cast<unsigned int>(time(NULL));
unsigned int seed = static_cast<unsigned int>(time(nullptr));
int urandom_fd;
if(-1 != (urandom_fd = open("/dev/urandom", O_RDONLY))){
unsigned int rand_data;
@ -339,7 +334,8 @@ int FdEntity::OpenMirrorFile()
// (we do not care about threading here, because any returned value is acceptable.)
//
char szfile[NAME_MAX + 1];
sprintf(szfile, "%x.tmp", rand_r(&seed));
snprintf(szfile, sizeof(szfile), "%x.tmp", rand_r(&seed));
szfile[NAME_MAX] = '\0'; // for safety
mirrorpath = bupdir + "/" + szfile;
// link mirror file to cache file
@ -380,22 +376,22 @@ PseudoFdInfo* FdEntity::CheckPseudoFdFlags(int fd, bool writable, AutoLock::Type
AutoLock auto_lock(&fdent_lock, locktype);
if(-1 == fd){
return NULL;
return nullptr;
}
fdinfo_map_t::iterator iter = pseudo_fd_map.find(fd);
if(pseudo_fd_map.end() == iter || NULL == iter->second){
return NULL;
if(pseudo_fd_map.end() == iter || nullptr == iter->second){
return nullptr;
}
if(writable){
if(!iter->second->Writable()){
return NULL;
return nullptr;
}
}else{
if(!iter->second->Readable()){
return NULL;
return nullptr;
}
}
return iter->second;
return iter->second.get();
}
bool FdEntity::IsUploading(AutoLock::Type locktype)
@ -403,7 +399,7 @@ bool FdEntity::IsUploading(AutoLock::Type locktype)
AutoLock auto_lock(&fdent_lock, locktype);
for(fdinfo_map_t::const_iterator iter = pseudo_fd_map.begin(); iter != pseudo_fd_map.end(); ++iter){
PseudoFdInfo* ppseudoinfo = iter->second;
const PseudoFdInfo* ppseudoinfo = iter->second.get();
if(ppseudoinfo && ppseudoinfo->IsUploading()){
return true;
}
@ -485,11 +481,13 @@ int FdEntity::Open(const headers_t* pmeta, off_t size, const struct timespec& ts
bool need_save_csf = false; // need to save(reset) cache stat file
bool is_truncate = false; // need to truncate
std::unique_ptr<CacheFileStat> pcfstat;
if(!cachepath.empty()){
// using cache
struct stat st;
if(stat(cachepath.c_str(), &st) == 0){
if(0 > compare_timespec(st, ST_TYPE_MTIME, ts_mctime)){
if(0 > compare_timespec(st, stat_time_type::MTIME, ts_mctime)){
S3FS_PRN_DBG("cache file stale, removing: %s", cachepath.c_str());
if(unlink(cachepath.c_str()) != 0){
return (0 == errno ? -EIO : -errno);
@ -498,12 +496,12 @@ int FdEntity::Open(const headers_t* pmeta, off_t size, const struct timespec& ts
}
// open cache and cache stat file, load page info.
CacheFileStat cfstat(path.c_str());
pcfstat.reset(new CacheFileStat(path.c_str()));
// try to open cache file
if( -1 != (physical_fd = open(cachepath.c_str(), O_RDWR)) &&
0 != (inode = FdEntity::GetInode(physical_fd)) &&
pagelist.Serialize(cfstat, false, inode) )
pagelist.Serialize(*pcfstat, false, inode) )
{
// succeed to open cache file and to load stats data
memset(&st, 0, sizeof(struct stat));
@ -517,13 +515,18 @@ int FdEntity::Open(const headers_t* pmeta, off_t size, const struct timespec& ts
if(-1 == size){
if(st.st_size != pagelist.Size()){
pagelist.Resize(st.st_size, false, true); // Areas with increased size are modified
need_save_csf = true; // need to update page info
need_save_csf = true; // need to update page info
}
size = st.st_size;
}else{
// First if the current cache file size and pagelist do not match, fix pagelist.
if(st.st_size != pagelist.Size()){
pagelist.Resize(st.st_size, false, true); // Areas with increased size are modified
need_save_csf = true; // need to update page info
}
if(size != pagelist.Size()){
pagelist.Resize(size, false, true); // Areas with increased size are modified
need_save_csf = true; // need to update page info
need_save_csf = true; // need to update page info
}
if(size != st.st_size){
is_truncate = true;
@ -541,12 +544,13 @@ int FdEntity::Open(const headers_t* pmeta, off_t size, const struct timespec& ts
S3FS_PRN_ERR("failed to open file(%s). errno(%d)", cachepath.c_str(), errno);
// remove cache stat file if it is existed
if(!CacheFileStat::DeleteCacheFileStat(path.c_str())){
if(ENOENT != errno){
S3FS_PRN_WARN("failed to delete current cache stat file(%s) by errno(%d), but continue...", path.c_str(), errno);
int result;
if(0 != (result = CacheFileStat::DeleteCacheFileStat(path.c_str()))){
if(-ENOENT != result){
S3FS_PRN_WARN("failed to delete current cache stat file(%s) by errno(%d), but continue...", path.c_str(), result);
}
}
return (0 == errno ? -EIO : -errno);
return result;
}
need_save_csf = true; // need to update page info
inode = FdEntity::GetInode(physical_fd);
@ -579,7 +583,7 @@ int FdEntity::Open(const headers_t* pmeta, off_t size, const struct timespec& ts
physical_fd = mirrorfd;
// make file pointer(for being same tmpfile)
if(NULL == (pfile = fdopen(physical_fd, "wb"))){
if(nullptr == (pfile = fdopen(physical_fd, "wb"))){
S3FS_PRN_ERR("failed to get fileno(%s). errno(%d)", cachepath.c_str(), errno);
close(physical_fd);
physical_fd = -1;
@ -592,11 +596,11 @@ int FdEntity::Open(const headers_t* pmeta, off_t size, const struct timespec& ts
inode = 0;
// open temporary file
if(NULL == (pfile = FdManager::MakeTempFile()) || -1 ==(physical_fd = fileno(pfile))){
if(nullptr == (pfile = FdManager::MakeTempFile()) || -1 ==(physical_fd = fileno(pfile))){
S3FS_PRN_ERR("failed to open temporary file by errno(%d)", errno);
if(pfile){
fclose(pfile);
pfile = NULL;
pfile = nullptr;
}
return (0 == errno ? -EIO : -errno);
}
@ -623,7 +627,7 @@ int FdEntity::Open(const headers_t* pmeta, off_t size, const struct timespec& ts
if(0 != ftruncate(physical_fd, size) || 0 != fsync(physical_fd)){
S3FS_PRN_ERR("ftruncate(%s) or fsync returned err(%d)", cachepath.c_str(), errno);
fclose(pfile);
pfile = NULL;
pfile = nullptr;
physical_fd = -1;
inode = 0;
return (0 == errno ? -EIO : -errno);
@ -631,9 +635,8 @@ int FdEntity::Open(const headers_t* pmeta, off_t size, const struct timespec& ts
}
// reset cache stat file
if(need_save_csf){
CacheFileStat cfstat(path.c_str());
if(!pagelist.Serialize(cfstat, true, inode)){
if(need_save_csf && pcfstat.get()){
if(!pagelist.Serialize(*pcfstat, true, inode)){
S3FS_PRN_WARN("failed to save cache stat file(%s), but continue...", path.c_str());
}
}
@ -658,7 +661,7 @@ int FdEntity::Open(const headers_t* pmeta, off_t size, const struct timespec& ts
if(0 != SetMCtime(ts_mctime, ts_mctime, AutoLock::ALREADY_LOCKED)){
S3FS_PRN_ERR("failed to set mtime/ctime. errno(%d)", errno);
fclose(pfile);
pfile = NULL;
pfile = nullptr;
physical_fd = -1;
inode = 0;
return (0 == errno ? -EIO : -errno);
@ -667,9 +670,9 @@ int FdEntity::Open(const headers_t* pmeta, off_t size, const struct timespec& ts
}
// create new pseudo fd, and set it to map
PseudoFdInfo* ppseudoinfo = new PseudoFdInfo(physical_fd, flags);
std::unique_ptr<PseudoFdInfo> ppseudoinfo(new PseudoFdInfo(physical_fd, flags));
int pseudo_fd = ppseudoinfo->GetPseudoFd();
pseudo_fd_map[pseudo_fd] = ppseudoinfo;
pseudo_fd_map[pseudo_fd] = std::move(ppseudoinfo);
// if there is untreated area, set it to pseudo object.
if(0 < truncated_size){
@ -677,9 +680,8 @@ int FdEntity::Open(const headers_t* pmeta, off_t size, const struct timespec& ts
pseudo_fd_map.erase(pseudo_fd);
if(pfile){
fclose(pfile);
pfile = NULL;
pfile = nullptr;
}
delete ppseudoinfo;
}
}
@ -773,7 +775,8 @@ bool FdEntity::RenamePath(const std::string& newpath, std::string& fentmapkey)
bool FdEntity::IsModified() const
{
AutoLock auto_data_lock(&fdent_data_lock);
AutoLock auto_lock(&fdent_lock);
AutoLock auto_data_lock2(&fdent_data_lock);
return pagelist.IsModified();
}
@ -868,7 +871,7 @@ bool FdEntity::UpdateCtime()
return false;
}
orgmeta["x-amz-meta-ctime"] = str_stat_time(st, ST_TYPE_CTIME);
orgmeta["x-amz-meta-ctime"] = str_stat_time(st, stat_time_type::CTIME);
return true;
}
@ -881,7 +884,7 @@ bool FdEntity::UpdateAtime()
return false;
}
orgmeta["x-amz-meta-atime"] = str_stat_time(st, ST_TYPE_ATIME);
orgmeta["x-amz-meta-atime"] = str_stat_time(st, stat_time_type::ATIME);
return true;
}
@ -904,13 +907,20 @@ bool FdEntity::UpdateMtime(bool clear_holding_mtime)
if(!ClearHoldingMtime(AutoLock::ALREADY_LOCKED)){
return false;
}
// [NOTE]
// If we come here after fdatasync has been processed, the file
// content update has already taken place. However, the metadata
// update is still necessary and needs to be flagged so that it
// is performed by flush.
//
pending_status = pending_status_t::UPDATE_META_PENDING;
}
}else{
struct stat st;
if(!GetStats(st, AutoLock::ALREADY_LOCKED)){
return false;
}
orgmeta["x-amz-meta-mtime"] = str_stat_time(st, ST_TYPE_MTIME);
orgmeta["x-amz-meta-mtime"] = str_stat_time(st, stat_time_type::MTIME);
}
return true;
}
@ -946,7 +956,7 @@ bool FdEntity::ClearHoldingMtime(AutoLock::Type locktype)
ts[0].tv_sec = holding_mtime.tv_sec;
ts[0].tv_nsec = holding_mtime.tv_nsec;
set_stat_to_timespec(st, ST_TYPE_CTIME, ts_ctime);
set_stat_to_timespec(st, stat_time_type::CTIME, ts_ctime);
ts[1].tv_sec = ts_ctime.tv_sec;
ts[1].tv_nsec = ts_ctime.tv_nsec;
@ -959,7 +969,7 @@ bool FdEntity::ClearHoldingMtime(AutoLock::Type locktype)
struct timespec ts[2];
struct timespec ts_ctime;
set_stat_to_timespec(st, ST_TYPE_CTIME, ts_ctime);
set_stat_to_timespec(st, stat_time_type::CTIME, ts_ctime);
ts[0].tv_sec = ts_ctime.tv_sec;
ts[0].tv_nsec = ts_ctime.tv_nsec;
@ -1010,21 +1020,21 @@ bool FdEntity::SetXattr(const std::string& xattr)
bool FdEntity::SetMode(mode_t mode)
{
AutoLock auto_lock(&fdent_lock);
orgmeta["x-amz-meta-mode"] = str(mode);
orgmeta["x-amz-meta-mode"] = std::to_string(mode);
return true;
}
bool FdEntity::SetUId(uid_t uid)
{
AutoLock auto_lock(&fdent_lock);
orgmeta["x-amz-meta-uid"] = str(uid);
orgmeta["x-amz-meta-uid"] = std::to_string(uid);
return true;
}
bool FdEntity::SetGId(gid_t gid)
{
AutoLock auto_lock(&fdent_lock);
orgmeta["x-amz-meta-gid"] = str(gid);
orgmeta["x-amz-meta-gid"] = std::to_string(gid);
return true;
}
@ -1034,7 +1044,7 @@ bool FdEntity::SetContentType(const char* path)
return false;
}
AutoLock auto_lock(&fdent_lock);
orgmeta["Content-Type"] = S3fsCurl::LookupMimeType(std::string(path));
orgmeta["Content-Type"] = S3fsCurl::LookupMimeType(path);
return true;
}
@ -1109,7 +1119,7 @@ int FdEntity::Load(off_t start, off_t size, AutoLock::Type type, bool is_modifie
break;
}
// Set loaded flag
pagelist.SetPageLoadedStatus(iter->offset, iter->bytes, (is_modified_flag ? PageList::PAGE_LOAD_MODIFIED : PageList::PAGE_LOADED));
pagelist.SetPageLoadedStatus(iter->offset, iter->bytes, (is_modified_flag ? PageList::page_status::LOAD_MODIFIED : PageList::page_status::LOADED));
}
PageList::FreeList(unloaded_list);
}
@ -1131,7 +1141,7 @@ int FdEntity::NoCacheLoadAndPost(PseudoFdInfo* pseudo_obj, off_t start, off_t si
S3FS_PRN_INFO3("[path=%s][physical_fd=%d][offset=%lld][size=%lld]", path.c_str(), physical_fd, static_cast<long long int>(start), static_cast<long long int>(size));
if(!pseudo_obj){
S3FS_PRN_ERR("Pseudo object is NULL.");
S3FS_PRN_ERR("Pseudo object is nullptr.");
return -EIO;
}
@ -1154,13 +1164,10 @@ int FdEntity::NoCacheLoadAndPost(PseudoFdInfo* pseudo_obj, off_t start, off_t si
FdManager::get()->ChangeEntityToTempPath(this, path.c_str());
// open temporary file
FILE* ptmpfp;
int tmpfd;
if(NULL == (ptmpfp = FdManager::MakeTempFile()) || -1 ==(tmpfd = fileno(ptmpfp))){
int tmpfd;
std::unique_ptr<FILE, decltype(&s3fs_fclose)> ptmpfp(FdManager::MakeTempFile(), &s3fs_fclose);
if(nullptr == ptmpfp || -1 == (tmpfd = fileno(ptmpfp.get()))){
S3FS_PRN_ERR("failed to open temporary file by errno(%d)", errno);
if(ptmpfp){
fclose(ptmpfp);
}
return (0 == errno ? -EIO : -errno);
}
@ -1173,10 +1180,10 @@ int FdEntity::NoCacheLoadAndPost(PseudoFdInfo* pseudo_obj, off_t start, off_t si
break;
}
// download in units of the multipart size (default 10MB)
for(off_t oneread = 0, totalread = (iter->offset < start ? start : 0); totalread < static_cast<off_t>(iter->bytes); totalread += oneread){
for(off_t oneread = 0, totalread = (iter->offset < start ? start : 0); totalread < iter->bytes; totalread += oneread){
int upload_fd = physical_fd;
off_t offset = iter->offset + totalread;
oneread = std::min(static_cast<off_t>(iter->bytes) - totalread, S3fsCurl::GetMultipartSize());
oneread = std::min(iter->bytes - totalread, S3fsCurl::GetMultipartSize());
// check rest size is over minimum part size
//
@ -1282,9 +1289,6 @@ int FdEntity::NoCacheLoadAndPost(PseudoFdInfo* pseudo_obj, off_t start, off_t si
}
}
// close temporary
fclose(ptmpfp);
return result;
}
@ -1311,7 +1315,7 @@ int FdEntity::NoCachePreMultipartPost(PseudoFdInfo* pseudo_obj)
s3fscurl.DestroyCurlHandle();
// Clear the dirty flag, because the meta data is updated.
pending_status = NO_UPDATE_PENDING;
pending_status = pending_status_t::NO_UPDATE_PENDING;
// reset upload_id
if(!pseudo_obj->InitialUploadInfo(upload_id)){
@ -1338,7 +1342,7 @@ int FdEntity::NoCacheMultipartPost(PseudoFdInfo* pseudo_obj, int tgfd, off_t sta
}
// append new part and get it's etag string pointer
etagpair* petagpair = NULL;
etagpair* petagpair = nullptr;
if(!pseudo_obj->AppendUploadPart(start, size, false, &petagpair)){
return -EIO;
}
@ -1366,11 +1370,17 @@ int FdEntity::NoCacheCompleteMultipartPost(PseudoFdInfo* pseudo_obj)
}
S3fsCurl s3fscurl(true);
int result;
if(0 != (result = s3fscurl.CompleteMultipartPostRequest(path.c_str(), upload_id, etaglist))){
int result = s3fscurl.CompleteMultipartPostRequest(path.c_str(), upload_id, etaglist);
s3fscurl.DestroyCurlHandle();
if(0 != result){
S3fsCurl s3fscurl_abort(true);
int result2 = s3fscurl_abort.AbortMultipartUpload(path.c_str(), upload_id);
s3fscurl_abort.DestroyCurlHandle();
if(0 != result2){
S3FS_PRN_ERR("failed to abort multipart upload by errno(%d)", result2);
}
return result;
}
s3fscurl.DestroyCurlHandle();
// clear multipart upload info
untreated_list.ClearAll();
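// The error path above now aborts the multipart upload when the complete
// request fails, so the bucket is not left holding stale parts. A hedged
// outline of the flow with placeholder functions (not the S3fsCurl API):
static int do_complete_multipart() { return 0; }     // hypothetical stand-in
static int do_abort_multipart()    { return 0; }     // hypothetical stand-in

static int complete_or_abort_example()
{
    int result = do_complete_multipart();
    if(0 != result){
        // best-effort cleanup; an abort failure is logged, but the original
        // completion error is what gets returned to the caller
        do_abort_multipart();
        return result;
    }
    return 0;
}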
@ -1381,7 +1391,8 @@ int FdEntity::NoCacheCompleteMultipartPost(PseudoFdInfo* pseudo_obj)
off_t FdEntity::BytesModified()
{
AutoLock auto_lock(&fdent_data_lock);
AutoLock auto_lock(&fdent_lock);
AutoLock auto_lock2(&fdent_data_lock);
return pagelist.BytesModified();
}
@ -1401,40 +1412,44 @@ off_t FdEntity::BytesModified()
//
int FdEntity::RowFlush(int fd, const char* tpath, AutoLock::Type type, bool force_sync)
{
AutoLock auto_lock(&fdent_lock, type);
S3FS_PRN_INFO3("[tpath=%s][path=%s][pseudo_fd=%d][physical_fd=%d]", SAFESTRPTR(tpath), path.c_str(), fd, physical_fd);
if(-1 == physical_fd){
return -EBADF;
}
AutoLock auto_lock(&fdent_lock, type);
// check pseudo fd and its flag
fdinfo_map_t::iterator miter = pseudo_fd_map.find(fd);
if(pseudo_fd_map.end() == miter || NULL == miter->second){
if(pseudo_fd_map.end() == miter || nullptr == miter->second){
return -EBADF;
}
if(!miter->second->Writable() && !(miter->second->GetFlags() & O_CREAT)){
// If the entity is opened read-only, it will end normally without updating.
return 0;
}
PseudoFdInfo* pseudo_obj = miter->second;
PseudoFdInfo* pseudo_obj = miter->second.get();
AutoLock auto_lock2(&fdent_data_lock);
if(!force_sync && !pagelist.IsModified()){
int result;
if(!force_sync && !pagelist.IsModified() && !IsDirtyMetadata()){
// nothing to update.
return 0;
}
if(S3fsLog::IsS3fsLogDbg()){
pagelist.Dump();
}
int result;
if(nomultipart){
// No multipart upload
result = RowFlushNoMultipart(pseudo_obj, tpath);
if(!force_sync && !pagelist.IsModified()){
// for only push pending headers
result = UploadPending(-1, AutoLock::ALREADY_LOCKED);
}else{
result = RowFlushNoMultipart(pseudo_obj, tpath);
}
}else if(FdEntity::streamupload){
// Stream multipart upload
result = RowFlushStreamMultipart(pseudo_obj, tpath);
@ -1460,7 +1475,7 @@ int FdEntity::RowFlush(int fd, const char* tpath, AutoLock::Type type, bool forc
// [NOTE]
// Both fdent_lock and fdent_data_lock must be locked before calling.
//
int FdEntity::RowFlushNoMultipart(PseudoFdInfo* pseudo_obj, const char* tpath)
int FdEntity::RowFlushNoMultipart(const PseudoFdInfo* pseudo_obj, const char* tpath)
{
S3FS_PRN_INFO3("[tpath=%s][path=%s][pseudo_fd=%d][physical_fd=%d]", SAFESTRPTR(tpath), path.c_str(), (pseudo_obj ? pseudo_obj->GetPseudoFd() : -1), physical_fd);
@ -1519,6 +1534,7 @@ int FdEntity::RowFlushNoMultipart(PseudoFdInfo* pseudo_obj, const char* tpath)
if(0 == result){
pagelist.ClearAllModified();
}
return result;
}
@ -1624,7 +1640,7 @@ int FdEntity::RowFlushMultipart(PseudoFdInfo* pseudo_obj, const char* tpath)
if(0 == result){
pagelist.ClearAllModified();
pending_status = NO_UPDATE_PENDING;
pending_status = pending_status_t::NO_UPDATE_PENDING;
}
return result;
}
@ -1733,7 +1749,7 @@ int FdEntity::RowFlushMixMultipart(PseudoFdInfo* pseudo_obj, const char* tpath)
return result;
}
untreated_list.ClearParts(untreated_start, untreated_size);
}
}
// complete multipart uploading.
if(0 != (result = NoCacheCompleteMultipartPost(pseudo_obj))){
S3FS_PRN_ERR("failed to complete(finish) multipart post for file(physical_fd=%d).", physical_fd);
@ -1752,7 +1768,7 @@ int FdEntity::RowFlushMixMultipart(PseudoFdInfo* pseudo_obj, const char* tpath)
if(0 == result){
pagelist.ClearAllModified();
pending_status = NO_UPDATE_PENDING;
pending_status = pending_status_t::NO_UPDATE_PENDING;
}
return result;
}
@ -1767,7 +1783,7 @@ int FdEntity::RowFlushStreamMultipart(PseudoFdInfo* pseudo_obj, const char* tpat
if(-1 == physical_fd || !pseudo_obj){
return -EBADF;
}
int result;
int result = 0;
if(pagelist.Size() <= S3fsCurl::GetMultipartSize()){
//
@ -1808,7 +1824,8 @@ int FdEntity::RowFlushStreamMultipart(PseudoFdInfo* pseudo_obj, const char* tpat
mp_part_list_t to_copy_list;
mp_part_list_t to_download_list;
filepart_list_t cancel_uploaded_list;
if(!pseudo_obj->ExtractUploadPartsFromAllArea(untreated_list, to_upload_list, to_copy_list, to_download_list, cancel_uploaded_list, S3fsCurl::GetMultipartSize(), pagelist.Size(), FdEntity::mixmultipart)){
bool wait_upload_complete = false;
if(!pseudo_obj->ExtractUploadPartsFromAllArea(untreated_list, to_upload_list, to_copy_list, to_download_list, cancel_uploaded_list, wait_upload_complete, S3fsCurl::GetMultipartSize(), pagelist.Size(), FdEntity::mixmultipart)){
S3FS_PRN_ERR("Failed to extract various upload parts list from all area: errno(EIO)");
return -EIO;
}
@ -1870,7 +1887,7 @@ int FdEntity::RowFlushStreamMultipart(PseudoFdInfo* pseudo_obj, const char* tpat
}
// Clear the dirty flag, because the meta data is updated.
pending_status = NO_UPDATE_PENDING;
pending_status = pending_status_t::NO_UPDATE_PENDING;
}
//
@ -1884,6 +1901,26 @@ int FdEntity::RowFlushStreamMultipart(PseudoFdInfo* pseudo_obj, const char* tpat
}
}
// [NOTE]
// If a part has already started uploading, that part is updated again
// after the upload finishes, so the previously uploaded part must be
// canceled. (These parts are listed in cancel_uploaded_list; the
// cancellation processing means re-uploading the same area.)
//
// In rare cases, the completion of the previous upload and the
// re-upload may be reordered, swapping the ETags, in which case the
// upload will fail. To prevent this, if the upload of the same area
// as the re-upload is still incomplete, we must wait here for it to
// complete.
//
if(wait_upload_complete){
if(0 != (result = pseudo_obj->WaitAllThreadsExit())){
S3FS_PRN_ERR("Some cancel area uploads that were waiting to complete failed with %d.", result);
return result;
}
}
//
// Upload multipart and copy parts and wait exiting them
//
@ -1912,13 +1949,21 @@ int FdEntity::RowFlushStreamMultipart(PseudoFdInfo* pseudo_obj, const char* tpat
return -EIO;
}else{
S3fsCurl s3fscurl(true);
if(0 != (result = s3fscurl.CompleteMultipartPostRequest(path.c_str(), upload_id, etaglist))){
result = s3fscurl.CompleteMultipartPostRequest(path.c_str(), upload_id, etaglist);
s3fscurl.DestroyCurlHandle();
if(0 != result){
S3FS_PRN_ERR("failed to complete multipart upload by errno(%d)", result);
untreated_list.ClearAll();
pseudo_obj->ClearUploadInfo(); // clear multipart upload info
S3fsCurl s3fscurl_abort(true);
int result2 = s3fscurl_abort.AbortMultipartUpload(path.c_str(), upload_id);
s3fscurl_abort.DestroyCurlHandle();
if(0 != result2){
S3FS_PRN_ERR("failed to abort multipart upload by errno(%d)", result2);
}
return result;
}
s3fscurl.DestroyCurlHandle();
}
untreated_list.ClearAll();
pseudo_obj->ClearUploadInfo(); // clear multipart upload info
@ -1967,7 +2012,7 @@ ssize_t FdEntity::Read(int fd, char* bytes, off_t start, size_t size, bool force
{
S3FS_PRN_DBG("[path=%s][pseudo_fd=%d][physical_fd=%d][offset=%lld][size=%zu]", path.c_str(), fd, physical_fd, static_cast<long long int>(start), size);
if(-1 == physical_fd || NULL == CheckPseudoFdFlags(fd, false)){
if(-1 == physical_fd || nullptr == CheckPseudoFdFlags(fd, false)){
S3FS_PRN_DBG("pseudo_fd(%d) to physical_fd(%d) for path(%s) is not opened or not readable", fd, physical_fd, path.c_str());
return -EBADF;
}
@ -1976,7 +2021,7 @@ ssize_t FdEntity::Read(int fd, char* bytes, off_t start, size_t size, bool force
AutoLock auto_lock2(&fdent_data_lock);
if(force_load){
pagelist.SetPageLoadedStatus(start, size, PageList::PAGE_NOT_LOAD_MODIFIED);
pagelist.SetPageLoadedStatus(start, size, PageList::page_status::NOT_LOAD_MODIFIED);
}
ssize_t rsize;
@ -2030,14 +2075,14 @@ ssize_t FdEntity::Write(int fd, const char* bytes, off_t start, size_t size)
{
S3FS_PRN_DBG("[path=%s][pseudo_fd=%d][physical_fd=%d][offset=%lld][size=%zu]", path.c_str(), fd, physical_fd, static_cast<long long int>(start), size);
PseudoFdInfo* pseudo_obj = NULL;
if(-1 == physical_fd || NULL == (pseudo_obj = CheckPseudoFdFlags(fd, false))){
PseudoFdInfo* pseudo_obj = nullptr;
if(-1 == physical_fd || nullptr == (pseudo_obj = CheckPseudoFdFlags(fd, false))){
S3FS_PRN_ERR("pseudo_fd(%d) to physical_fd(%d) for path(%s) is not opened or not writable", fd, physical_fd, path.c_str());
return -EBADF;
}
// check if not enough disk space left BEFORE locking fd
if(FdManager::IsCacheDir() && !FdManager::IsSafeDiskSpace(NULL, size)){
if(FdManager::IsCacheDir() && !FdManager::IsSafeDiskSpace(nullptr, size)){
FdManager::get()->CleanupCacheDir();
}
AutoLock auto_lock(&fdent_lock);
@ -2057,7 +2102,7 @@ ssize_t FdEntity::Write(int fd, const char* bytes, off_t start, size_t size)
}
// add new area
pagelist.SetPageLoadedStatus(pagelist.Size(), start - pagelist.Size(), PageList::PAGE_MODIFIED);
pagelist.SetPageLoadedStatus(pagelist.Size(), start - pagelist.Size(), PageList::page_status::MODIFIED);
}
ssize_t wsize;
@ -2081,7 +2126,7 @@ ssize_t FdEntity::Write(int fd, const char* bytes, off_t start, size_t size)
// [NOTE]
// Both fdent_lock and fdent_data_lock must be locked before calling.
//
ssize_t FdEntity::WriteNoMultipart(PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size)
ssize_t FdEntity::WriteNoMultipart(const PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size)
{
S3FS_PRN_DBG("[path=%s][pseudo_fd=%d][physical_fd=%d][offset=%lld][size=%zu]", path.c_str(), (pseudo_obj ? pseudo_obj->GetPseudoFd() : -1), physical_fd, static_cast<long long int>(start), size);
@ -2123,7 +2168,7 @@ ssize_t FdEntity::WriteNoMultipart(PseudoFdInfo* pseudo_obj, const char* bytes,
return -errno;
}
if(0 < wsize){
pagelist.SetPageLoadedStatus(start, wsize, PageList::PAGE_LOAD_MODIFIED);
pagelist.SetPageLoadedStatus(start, wsize, PageList::page_status::LOAD_MODIFIED);
AddUntreated(start, wsize);
}
@ -2197,7 +2242,7 @@ ssize_t FdEntity::WriteMultipart(PseudoFdInfo* pseudo_obj, const char* bytes, of
return -errno;
}
if(0 < wsize){
pagelist.SetPageLoadedStatus(start, wsize, PageList::PAGE_LOAD_MODIFIED);
pagelist.SetPageLoadedStatus(start, wsize, PageList::page_status::LOAD_MODIFIED);
AddUntreated(start, wsize);
}
@ -2284,7 +2329,7 @@ ssize_t FdEntity::WriteMixMultipart(PseudoFdInfo* pseudo_obj, const char* bytes,
return -errno;
}
if(0 < wsize){
pagelist.SetPageLoadedStatus(start, wsize, PageList::PAGE_LOAD_MODIFIED);
pagelist.SetPageLoadedStatus(start, wsize, PageList::page_status::LOAD_MODIFIED);
AddUntreated(start, wsize);
}
@ -2337,7 +2382,7 @@ ssize_t FdEntity::WriteStreamUpload(PseudoFdInfo* pseudo_obj, const char* bytes,
return -errno;
}
if(0 < wsize){
pagelist.SetPageLoadedStatus(start, wsize, PageList::PAGE_LOAD_MODIFIED);
pagelist.SetPageLoadedStatus(start, wsize, PageList::page_status::LOAD_MODIFIED);
AddUntreated(start, wsize);
}
@ -2348,15 +2393,15 @@ ssize_t FdEntity::WriteStreamUpload(PseudoFdInfo* pseudo_obj, const char* bytes,
//
headers_t tmporgmeta = orgmeta;
bool isuploading = pseudo_obj->IsUploading();
int result;
ssize_t result;
if(0 != (result = pseudo_obj->UploadBoundaryLastUntreatedArea(path.c_str(), tmporgmeta, this))){
S3FS_PRN_ERR("Failed to upload the last untreated parts(area) : result=%d", result);
S3FS_PRN_ERR("Failed to upload the last untreated parts(area) : result=%zd", result);
return result;
}
if(!isuploading && pseudo_obj->IsUploading()){
// Clear the dirty flag, because the meta data is updated.
pending_status = NO_UPDATE_PENDING;
pending_status = pending_status_t::NO_UPDATE_PENDING;
}
return wsize;
@ -2378,7 +2423,7 @@ bool FdEntity::MergeOrgMeta(headers_t& updatemeta)
// these are special cases: we remove keys that have empty values.
for(headers_t::iterator hiter = orgmeta.begin(); hiter != orgmeta.end(); ){
if(hiter->second.empty()){
orgmeta.erase(hiter++);
hiter = orgmeta.erase(hiter);
}else{
++hiter;
}
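
The hunk above replaces orgmeta.erase(hiter++) with hiter = orgmeta.erase(hiter): since C++11, std::map::erase returns the iterator following the erased element, which makes the erase-while-iterating loop explicit. A minimal self-contained sketch of the same idiom (the function name and map contents are illustrative only):

#include <map>
#include <string>

// Remove every entry whose value is empty, advancing safely while erasing.
static void drop_empty_values(std::map<std::string, std::string>& meta)
{
    for(std::map<std::string, std::string>::iterator iter = meta.begin(); iter != meta.end(); ){
        if(iter->second.empty()){
            iter = meta.erase(iter);    // erase() hands back the next valid iterator
        }else{
            ++iter;
        }
    }
}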
@ -2397,11 +2442,12 @@ bool FdEntity::MergeOrgMeta(headers_t& updatemeta)
SetAtime(atime, AutoLock::ALREADY_LOCKED);
}
if(NO_UPDATE_PENDING == pending_status && (IsUploading(AutoLock::ALREADY_LOCKED) || pagelist.IsModified())){
pending_status = UPDATE_META_PENDING;
AutoLock auto_lock2(&fdent_data_lock);
if(pending_status_t::NO_UPDATE_PENDING == pending_status && (IsUploading(AutoLock::ALREADY_LOCKED) || pagelist.IsModified())){
pending_status = pending_status_t::UPDATE_META_PENDING;
}
return (NO_UPDATE_PENDING != pending_status);
return (pending_status_t::NO_UPDATE_PENDING != pending_status);
}
// global function in s3fs.cpp
@ -2412,11 +2458,11 @@ int FdEntity::UploadPending(int fd, AutoLock::Type type)
AutoLock auto_lock(&fdent_lock, type);
int result;
if(NO_UPDATE_PENDING == pending_status){
if(pending_status_t::NO_UPDATE_PENDING == pending_status){
// nothing to do
result = 0;
}else if(UPDATE_META_PENDING == pending_status){
}else if(pending_status_t::UPDATE_META_PENDING == pending_status){
headers_t updatemeta = orgmeta;
updatemeta["x-amz-copy-source"] = urlEncodePath(service_path + S3fsCred::GetBucket() + get_realpath(path.c_str()));
updatemeta["x-amz-metadata-directive"] = "REPLACE";
@ -2426,7 +2472,7 @@ int FdEntity::UploadPending(int fd, AutoLock::Type type)
if(0 != result){
S3FS_PRN_ERR("failed to put header after flushing file(%s) by(%d).", path.c_str(), result);
}else{
pending_status = NO_UPDATE_PENDING;
pending_status = pending_status_t::NO_UPDATE_PENDING;
}
}else{ // CREATE_FILE_PENDING == pending_status
@ -2438,7 +2484,7 @@ int FdEntity::UploadPending(int fd, AutoLock::Type type)
if(0 != result){
S3FS_PRN_ERR("failed to flush for file(%s) by(%d).", path.c_str(), result);
}else{
pending_status = NO_UPDATE_PENDING;
pending_status = pending_status_t::NO_UPDATE_PENDING;
}
}
}
@ -2482,10 +2528,12 @@ bool FdEntity::PunchHole(off_t start, size_t size)
{
S3FS_PRN_DBG("[path=%s][physical_fd=%d][offset=%lld][size=%zu]", path.c_str(), physical_fd, static_cast<long long int>(start), size);
AutoLock auto_lock(&fdent_lock);
AutoLock auto_lock2(&fdent_data_lock);
if(-1 == physical_fd){
return false;
}
AutoLock auto_lock(&fdent_data_lock);
// get the page list that has no data
fdpage_list_t nodata_pages;
@ -2508,7 +2556,7 @@ bool FdEntity::PunchHole(off_t start, size_t size)
}
return false;
}
if(!pagelist.SetPageLoadedStatus(iter->offset, iter->bytes, PageList::PAGE_NOT_LOAD_MODIFIED)){
if(!pagelist.SetPageLoadedStatus(iter->offset, iter->bytes, PageList::page_status::NOT_LOAD_MODIFIED)){
S3FS_PRN_ERR("succeed to punch HOLEs in the cache file, but failed to update the cache stat.");
return false;
}
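
PunchHole (and MarkDirtyNewFile below) now takes fdent_lock and fdent_data_lock together at the top of the function rather than acquiring the data lock only after the fd check. Acquiring multiple mutexes in one fixed order everywhere is what prevents two threads from deadlocking on each other; a minimal sketch with std::mutex (the names are illustrative, not the s3fs AutoLock types):

#include <mutex>

static std::mutex entity_lock;      // stands in for fdent_lock
static std::mutex entity_data_lock; // stands in for fdent_data_lock

static bool punch_hole_like(int physical_fd)
{
    // Always acquire in the same order (entity_lock first), so no thread can
    // hold one lock while waiting for the other in the opposite order.
    std::lock_guard<std::mutex> lock1(entity_lock);
    std::lock_guard<std::mutex> lock2(entity_data_lock);

    if(-1 == physical_fd){
        return false;
    }
    // ... work on the cache file while both locks are held ...
    return true;
}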
@ -2523,8 +2571,41 @@ bool FdEntity::PunchHole(off_t start, size_t size)
//
void FdEntity::MarkDirtyNewFile()
{
AutoLock auto_lock(&fdent_lock);
AutoLock auto_lock2(&fdent_data_lock);
pagelist.Init(0, false, true);
pending_status = CREATE_FILE_PENDING;
pending_status = pending_status_t::CREATE_FILE_PENDING;
}
bool FdEntity::IsDirtyNewFile() const
{
AutoLock auto_lock(&fdent_lock);
return (pending_status_t::CREATE_FILE_PENDING == pending_status);
}
// [NOTE]
// The fdatasync call only uploads the content and does not update
// the metadata. In the flush call, if there are no content updates,
// only the metadata needs to be uploaded, so these functions are used.
//
void FdEntity::MarkDirtyMetadata()
{
AutoLock auto_lock(&fdent_lock);
AutoLock auto_lock2(&fdent_data_lock);
if(pending_status_t::NO_UPDATE_PENDING == pending_status){
pending_status = pending_status_t::UPDATE_META_PENDING;
}
}
bool FdEntity::IsDirtyMetadata() const
{
// [NOTE]
// fdent_lock must be previously locked.
//
return (pending_status_t::UPDATE_META_PENDING == pending_status);
}
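
The note above is the reason a separate UPDATE_META_PENDING state exists. A minimal self-contained sketch of that state machine, using std::mutex in place of the project's AutoLock and a MockEntity class that is purely illustrative, not the s3fs implementation:

#include <mutex>

enum class pending_status_t { NO_UPDATE_PENDING, UPDATE_META_PENDING, CREATE_FILE_PENDING };

class MockEntity
{
    private:
        mutable std::mutex lock;
        pending_status_t   pending_status = pending_status_t::NO_UPDATE_PENDING;

    public:
        // fdatasync uploaded the content only, so remember that the metadata
        // still has to be sent by a later flush.
        void MarkDirtyMetadata()
        {
            std::lock_guard<std::mutex> guard(lock);
            if(pending_status_t::NO_UPDATE_PENDING == pending_status){
                pending_status = pending_status_t::UPDATE_META_PENDING;
            }
        }
        bool IsDirtyMetadata() const
        {
            std::lock_guard<std::mutex> guard(lock);
            return (pending_status_t::UPDATE_META_PENDING == pending_status);
        }
};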
bool FdEntity::AddUntreated(off_t start, off_t size)

View File

@ -22,6 +22,7 @@
#define S3FS_FDCACHE_ENTITY_H_
#include <fcntl.h>
#include <memory>
#include "autolock.h"
#include "fdcache_page.h"
@ -40,7 +41,7 @@ class FdEntity
// because the processing(request) at these updates is different.
// Therefore, the pending state is expressed by this enum type.
//
enum pending_status_t {
enum class pending_status_t {
NO_UPDATE_PENDING = 0,
UPDATE_META_PENDING, // pending meta header
CREATE_FILE_PENDING // pending file creation and meta header
@ -83,35 +84,43 @@ class FdEntity
int NoCachePreMultipartPost(PseudoFdInfo* pseudo_obj);
int NoCacheMultipartPost(PseudoFdInfo* pseudo_obj, int tgfd, off_t start, off_t size);
int NoCacheCompleteMultipartPost(PseudoFdInfo* pseudo_obj);
int RowFlushNoMultipart(PseudoFdInfo* pseudo_obj, const char* tpath);
int RowFlushNoMultipart(const PseudoFdInfo* pseudo_obj, const char* tpath);
int RowFlushMultipart(PseudoFdInfo* pseudo_obj, const char* tpath);
int RowFlushMixMultipart(PseudoFdInfo* pseudo_obj, const char* tpath);
int RowFlushStreamMultipart(PseudoFdInfo* pseudo_obj, const char* tpath);
ssize_t WriteNoMultipart(PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size);
ssize_t WriteNoMultipart(const PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size);
ssize_t WriteMultipart(PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size);
ssize_t WriteMixMultipart(PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size);
ssize_t WriteStreamUpload(PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size);
bool ReserveDiskSpace(off_t size);
bool AddUntreated(off_t start, off_t size);
bool IsDirtyMetadata() const;
public:
static bool GetNoMixMultipart() { return mixmultipart; }
static bool SetNoMixMultipart();
static bool GetStreamUpload() { return streamupload; }
static bool SetStreamUpload(bool isstream);
explicit FdEntity(const char* tpath = NULL, const char* cpath = NULL);
explicit FdEntity(const char* tpath = nullptr, const char* cpath = nullptr);
~FdEntity();
FdEntity(const FdEntity&) = delete;
FdEntity(FdEntity&&) = delete;
FdEntity& operator=(const FdEntity&) = delete;
FdEntity& operator=(FdEntity&&) = delete;
void Close(int fd);
bool IsOpen() const { return (-1 != physical_fd); }
bool FindPseudoFd(int fd, AutoLock::Type locktype = AutoLock::NONE) const;
int Open(const headers_t* pmeta, off_t size, const struct timespec& ts_mctime, int flags, AutoLock::Type type);
bool LoadAll(int fd, headers_t* pmeta = NULL, off_t* size = NULL, bool force_load = false);
bool LoadAll(int fd, headers_t* pmeta = nullptr, off_t* size = nullptr, bool force_load = false);
int Dup(int fd, AutoLock::Type locktype = AutoLock::NONE);
int OpenPseudoFd(int flags = O_RDONLY, AutoLock::Type locktype = AutoLock::NONE);
int GetOpenCount(AutoLock::Type locktype = AutoLock::NONE) const;
const char* GetPath() const { return path.c_str(); }
const std::string& GetPath() const { return path; }
bool RenamePath(const std::string& newpath, std::string& fentmapkey);
int GetPhysicalFd() const { return physical_fd; }
bool IsModified() const;
@ -140,22 +149,22 @@ class FdEntity
off_t BytesModified();
int RowFlush(int fd, const char* tpath, AutoLock::Type type, bool force_sync = false);
int Flush(int fd, AutoLock::Type type, bool force_sync = false) { return RowFlush(fd, NULL, type, force_sync); }
int Flush(int fd, AutoLock::Type type, bool force_sync = false) { return RowFlush(fd, nullptr, type, force_sync); }
ssize_t Read(int fd, char* bytes, off_t start, size_t size, bool force_load = false);
ssize_t Write(int fd, const char* bytes, off_t start, size_t size);
bool ReserveDiskSpace(off_t size);
bool PunchHole(off_t start = 0, size_t size = 0);
void MarkDirtyNewFile();
bool IsDirtyNewFile() { return (CREATE_FILE_PENDING == pending_status); }
bool IsDirtyNewFile() const;
void MarkDirtyMetadata();
bool GetLastUpdateUntreatedPart(off_t& start, off_t& size) const;
bool ReplaceLastUpdateUntreatedPart(off_t front_start, off_t front_size, off_t behind_start, off_t behind_size);
};
typedef std::map<std::string, class FdEntity*> fdent_map_t; // key=path, value=FdEntity*
typedef std::map<std::string, std::unique_ptr<FdEntity>> fdent_map_t; // key=path, value=FdEntity*
#endif // S3FS_FDCACHE_ENTITY_H_
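
fdent_map_t now stores std::unique_ptr<FdEntity> values, so the map itself owns the entities and removing an entry releases it without a manual delete. A self-contained sketch of the same ownership pattern with a placeholder Entity type:

#include <map>
#include <memory>
#include <string>

struct Entity { std::string path; };

typedef std::map<std::string, std::unique_ptr<Entity>> entity_map_t;

int main()
{
    entity_map_t fent;

    // Insert: the map takes ownership of the heap-allocated object.
    fent["/bucket/file1"] = std::unique_ptr<Entity>(new Entity{"/bucket/file1"});

    // Look up: hand out a raw, non-owning pointer to callers.
    auto iter = fent.find("/bucket/file1");
    Entity* ent = (iter != fent.end()) ? iter->second.get() : nullptr;
    (void)ent;

    // Erase: the unique_ptr destructor frees the Entity automatically.
    fent.erase("/bucket/file1");
    return 0;
}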

View File

@ -18,14 +18,17 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <algorithm>
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <errno.h>
#include <memory>
#include <sys/stat.h>
#include <unistd.h>
#include "common.h"
#include "s3fs_logger.h"
#include "s3fs_util.h"
#include "fdcache_fdinfo.h"
#include "fdcache_pseudofd.h"
#include "fdcache_entity.h"
@ -47,15 +50,13 @@ int PseudoFdInfo::opt_max_threads = -1;
//
void* PseudoFdInfo::MultipartUploadThreadWorker(void* arg)
{
pseudofdinfo_thparam* pthparam = static_cast<pseudofdinfo_thparam*>(arg);
std::unique_ptr<pseudofdinfo_thparam> pthparam(static_cast<pseudofdinfo_thparam*>(arg));
if(!pthparam || !(pthparam->ppseudofdinfo)){
delete pthparam;
return reinterpret_cast<void*>(-EIO);
}
S3FS_PRN_INFO3("Upload Part Thread [tpath=%s][start=%lld][size=%lld][part=%d]", pthparam->path.c_str(), static_cast<long long>(pthparam->start), static_cast<long long>(pthparam->size), pthparam->part_num);
int result;
S3fsCurl* s3fscurl;
{
AutoLock auto_lock(&(pthparam->ppseudofdinfo->upload_list_lock));
@ -65,20 +66,19 @@ void* PseudoFdInfo::MultipartUploadThreadWorker(void* arg)
if(!pthparam->ppseudofdinfo->CompleteInstruction(result, AutoLock::ALREADY_LOCKED)){ // result will be overwritten with the same value.
result = -EIO;
}
delete pthparam;
return reinterpret_cast<void*>(result);
}
}
// setup and make curl object
if(NULL == (s3fscurl = S3fsCurl::CreateParallelS3fsCurl(pthparam->path.c_str(), pthparam->upload_fd, pthparam->start, pthparam->size, pthparam->part_num, pthparam->is_copy, pthparam->petag, pthparam->upload_id, result))){
std::unique_ptr<S3fsCurl> s3fscurl(S3fsCurl::CreateParallelS3fsCurl(pthparam->path.c_str(), pthparam->upload_fd, pthparam->start, pthparam->size, pthparam->part_num, pthparam->is_copy, pthparam->petag, pthparam->upload_id, result));
if(nullptr == s3fscurl){
S3FS_PRN_ERR("failed creating s3fs curl object for uploading [path=%s][start=%lld][size=%lld][part=%d]", pthparam->path.c_str(), static_cast<long long>(pthparam->start), static_cast<long long>(pthparam->size), pthparam->part_num);
// set result for exiting
if(!pthparam->ppseudofdinfo->CompleteInstruction(result, AutoLock::NONE)){
result = -EIO;
}
delete pthparam;
return reinterpret_cast<void*>(result);
}
@ -93,13 +93,11 @@ void* PseudoFdInfo::MultipartUploadThreadWorker(void* arg)
S3FS_PRN_ERR("failed uploading with error(%d) [path=%s][start=%lld][size=%lld][part=%d]", result, pthparam->path.c_str(), static_cast<long long>(pthparam->start), static_cast<long long>(pthparam->size), pthparam->part_num);
}
s3fscurl->DestroyCurlHandle(true, false);
delete s3fscurl;
// set result
if(!pthparam->ppseudofdinfo->CompleteInstruction(result, AutoLock::NONE)){
S3FS_PRN_WARN("This thread worker is about to end, so it doesn't return an EIO here and runs to the end.");
}
delete pthparam;
return reinterpret_cast<void*>(result);
}
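
In MultipartUploadThreadWorker the heap-allocated pseudofdinfo_thparam arriving through the void* thread argument is now wrapped in a std::unique_ptr at the top of the worker, so every early return frees it without a matching delete. A self-contained sketch of that hand-off with pthreads (the Param type and the work done are illustrative):

#include <pthread.h>
#include <cstdio>
#include <memory>
#include <string>

struct Param { std::string path; int part_num; };

static void* worker(void* arg)
{
    // Take ownership immediately: every return path below releases Param.
    std::unique_ptr<Param> param(static_cast<Param*>(arg));
    if(!param){
        return reinterpret_cast<void*>(-1);
    }
    std::printf("uploading %s part %d\n", param->path.c_str(), param->part_num);
    return reinterpret_cast<void*>(0);
}

int main()
{
    std::unique_ptr<Param> param(new Param{"/bucket/file", 1});

    pthread_t thread;
    if(0 != pthread_create(&thread, nullptr, worker, param.get())){
        return 1;          // creation failed: param still owns and frees the object
    }
    param.release();       // the worker thread owns the object from here on
    pthread_join(thread, nullptr);
    return 0;
}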
@ -143,6 +141,8 @@ PseudoFdInfo::~PseudoFdInfo()
bool PseudoFdInfo::Clear()
{
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownConditionTrueFalse
if(!CancelAllThreads() || !ResetUploadInfo(AutoLock::NONE)){
return false;
}
@ -180,19 +180,25 @@ bool PseudoFdInfo::OpenUploadFd(AutoLock::Type type)
}
// duplicate fd
if(-1 == (upload_fd = dup(physical_fd)) || 0 != lseek(upload_fd, 0, SEEK_SET)){
int fd;
if(-1 == (fd = dup(physical_fd))){
S3FS_PRN_ERR("Could not duplicate physical file descriptor(errno=%d)", errno);
if(-1 != upload_fd){
close(upload_fd);
}
return false;
}
scope_guard guard([&]() { close(fd); });
if(0 != lseek(fd, 0, SEEK_SET)){
S3FS_PRN_ERR("Could not seek physical file descriptor(errno=%d)", errno);
return false;
}
struct stat st;
if(-1 == fstat(upload_fd, &st)){
if(-1 == fstat(fd, &st)){
S3FS_PRN_ERR("Invalid file descriptor for uploading(errno=%d)", errno);
close(upload_fd);
return false;
}
guard.dismiss();
upload_fd = fd;
return true;
}
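
OpenUploadFd now relies on a scope_guard that closes the duplicated descriptor on every error path and is dismissed once ownership is transferred to upload_fd. s3fs ships its own scope_guard helper; the following is an illustrative re-implementation of such a guard, not the project's header:

#include <unistd.h>
#include <functional>
#include <utility>

// Minimal RAII guard: runs the stored callback unless dismiss() was called.
class scope_guard
{
    private:
        std::function<void()> on_exit;
        bool                  active;
    public:
        explicit scope_guard(std::function<void()> fn) : on_exit(std::move(fn)), active(true) {}
        ~scope_guard() { if(active){ on_exit(); } }
        void dismiss() { active = false; }
        scope_guard(const scope_guard&) = delete;
        scope_guard& operator=(const scope_guard&) = delete;
};

// Duplicate a file descriptor; the guard closes it on any failure path.
static int dup_at_start(int physical_fd)
{
    int fd = dup(physical_fd);
    if(-1 == fd){
        return -1;
    }
    scope_guard guard([fd]() { close(fd); });

    if(0 != lseek(fd, 0, SEEK_SET)){
        return -1;          // guard closes fd here
    }
    guard.dismiss();        // success: the caller now owns fd
    return fd;
}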
@ -232,6 +238,8 @@ bool PseudoFdInfo::Readable() const
bool PseudoFdInfo::ClearUploadInfo(bool is_cancel_mp)
{
if(is_cancel_mp){
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownConditionTrueFalse
if(!CancelAllThreads()){
return false;
}
@ -260,10 +268,14 @@ bool PseudoFdInfo::RowInitialUploadInfo(const std::string& id, bool is_cancel_mp
}
if(is_cancel_mp){
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownConditionTrueFalse
if(!ClearUploadInfo(is_cancel_mp)){
return false;
}
}else{
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownConditionTrueFalse
if(!ResetUploadInfo(type)){
return false;
}
@ -351,9 +363,8 @@ bool PseudoFdInfo::AppendUploadPart(off_t start, off_t size, bool is_copy, etagp
int partnumber = static_cast<int>(upload_list.size()) + 1;
// add new part
etagpair* petag_entity = etag_entities.add(etagpair(NULL, partnumber)); // [NOTE] Create the etag entity and register it in the list.
filepart newpart(false, physical_fd, start, size, is_copy, petag_entity);
upload_list.push_back(newpart);
etagpair* petag_entity = etag_entities.add(etagpair(nullptr, partnumber)); // [NOTE] Create the etag entity and register it in the list.
upload_list.emplace_back(false, physical_fd, start, size, is_copy, petag_entity);
// set etag pointer
if(ppetag){
@ -368,7 +379,7 @@ bool PseudoFdInfo::AppendUploadPart(off_t start, off_t size, bool is_copy, etagp
//
static bool filepart_partnum_compare(const filepart& src1, const filepart& src2)
{
return (src1.get_part_number() <= src2.get_part_number());
return src1.get_part_number() < src2.get_part_number();
}
bool PseudoFdInfo::InsertUploadPart(off_t start, off_t size, int part_num, bool is_copy, etagpair** ppetag, AutoLock::Type type)
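
The comparator above changes from <= to < because std::sort (used a few lines below, now that upload_list is a std::vector) requires a strict weak ordering: a comparator that returns true for equal keys is undefined behaviour. A small self-contained sketch of sorting parts by part number:

#include <algorithm>
#include <vector>

struct part { int part_number; long long start; };

// Strict weak ordering: must return false when both keys are equal.
static bool part_number_less(const part& lhs, const part& rhs)
{
    return lhs.part_number < rhs.part_number;
}

int main()
{
    std::vector<part> parts = { {3, 10}, {1, 0}, {2, 5} };
    std::sort(parts.begin(), parts.end(), part_number_less);
    // parts is now ordered 1, 2, 3 by part_number.
    return 0;
}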
@ -387,12 +398,11 @@ bool PseudoFdInfo::InsertUploadPart(off_t start, off_t size, int part_num, bool
AutoLock auto_lock(&upload_list_lock, type);
// insert new part
etagpair* petag_entity = etag_entities.add(etagpair(NULL, part_num));
filepart newpart(false, physical_fd, start, size, is_copy, petag_entity);
upload_list.push_back(newpart);
etagpair* petag_entity = etag_entities.add(etagpair(nullptr, part_num));
upload_list.emplace_back(false, physical_fd, start, size, is_copy, petag_entity);
// sort by part number
upload_list.sort(filepart_partnum_compare);
std::sort(upload_list.begin(), upload_list.end(), filepart_partnum_compare);
// set etag pointer
*ppetag = petag_entity;
@ -420,7 +430,7 @@ bool PseudoFdInfo::ParallelMultipartUpload(const char* path, const mp_part_list_
for(mp_part_list_t::const_iterator iter = mplist.begin(); iter != mplist.end(); ++iter){
// Insert upload part
etagpair* petag = NULL;
etagpair* petag = nullptr;
if(!InsertUploadPart(iter->start, iter->size, iter->part_num, is_copy, &petag, AutoLock::ALREADY_LOCKED)){
S3FS_PRN_ERR("Failed to insert insert upload part(path=%s, start=%lld, size=%lld, part=%d, copy=%s) to mplist", SAFESTRPTR(path), static_cast<long long int>(iter->start), static_cast<long long int>(iter->size), iter->part_num, (is_copy ? "true" : "false"));
return false;
@ -439,15 +449,14 @@ bool PseudoFdInfo::ParallelMultipartUpload(const char* path, const mp_part_list_
thargs->petag = petag;
// make parameter for thread pool
thpoolman_param* ppoolparam = new thpoolman_param;
ppoolparam->args = thargs;
ppoolparam->psem = &uploaded_sem;
ppoolparam->pfunc = PseudoFdInfo::MultipartUploadThreadWorker;
thpoolman_param ppoolparam;
ppoolparam.args = thargs;
ppoolparam.psem = &uploaded_sem;
ppoolparam.pfunc = PseudoFdInfo::MultipartUploadThreadWorker;
// setup instruction
if(!ThreadPoolMan::Instruct(ppoolparam)){
S3FS_PRN_ERR("failed setup instruction for uploading.");
delete ppoolparam;
delete thargs;
return false;
}
@ -456,9 +465,9 @@ bool PseudoFdInfo::ParallelMultipartUpload(const char* path, const mp_part_list_
return true;
}
bool PseudoFdInfo::ParallelMultipartUploadAll(const char* path, const mp_part_list_t& upload_list, const mp_part_list_t& copy_list, int& result)
bool PseudoFdInfo::ParallelMultipartUploadAll(const char* path, const mp_part_list_t& to_upload_list, const mp_part_list_t& copy_list, int& result)
{
S3FS_PRN_DBG("[path=%s][upload_list(%zu)][copy_list(%zu)]", SAFESTRPTR(path), upload_list.size(), copy_list.size());
S3FS_PRN_DBG("[path=%s][to_upload_list(%zu)][copy_list(%zu)]", SAFESTRPTR(path), to_upload_list.size(), copy_list.size());
result = 0;
@ -466,8 +475,8 @@ bool PseudoFdInfo::ParallelMultipartUploadAll(const char* path, const mp_part_li
return false;
}
if(!ParallelMultipartUpload(path, upload_list, false, AutoLock::NONE) || !ParallelMultipartUpload(path, copy_list, true, AutoLock::NONE)){
S3FS_PRN_ERR("Failed setup instruction for uploading(path=%s, upload_list=%zu, copy_list=%zu).", SAFESTRPTR(path), upload_list.size(), copy_list.size());
if(!ParallelMultipartUpload(path, to_upload_list, false, AutoLock::NONE) || !ParallelMultipartUpload(path, copy_list, true, AutoLock::NONE)){
S3FS_PRN_ERR("Failed setup instruction for uploading(path=%s, to_upload_list=%zu, copy_list=%zu).", SAFESTRPTR(path), to_upload_list.size(), copy_list.size());
return false;
}
@ -499,7 +508,7 @@ ssize_t PseudoFdInfo::UploadBoundaryLastUntreatedArea(const char* path, headers_
S3FS_PRN_DBG("[path=%s][pseudo_fd=%d][physical_fd=%d]", SAFESTRPTR(path), pseudo_fd, physical_fd);
if(!path || -1 == physical_fd || -1 == pseudo_fd || !pfdent){
S3FS_PRN_ERR("pseudo_fd(%d) to physical_fd(%d) for path(%s) is not opened or not writable, or pfdent is NULL.", pseudo_fd, physical_fd, path);
S3FS_PRN_ERR("pseudo_fd(%d) to physical_fd(%d) for path(%s) is not opened or not writable, or pfdent is nullptr.", pseudo_fd, physical_fd, path);
return -EBADF;
}
AutoLock auto_lock(&upload_list_lock);
@ -674,7 +683,7 @@ bool PseudoFdInfo::CancelAllThreads()
// [NOTE]
// Maximum multipart upload size must be uploading boundary.
//
bool PseudoFdInfo::ExtractUploadPartsFromUntreatedArea(off_t& untreated_start, off_t& untreated_size, mp_part_list_t& to_upload_list, filepart_list_t& cancel_upload_list, off_t max_mp_size)
bool PseudoFdInfo::ExtractUploadPartsFromUntreatedArea(const off_t& untreated_start, const off_t& untreated_size, mp_part_list_t& to_upload_list, filepart_list_t& cancel_upload_list, off_t max_mp_size)
{
if(untreated_start < 0 || untreated_size <= 0){
S3FS_PRN_ERR("Paramters are wrong(untreated_start=%lld, untreated_size=%lld).", static_cast<long long int>(untreated_start), static_cast<long long int>(untreated_size));
@ -736,8 +745,8 @@ bool PseudoFdInfo::ExtractUploadPartsFromUntreatedArea(off_t& untreated_start, o
// Add upload area to the list
//
while(max_mp_size <= aligned_size){
int part_num = (aligned_start / max_mp_size) + 1;
to_upload_list.push_back(mp_part(aligned_start, max_mp_size, part_num));
int part_num = static_cast<int>((aligned_start / max_mp_size) + 1);
to_upload_list.emplace_back(aligned_start, max_mp_size, part_num);
aligned_start += max_mp_size;
aligned_size -= max_mp_size;
@ -753,7 +762,8 @@ bool PseudoFdInfo::ExtractUploadPartsFromUntreatedArea(off_t& untreated_start, o
// to_upload_list : A list of areas to upload in multipart upload.
// to_copy_list : A list of areas for copy upload in multipart upload.
// to_download_list : A list of areas that must be downloaded before multipart upload.
// cancel_upload_list : A list of areas that have already been uploaded and will be canceled(overwritten).
// wait_upload_complete : If cancellation areas exist, this flag is set to true when it is necessary to wait until the upload of those cancellation areas is complete.
// file_size : The size of the upload file.
// use_copy : Specify true if copy multipart upload is available.
//
@ -761,7 +771,7 @@ bool PseudoFdInfo::ExtractUploadPartsFromUntreatedArea(off_t& untreated_start, o
// The untreated_list in fdentity does not change, but upload_list is changed.
// (If you want to restore it, you can use cancel_upload_list.)
//
bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list, mp_part_list_t& to_upload_list, mp_part_list_t& to_copy_list, mp_part_list_t& to_download_list, filepart_list_t& cancel_upload_list, off_t max_mp_size, off_t file_size, bool use_copy)
bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list, mp_part_list_t& to_upload_list, mp_part_list_t& to_copy_list, mp_part_list_t& to_download_list, filepart_list_t& cancel_upload_list, bool& wait_upload_complete, off_t max_mp_size, off_t file_size, bool use_copy)
{
AutoLock auto_lock(&upload_list_lock);
@ -770,6 +780,7 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
to_copy_list.clear();
to_download_list.clear();
cancel_upload_list.clear();
wait_upload_complete = false;
// Duplicate untreated list
untreated_list_t dup_untreated_list;
@ -825,7 +836,7 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
// - Add this untreated area to cur_untreated_list
// - Delete this from dup_untreated_list
//
cur_untreated_list.push_back(untreatedpart(tmp_untreated_start, tmp_untreated_size));
cur_untreated_list.emplace_back(tmp_untreated_start, tmp_untreated_size);
dup_untreated_iter = dup_untreated_list.erase(dup_untreated_iter);
}else{
//
@ -836,7 +847,7 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
tmp_untreated_size = (cur_start + cur_size) - tmp_untreated_start;
// Add adjusted untreated area to cur_untreated_list
cur_untreated_list.push_back(untreatedpart(tmp_untreated_start, tmp_untreated_size));
cur_untreated_list.emplace_back(tmp_untreated_start, tmp_untreated_size);
// Remove this adjusted untreated area from the area pointed
// to by dup_untreated_iter.
@ -884,7 +895,7 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
//
// Create upload/download/cancel/copy list for this current area
//
int part_num = (cur_start / max_mp_size) + 1;
int part_num = static_cast<int>((cur_start / max_mp_size) + 1);
if(cur_untreated_list.empty()){
//
// No untreated area was detected in this current area
@ -905,14 +916,14 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
// Copy multipart upload available
//
S3FS_PRN_DBG("To copy: start=%lld, size=%lld", static_cast<long long int>(cur_start), static_cast<long long int>(cur_size));
to_copy_list.push_back(mp_part(cur_start, cur_size, part_num));
to_copy_list.emplace_back(cur_start, cur_size, part_num);
}else{
//
// This current area needs to be downloaded and uploaded
//
S3FS_PRN_DBG("To download and upload: start=%lld, size=%lld", static_cast<long long int>(cur_start), static_cast<long long int>(cur_size));
to_download_list.push_back(mp_part(cur_start, cur_size));
to_upload_list.push_back(mp_part(cur_start, cur_size, part_num));
to_download_list.emplace_back(cur_start, cur_size);
to_upload_list.emplace_back(cur_start, cur_size, part_num);
}
}
}else{
@ -929,11 +940,16 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
// So this current area only needs to be uploaded again.
//
S3FS_PRN_DBG("Cancel upload: start=%lld, size=%lld", static_cast<long long int>(overlap_uploaded_iter->startpos), static_cast<long long int>(overlap_uploaded_iter->size));
if(!overlap_uploaded_iter->uploaded){
S3FS_PRN_DBG("This cancel upload area is still uploading, so you must wait for it to complete before starting any Stream uploads.");
wait_upload_complete = true;
}
cancel_upload_list.push_back(*overlap_uploaded_iter); // add this uploaded area to cancel_upload_list
uploaded_iter = upload_list.erase(overlap_uploaded_iter); // remove it from upload_list
S3FS_PRN_DBG("To upload: start=%lld, size=%lld", static_cast<long long int>(cur_start), static_cast<long long int>(cur_size));
to_upload_list.push_back(mp_part(cur_start, cur_size, part_num)); // add new uploading area to list
to_upload_list.emplace_back(cur_start, cur_size, part_num); // add new uploading area to list
}else{
//
@ -976,7 +992,7 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
if( (copy_riter->start + copy_riter->size) == tmp_cur_start &&
(copy_riter->size + (tmp_cur_untreated_iter->start - tmp_cur_start)) <= FIVE_GB &&
((tmp_cur_start + tmp_cur_size) - (tmp_cur_untreated_iter->start - tmp_cur_start)) >= MIN_MULTIPART_SIZE )
((tmp_cur_start + tmp_cur_size) - tmp_cur_untreated_iter->start) >= MIN_MULTIPART_SIZE )
{
//
// Unify this area with the previous copy area.
@ -994,7 +1010,7 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
// If this area is not unified, it needs to be downloaded
//
S3FS_PRN_DBG("To download: start=%lld, size=%lld", static_cast<long long int>(tmp_cur_start), static_cast<long long int>(tmp_cur_untreated_iter->start - tmp_cur_start));
to_download_list.push_back(mp_part(tmp_cur_start, tmp_cur_untreated_iter->start - tmp_cur_start));
to_download_list.emplace_back(tmp_cur_start, tmp_cur_untreated_iter->start - tmp_cur_start);
}
}
//
@ -1009,14 +1025,14 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
//
if(0 < tmp_cur_size){
S3FS_PRN_DBG("To download: start=%lld, size=%lld", static_cast<long long int>(tmp_cur_start), static_cast<long long int>(tmp_cur_size));
to_download_list.push_back(mp_part(tmp_cur_start, tmp_cur_size));
to_download_list.emplace_back(tmp_cur_start, tmp_cur_size);
}
//
// Set upload area(whole of area) to list
//
S3FS_PRN_DBG("To upload: start=%lld, size=%lld", static_cast<long long int>(changed_start), static_cast<long long int>(changed_size));
to_upload_list.push_back(mp_part(changed_start, changed_size, part_num));
to_upload_list.emplace_back(changed_start, changed_size, part_num);
}
}
}
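
Throughout ExtractUploadPartsFromAllArea the pattern list.push_back(mp_part(start, size, num)) becomes list.emplace_back(start, size, num), which constructs the element in place inside the vector instead of building a temporary and copying it. A self-contained sketch with a stand-in part type:

#include <vector>

struct mp_part_like
{
    long long start;
    long long size;
    int       part_num;
    mp_part_like(long long s, long long sz, int num) : start(s), size(sz), part_num(num) {}
};

int main()
{
    std::vector<mp_part_like> to_upload;

    // push_back needs a ready-made object (a temporary that is then copied/moved) ...
    to_upload.push_back(mp_part_like(0, 10 * 1024 * 1024, 1));

    // ... emplace_back forwards the arguments to the constructor inside the vector.
    to_upload.emplace_back(10 * 1024 * 1024, 10 * 1024 * 1024, 2);
    return 0;
}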

View File

@ -21,6 +21,8 @@
#ifndef S3FS_FDCACHE_FDINFO_H_
#define S3FS_FDCACHE_FDINFO_H_
#include <memory>
#include "psemaphore.h"
#include "metaheader.h"
#include "autolock.h"
@ -46,7 +48,7 @@ struct pseudofdinfo_thparam
int part_num;
etagpair* petag;
pseudofdinfo_thparam() : ppseudofdinfo(NULL), path(""), upload_id(""), upload_fd(-1), start(0), size(0), is_copy(false), part_num(-1), petag(NULL) {}
pseudofdinfo_thparam() : ppseudofdinfo(nullptr), path(""), upload_id(""), upload_fd(-1), start(0), size(0), is_copy(false), part_num(-1), petag(nullptr) {}
};
//------------------------------------------------
@ -68,8 +70,8 @@ class PseudoFdInfo
bool is_lock_init;
mutable pthread_mutex_t upload_list_lock; // protects upload_id and upload_list
Semaphore uploaded_sem; // use a semaphore to trigger an upload completion like event flag
volatile int instruct_count; // number of instructions for processing by threads
volatile int completed_count; // number of completed processes by thread
int instruct_count; // number of instructions for processing by threads
int completed_count; // number of completed processes by thread
int last_result; // the result of thread processing
private:
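
The header drops volatile from instruct_count and completed_count: volatile provides neither atomicity nor inter-thread ordering in C++, and the counters are already protected by upload_list_lock (in line with the "Simplify locking with C++11 atomics" change earlier in this series). A sketch of the mutex-protected alternative with illustrative names:

#include <mutex>

class instruction_counters
{
    private:
        std::mutex lock;
        int        instruct_count  = 0;   // plain int is safe while 'lock' is held
        int        completed_count = 0;

    public:
        void instructed()
        {
            std::lock_guard<std::mutex> guard(lock);
            ++instruct_count;
        }
        void completed()
        {
            std::lock_guard<std::mutex> guard(lock);
            ++completed_count;
        }
        bool all_done()
        {
            std::lock_guard<std::mutex> guard(lock);
            return completed_count == instruct_count;
        }
};

// If no other state has to be updated under the same lock, std::atomic<int>
// counters (fetch_add/load) would be the lock-free alternative.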
@ -83,13 +85,16 @@ class PseudoFdInfo
bool CompleteInstruction(int result, AutoLock::Type type = AutoLock::NONE);
bool ParallelMultipartUpload(const char* path, const mp_part_list_t& mplist, bool is_copy, AutoLock::Type type = AutoLock::NONE);
bool InsertUploadPart(off_t start, off_t size, int part_num, bool is_copy, etagpair** ppetag, AutoLock::Type type = AutoLock::NONE);
int WaitAllThreadsExit();
bool CancelAllThreads();
bool ExtractUploadPartsFromUntreatedArea(off_t& untreated_start, off_t& untreated_size, mp_part_list_t& to_upload_list, filepart_list_t& cancel_upload_list, off_t max_mp_size);
bool ExtractUploadPartsFromUntreatedArea(const off_t& untreated_start, const off_t& untreated_size, mp_part_list_t& to_upload_list, filepart_list_t& cancel_upload_list, off_t max_mp_size);
public:
explicit PseudoFdInfo(int fd = -1, int open_flags = 0);
~PseudoFdInfo();
PseudoFdInfo(const PseudoFdInfo&) = delete;
PseudoFdInfo(PseudoFdInfo&&) = delete;
PseudoFdInfo& operator=(const PseudoFdInfo&) = delete;
PseudoFdInfo& operator=(PseudoFdInfo&&) = delete;
int GetPhysicalFd() const { return physical_fd; }
int GetPseudoFd() const { return pseudo_fd; }
@ -105,15 +110,16 @@ class PseudoFdInfo
bool GetUploadId(std::string& id) const;
bool GetEtaglist(etaglist_t& list) const;
bool AppendUploadPart(off_t start, off_t size, bool is_copy = false, etagpair** ppetag = NULL);
bool AppendUploadPart(off_t start, off_t size, bool is_copy = false, etagpair** ppetag = nullptr);
bool ParallelMultipartUploadAll(const char* path, const mp_part_list_t& upload_list, const mp_part_list_t& copy_list, int& result);
bool ParallelMultipartUploadAll(const char* path, const mp_part_list_t& to_upload_list, const mp_part_list_t& copy_list, int& result);
int WaitAllThreadsExit();
ssize_t UploadBoundaryLastUntreatedArea(const char* path, headers_t& meta, FdEntity* pfdent);
bool ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list, mp_part_list_t& to_upload_list, mp_part_list_t& to_copy_list, mp_part_list_t& to_download_list, filepart_list_t& cancel_upload_list, off_t max_mp_size, off_t file_size, bool use_copy);
bool ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list, mp_part_list_t& to_upload_list, mp_part_list_t& to_copy_list, mp_part_list_t& to_download_list, filepart_list_t& cancel_upload_list, bool& wait_upload_complete, off_t max_mp_size, off_t file_size, bool use_copy);
};
typedef std::map<int, class PseudoFdInfo*> fdinfo_map_t;
typedef std::map<int, std::unique_ptr<PseudoFdInfo>> fdinfo_map_t;
#endif // S3FS_FDCACHE_FDINFO_H_

View File

@ -20,6 +20,7 @@
#include <cstdio>
#include <cerrno>
#include <memory>
#include <unistd.h>
#include <sstream>
#include <sys/stat.h>
@ -33,7 +34,7 @@
//------------------------------------------------
// Symbols
//------------------------------------------------
static const int CHECK_CACHEFILE_PART_SIZE = 1024 * 16; // Buffer size in PageList::CheckZeroAreaInFile()
static constexpr int CHECK_CACHEFILE_PART_SIZE = 1024 * 16; // Buffer size in PageList::CheckZeroAreaInFile()
//------------------------------------------------
// fdpage_list_t utility
@ -72,7 +73,7 @@ static void raw_compress_fdpage_list(const fdpage_list_t& pages, fdpage_list_t&
{
compressed_pages.clear();
fdpage* lastpage = NULL;
fdpage* lastpage = nullptr;
fdpage_list_t::iterator add_iter;
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
if(0 == iter->bytes){
@ -232,7 +233,7 @@ bool PageList::GetSparseFilePages(int fd, size_t file_size, fdpage_list_t& spars
//
bool PageList::CheckZeroAreaInFile(int fd, off_t start, size_t bytes)
{
char* readbuff = new char[CHECK_CACHEFILE_PART_SIZE];
std::unique_ptr<char[]> readbuff(new char[CHECK_CACHEFILE_PART_SIZE]);
for(size_t comp_bytes = 0, check_bytes = 0; comp_bytes < bytes; comp_bytes += check_bytes){
if(CHECK_CACHEFILE_PART_SIZE < (bytes - comp_bytes)){
@ -242,7 +243,7 @@ bool PageList::CheckZeroAreaInFile(int fd, off_t start, size_t bytes)
}
bool found_bad_data = false;
ssize_t read_bytes;
if(-1 == (read_bytes = pread(fd, readbuff, check_bytes, (start + comp_bytes)))){
if(-1 == (read_bytes = pread(fd, readbuff.get(), check_bytes, (start + comp_bytes)))){
S3FS_PRN_ERR("Something error is occurred in reading %zu bytes at %lld from file(physical_fd=%d).", check_bytes, static_cast<long long int>(start + comp_bytes), fd);
found_bad_data = true;
}else{
@ -256,11 +257,9 @@ bool PageList::CheckZeroAreaInFile(int fd, off_t start, size_t bytes)
}
}
if(found_bad_data){
delete[] readbuff;
return false;
}
}
delete[] readbuff;
return true;
}
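
CheckZeroAreaInFile (and PageList::Serialize further down) now allocates its read buffer as std::unique_ptr<char[]>, so the early "return false" paths no longer need matching delete[] calls. A self-contained sketch of the same pattern reading a file in fixed-size chunks (the function name and size are illustrative):

#include <sys/types.h>
#include <unistd.h>
#include <memory>

static constexpr int PART_SIZE = 16 * 1024;

// Returns false on any read error; the buffer is freed automatically on every path.
static bool read_all(int fd, off_t start, size_t bytes)
{
    std::unique_ptr<char[]> readbuff(new char[PART_SIZE]);

    for(size_t done = 0; done < bytes; ){
        size_t  chunk = (bytes - done < static_cast<size_t>(PART_SIZE)) ? (bytes - done) : PART_SIZE;
        ssize_t rsize = pread(fd, readbuff.get(), chunk, start + static_cast<off_t>(done));
        if(rsize <= 0){
            return false;          // no delete[] needed here
        }
        done += static_cast<size_t>(rsize);
    }
    return true;
}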
@ -359,14 +358,6 @@ PageList::PageList(off_t size, bool is_loaded, bool is_modified, bool shrinked)
Init(size, is_loaded, is_modified);
}
PageList::PageList(const PageList& other)
{
for(fdpage_list_t::const_iterator iter = other.pages.begin(); iter != other.pages.end(); ++iter){
pages.push_back(*iter);
}
is_shrink = other.is_shrink;
}
PageList::~PageList()
{
Clear();
@ -399,7 +390,7 @@ off_t PageList::Size() const
bool PageList::Compress()
{
fdpage* lastpage = NULL;
fdpage* lastpage = nullptr;
for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ){
if(!lastpage){
// First item
@ -511,8 +502,8 @@ bool PageList::IsPageLoaded(off_t start, off_t size) const
bool PageList::SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus, bool is_compress)
{
off_t now_size = Size();
bool is_loaded = (PAGE_LOAD_MODIFIED == pstatus || PAGE_LOADED == pstatus);
bool is_modified = (PAGE_LOAD_MODIFIED == pstatus || PAGE_MODIFIED == pstatus);
bool is_loaded = (page_status::LOAD_MODIFIED == pstatus || page_status::LOADED == pstatus);
bool is_modified = (page_status::LOAD_MODIFIED == pstatus || page_status::MODIFIED == pstatus);
if(now_size <= start){
if(now_size < start){
@ -657,9 +648,7 @@ size_t PageList::GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start, off
bool PageList::GetPageListsForMultipartUpload(fdpage_list_t& dlpages, fdpage_list_t& mixuppages, off_t max_partsize)
{
// compress before this processing
if(!Compress()){
return false;
}
Compress(); // always true
// make a list by modified flag
fdpage_list_t modified_pages;
@ -760,9 +749,7 @@ bool PageList::GetPageListsForMultipartUpload(fdpage_list_t& dlpages, fdpage_lis
bool PageList::GetNoDataPageLists(fdpage_list_t& nodata_pages, off_t start, size_t size)
{
// compress before this processing
if(!Compress()){
return false;
}
Compress(); // always true
// extract areas without data
fdpage_list_t tmp_pagelist;
@ -873,17 +860,16 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
Init(0, false, false);
return true;
}
char* ptmp = new char[st.st_size + 1];
std::unique_ptr<char[]> ptmp(new char[st.st_size + 1]);
ssize_t result;
// read from file
if(0 >= (result = pread(file.GetFd(), ptmp, st.st_size, 0))){
if(0 >= (result = pread(file.GetFd(), ptmp.get(), st.st_size, 0))){
S3FS_PRN_ERR("failed to read stats(%d)", errno);
delete[] ptmp;
return false;
}
ptmp[result] = '\0';
std::string oneline;
std::istringstream ssall(ptmp);
std::istringstream ssall(ptmp.get());
// loaded
Clear();
@ -893,7 +879,6 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
ino_t cache_inode; // if this value is 0, it means old format.
if(!getline(ssall, oneline, '\n')){
S3FS_PRN_ERR("failed to parse stats.");
delete[] ptmp;
return false;
}else{
std::istringstream sshead(oneline);
@ -903,7 +888,6 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
// get first part in head line.
if(!getline(sshead, strhead1, ':')){
S3FS_PRN_ERR("failed to parse stats.");
delete[] ptmp;
return false;
}
// get second part in head line.
@ -917,7 +901,6 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
cache_inode = static_cast<ino_t>(cvt_strtoofft(strhead1.c_str(), /* base= */10));
if(0 == cache_inode){
S3FS_PRN_ERR("wrong inode number in parsed cache stats.");
delete[] ptmp;
return false;
}
}
@ -925,7 +908,6 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
// check inode number
if(0 != cache_inode && cache_inode != inode){
S3FS_PRN_ERR("differ inode and inode number in parsed cache stats.");
delete[] ptmp;
return false;
}
@ -959,14 +941,20 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
is_modified = (1 == cvt_strtoofft(part.c_str(), /* base= */10) ? true : false);
}
// add new area
PageList::page_status pstatus =
( is_loaded && is_modified ? PageList::PAGE_LOAD_MODIFIED :
!is_loaded && is_modified ? PageList::PAGE_MODIFIED :
is_loaded && !is_modified ? PageList::PAGE_LOADED : PageList::PAGE_NOT_LOAD_MODIFIED );
PageList::page_status pstatus = PageList::page_status::NOT_LOAD_MODIFIED;
if(is_loaded){
if(is_modified){
pstatus = PageList::page_status::LOAD_MODIFIED;
}else{
pstatus = PageList::page_status::LOADED;
}
}else{
if(is_modified){
pstatus = PageList::page_status::MODIFIED;
}
}
SetPageLoadedStatus(offset, size, pstatus);
}
delete[] ptmp;
if(is_err){
S3FS_PRN_ERR("failed to parse stats.");
Clear();

View File

@ -21,8 +21,8 @@
#ifndef S3FS_FDCACHE_PAGE_H_
#define S3FS_FDCACHE_PAGE_H_
#include <list>
#include <sys/types.h>
#include <vector>
//------------------------------------------------
// Symbols
@ -61,7 +61,7 @@ struct fdpage
return (0 < bytes ? offset + bytes - 1 : 0);
}
};
typedef std::list<struct fdpage> fdpage_list_t;
typedef std::vector<struct fdpage> fdpage_list_t;
//------------------------------------------------
// Class PageList
@ -79,11 +79,11 @@ class PageList
bool is_shrink; // [NOTE] true if it has been shrinked even once
public:
enum page_status{
PAGE_NOT_LOAD_MODIFIED = 0,
PAGE_LOADED,
PAGE_MODIFIED,
PAGE_LOAD_MODIFIED
enum class page_status{
NOT_LOAD_MODIFIED = 0,
LOADED,
MODIFIED,
LOAD_MODIFIED
};
private:
@ -98,7 +98,8 @@ class PageList
static void FreeList(fdpage_list_t& list);
explicit PageList(off_t size = 0, bool is_loaded = false, bool is_modified = false, bool shrinked = false);
explicit PageList(const PageList& other);
PageList(const PageList&) = delete;
PageList& operator=(const PageList&) = delete;
~PageList();
bool Init(off_t size, bool is_loaded, bool is_modified);
@ -106,7 +107,7 @@ class PageList
bool Resize(off_t size, bool is_loaded, bool is_modified);
bool IsPageLoaded(off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list
bool SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus = PAGE_LOADED, bool is_compress = true);
bool SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus = page_status::LOADED, bool is_compress = true);
bool FindUnloadedPage(off_t start, off_t& resstart, off_t& ressize) const;
off_t GetTotalUnloadedPageSize(off_t start = 0, off_t size = 0, off_t limit_size = 0) const; // size=0 is checking to end of list
size_t GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list

View File

@ -33,7 +33,7 @@
// The minimum pseudo fd value starts at 2.
// This avoids confusion with 0 (stdin) and 1 (stdout), which are always in use.
//
static const int MIN_PSEUDOFD_NUMBER = 2;
static constexpr int MIN_PSEUDOFD_NUMBER = 2;
//------------------------------------------------
// PseudoFdManager class methods

View File

@ -21,6 +21,8 @@
#ifndef S3FS_FDCACHE_PSEUDOFD_H_
#define S3FS_FDCACHE_PSEUDOFD_H_
#include <vector>
//------------------------------------------------
// Typedefs
//------------------------------------------------
@ -43,6 +45,10 @@ class PseudoFdManager
PseudoFdManager();
~PseudoFdManager();
PseudoFdManager(const PseudoFdManager&) = delete;
PseudoFdManager(PseudoFdManager&&) = delete;
PseudoFdManager& operator=(const PseudoFdManager&) = delete;
PseudoFdManager& operator=(PseudoFdManager&&) = delete;
int GetUnusedMinPseudoFd() const;
int CreatePseudoFd();

View File

@ -48,19 +48,19 @@ std::string CacheFileStat::GetCacheFileStatTopDir()
return top_path;
}
bool CacheFileStat::MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir)
int CacheFileStat::MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir)
{
std::string top_path = CacheFileStat::GetCacheFileStatTopDir();
if(top_path.empty()){
S3FS_PRN_ERR("The path to cache top dir is empty.");
return false;
return -EIO;
}
if(is_create_dir){
int result;
if(0 != (result = mkdirp(top_path + mydirname(path), 0777))){
S3FS_PRN_ERR("failed to create dir(%s) by errno(%d).", path, result);
return false;
return result;
}
}
if(!path || '\0' == path[0]){
@ -68,7 +68,7 @@ bool CacheFileStat::MakeCacheFileStatPath(const char* path, std::string& sfile_p
}else{
sfile_path = top_path + SAFESTRPTR(path);
}
return true;
return 0;
}
bool CacheFileStat::CheckCacheFileStatTopDir()
@ -82,26 +82,28 @@ bool CacheFileStat::CheckCacheFileStatTopDir()
return check_exist_dir_permission(top_path.c_str());
}
bool CacheFileStat::DeleteCacheFileStat(const char* path)
int CacheFileStat::DeleteCacheFileStat(const char* path)
{
if(!path || '\0' == path[0]){
return false;
return -EINVAL;
}
// stat path
std::string sfile_path;
if(!CacheFileStat::MakeCacheFileStatPath(path, sfile_path, false)){
int result;
if(0 != (result = CacheFileStat::MakeCacheFileStatPath(path, sfile_path, false))){
S3FS_PRN_ERR("failed to create cache stat file path(%s)", path);
return false;
return result;
}
if(0 != unlink(sfile_path.c_str())){
if(ENOENT == errno){
S3FS_PRN_DBG("failed to delete file(%s): errno=%d", path, errno);
result = -errno;
if(-ENOENT == result){
S3FS_PRN_DBG("failed to delete file(%s): errno=%d", path, result);
}else{
S3FS_PRN_ERR("failed to delete file(%s): errno=%d", path, errno);
S3FS_PRN_ERR("failed to delete file(%s): errno=%d", path, result);
}
return false;
return result;
}
return true;
return 0;
}
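
MakeCacheFileStatPath and DeleteCacheFileStat switch from returning bool to returning 0 on success or a negative errno value, so callers can distinguish a missing file (-ENOENT) from real failures. A minimal sketch of that convention around unlink (the wrapper names are made up):

#include <unistd.h>
#include <cerrno>

// Returns 0 on success, or -errno on failure (e.g. -ENOENT, -EACCES).
static int delete_stat_file(const char* sfile_path)
{
    if(!sfile_path || '\0' == sfile_path[0]){
        return -EINVAL;
    }
    if(0 != unlink(sfile_path)){
        return -errno;
    }
    return 0;
}

// Caller side: only a missing file is tolerated.
static bool remove_if_exists(const char* sfile_path)
{
    int result = delete_stat_file(sfile_path);
    return (0 == result || -ENOENT == result);
}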
// [NOTE]
@ -127,7 +129,7 @@ bool CacheFileStat::RenameCacheFileStat(const char* oldpath, const char* newpath
// stat path
std::string old_filestat;
std::string new_filestat;
if(!CacheFileStat::MakeCacheFileStatPath(oldpath, old_filestat, false) || !CacheFileStat::MakeCacheFileStatPath(newpath, new_filestat, false)){
if(0 != CacheFileStat::MakeCacheFileStatPath(oldpath, old_filestat, false) || 0 != CacheFileStat::MakeCacheFileStatPath(newpath, new_filestat, false)){
return false;
}
@ -201,39 +203,40 @@ bool CacheFileStat::RawOpen(bool readonly)
}
// stat path
std::string sfile_path;
if(!CacheFileStat::MakeCacheFileStatPath(path.c_str(), sfile_path, true)){
if(0 != CacheFileStat::MakeCacheFileStatPath(path.c_str(), sfile_path, true)){
S3FS_PRN_ERR("failed to create cache stat file path(%s)", path.c_str());
return false;
}
// open
int tmpfd;
if(readonly){
if(-1 == (fd = open(sfile_path.c_str(), O_RDONLY))){
if(-1 == (tmpfd = open(sfile_path.c_str(), O_RDONLY))){
S3FS_PRN_ERR("failed to read only open cache stat file path(%s) - errno(%d)", path.c_str(), errno);
return false;
}
}else{
if(-1 == (fd = open(sfile_path.c_str(), O_CREAT|O_RDWR, 0600))){
if(-1 == (tmpfd = open(sfile_path.c_str(), O_CREAT|O_RDWR, 0600))){
S3FS_PRN_ERR("failed to open cache stat file path(%s) - errno(%d)", path.c_str(), errno);
return false;
}
}
scope_guard guard([&]() { close(tmpfd); });
// lock
if(-1 == flock(fd, LOCK_EX)){
if(-1 == flock(tmpfd, LOCK_EX)){
S3FS_PRN_ERR("failed to lock cache stat file(%s) - errno(%d)", path.c_str(), errno);
close(fd);
fd = -1;
return false;
}
// seek top
if(0 != lseek(fd, 0, SEEK_SET)){
if(0 != lseek(tmpfd, 0, SEEK_SET)){
S3FS_PRN_ERR("failed to lseek cache stat file(%s) - errno(%d)", path.c_str(), errno);
flock(fd, LOCK_UN);
close(fd);
fd = -1;
flock(tmpfd, LOCK_UN);
return false;
}
S3FS_PRN_DBG("file locked(%s - %s)", path.c_str(), sfile_path.c_str());
guard.dismiss();
fd = tmpfd;
return true;
}

View File

@ -33,18 +33,18 @@ class CacheFileStat
int fd;
private:
static bool MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir = true);
static int MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir = true);
bool RawOpen(bool readonly);
public:
static std::string GetCacheFileStatTopDir();
static bool DeleteCacheFileStat(const char* path);
static int DeleteCacheFileStat(const char* path);
static bool CheckCacheFileStatTopDir();
static bool DeleteCacheFileStatDirectory();
static bool RenameCacheFileStat(const char* oldpath, const char* newpath);
explicit CacheFileStat(const char* tpath = NULL);
explicit CacheFileStat(const char* tpath = nullptr);
~CacheFileStat();
bool Open();

View File

@ -95,7 +95,7 @@ bool UntreatedParts::AddPart(off_t start, off_t size)
}
}
// There are no overlapping parts in the untreated_list, so add the part at the end of the list
untreated_list.push_back(untreatedpart(start, size, last_tag));
untreated_list.emplace_back(start, size, last_tag);
return true;
}

View File

@ -42,6 +42,10 @@ class UntreatedParts
public:
UntreatedParts();
~UntreatedParts();
UntreatedParts(const UntreatedParts&) = delete;
UntreatedParts(UntreatedParts&&) = delete;
UntreatedParts& operator=(const UntreatedParts&) = delete;
UntreatedParts& operator=(UntreatedParts&&) = delete;
bool empty();

View File

@ -18,15 +18,15 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <pthread.h>
#include <unistd.h>
#include <syslog.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
#include <gcrypt.h>
#include <gnutls/gnutls.h>
#include <gnutls/crypto.h>
@ -50,7 +50,7 @@
const char* s3fs_crypt_lib_name(void)
{
static const char version[] = "GnuTLS(nettle)";
static constexpr char version[] = "GnuTLS(nettle)";
return version;
}
@ -59,7 +59,7 @@ const char* s3fs_crypt_lib_name(void)
const char* s3fs_crypt_lib_name()
{
static const char version[] = "GnuTLS(gcrypt)";
static constexpr char version[] = "GnuTLS(gcrypt)";
return version;
}
@ -75,7 +75,7 @@ bool s3fs_init_global_ssl()
return false;
}
#ifndef USE_GNUTLS_NETTLE
if(NULL == gcry_check_version(NULL)){
if(nullptr == gcry_check_version(nullptr)){
return false;
}
#endif // USE_GNUTLS_NETTLE
@ -106,76 +106,72 @@ bool s3fs_destroy_crypt_mutex()
//-------------------------------------------------------------------
#ifdef USE_GNUTLS_NETTLE
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
std::unique_ptr<unsigned char[]> s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
{
if(!key || !data || !digest || !digestlen){
return false;
if(!key || !data || !digestlen){
return nullptr;
}
*digest = new unsigned char[SHA1_DIGEST_SIZE];
std::unique_ptr<unsigned char[]> digest(new unsigned char[SHA1_DIGEST_SIZE]);
struct hmac_sha1_ctx ctx_hmac;
hmac_sha1_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
hmac_sha1_update(&ctx_hmac, datalen, reinterpret_cast<const uint8_t*>(data));
hmac_sha1_digest(&ctx_hmac, SHA1_DIGEST_SIZE, reinterpret_cast<uint8_t*>(*digest));
hmac_sha1_digest(&ctx_hmac, SHA1_DIGEST_SIZE, reinterpret_cast<uint8_t*>(digest.get()));
*digestlen = SHA1_DIGEST_SIZE;
return true;
return digest;
}
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
std::unique_ptr<unsigned char[]> s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
{
if(!key || !data || !digest || !digestlen){
return false;
if(!key || !data || !digestlen){
return nullptr;
}
*digest = new unsigned char[SHA256_DIGEST_SIZE];
std::unique_ptr<unsigned char[]> digest(new unsigned char[SHA256_DIGEST_SIZE]);
struct hmac_sha256_ctx ctx_hmac;
hmac_sha256_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
hmac_sha256_update(&ctx_hmac, datalen, reinterpret_cast<const uint8_t*>(data));
hmac_sha256_digest(&ctx_hmac, SHA256_DIGEST_SIZE, reinterpret_cast<uint8_t*>(*digest));
hmac_sha256_digest(&ctx_hmac, SHA256_DIGEST_SIZE, reinterpret_cast<uint8_t*>(digest.get()));
*digestlen = SHA256_DIGEST_SIZE;
return true;
return digest;
}
#else // USE_GNUTLS_NETTLE
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
std::unique_ptr<unsigned char[]> s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
{
if(!key || !data || !digest || !digestlen){
return false;
if(!key || !data || !digestlen){
return nullptr;
}
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA1))){
return false;
return nullptr;
}
*digest = new unsigned char[*digestlen + 1];
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA1, key, keylen, data, datalen, *digest)){
delete[] *digest;
*digest = NULL;
return false;
std::unique_ptr<unsigned char[]> digest(new unsigned char[*digestlen + 1]);
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA1, key, keylen, data, datalen, digest.get())){
return nullptr;
}
return true;
return digest;
}
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
std::unique_ptr<unsigned char[]> s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
{
if(!key || !data || !digest || !digestlen){
return false;
if(!key || !data || !digestlen){
return nullptr;
}
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA256))){
return false;
return nullptr;
}
*digest = new unsigned char[*digestlen + 1];
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA256, key, keylen, data, datalen, *digest)){
delete[] *digest;
*digest = NULL;
return false;
std::unique_ptr<unsigned char[]> digest(new unsigned char[*digestlen + 1]);
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA256, key, keylen, data, datalen, digest.get())){
return nullptr;
}
return true;
return digest;
}
#endif // USE_GNUTLS_NETTLE
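
The HMAC helpers now return std::unique_ptr<unsigned char[]> (nullptr on failure) instead of filling an unsigned char** out-parameter, which removes the delete[] obligation from every caller. A self-contained sketch of the same signature style, using a trivial stand-in computation rather than a real HMAC:

#include <cstddef>
#include <memory>

// Returns an owned buffer of *digestlen bytes, or nullptr on bad input.
// The XOR "digest" is a placeholder; it only illustrates the ownership style.
static std::unique_ptr<unsigned char[]> toy_digest(const unsigned char* data, size_t datalen, unsigned int* digestlen)
{
    if(!data || !digestlen){
        return nullptr;
    }
    *digestlen = 4;
    std::unique_ptr<unsigned char[]> digest(new unsigned char[*digestlen]());
    for(size_t i = 0; i < datalen; ++i){
        digest[i % *digestlen] ^= data[i];
    }
    return digest;               // ownership moves to the caller
}

// Caller: no delete[] anywhere; the buffer is released when digest goes out of scope.
static bool use_toy_digest(const unsigned char* data, size_t datalen)
{
    unsigned int digestlen = 0;
    std::unique_ptr<unsigned char[]> digest = toy_digest(data, datalen, &digestlen);
    return (nullptr != digest);
}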
@ -183,22 +179,26 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
//-------------------------------------------------------------------
// Utility Function for MD5
//-------------------------------------------------------------------
size_t get_md5_digest_length()
#ifdef USE_GNUTLS_NETTLE
bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* result)
{
return 16;
struct md5_ctx ctx_md5;
md5_init(&ctx_md5);
md5_update(&ctx_md5, datalen, data);
md5_digest(&ctx_md5, result->size(), result->data());
return true;
}
#ifdef USE_GNUTLS_NETTLE
unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
{
struct md5_ctx ctx_md5;
off_t bytes;
unsigned char* result;
if(-1 == size){
struct stat st;
if(-1 == fstat(fd, &st)){
return NULL;
return false;
}
size = st.st_size;
}
@ -216,36 +216,48 @@ unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
}else if(-1 == bytes){
// error
S3FS_PRN_ERR("file read error(%d)", errno);
return NULL;
return false;
}
md5_update(&ctx_md5, bytes, buf);
}
result = new unsigned char[get_md5_digest_length()];
md5_digest(&ctx_md5, get_md5_digest_length(), result);
md5_digest(&ctx_md5, result->size(), result->data());
return result;
return true;
}
#else // USE_GNUTLS_NETTLE
unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* digest)
{
gcry_md_hd_t ctx_md5;
gcry_error_t err;
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_md5, GCRY_MD_MD5, 0))){
S3FS_PRN_ERR("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
return false;
}
gcry_md_write(ctx_md5, data, datalen);
memcpy(digest->data(), gcry_md_read(ctx_md5, 0), digest->size());
gcry_md_close(ctx_md5);
return true;
}
bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
{
gcry_md_hd_t ctx_md5;
gcry_error_t err;
off_t bytes;
unsigned char* result;
if(-1 == size){
struct stat st;
if(-1 == fstat(fd, &st)){
return NULL;
return false;
}
size = st.st_size;
}
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_md5, GCRY_MD_MD5, 0))){
S3FS_PRN_ERR("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
return NULL;
return false;
}
for(off_t total = 0; total < size; total += bytes){
@ -260,15 +272,14 @@ unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
// error
S3FS_PRN_ERR("file read error(%d)", errno);
gcry_md_close(ctx_md5);
return NULL;
return false;
}
gcry_md_write(ctx_md5, buf, bytes);
}
result = new unsigned char[get_md5_digest_length()];
memcpy(result, gcry_md_read(ctx_md5, 0), get_md5_digest_length());
memcpy(result->data(), gcry_md_read(ctx_md5, 0), result->size());
gcry_md_close(ctx_md5);
return result;
return true;
}
#endif // USE_GNUTLS_NETTLE
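
The MD5/SHA256 helpers now fill a caller-provided md5_t / sha256_t instead of returning a raw new[] buffer; judging by the result->size()/result->data() calls these are fixed-size array types (most likely std::array<unsigned char, 16> and <..., 32>, which is an assumption here). A self-contained sketch of the fixed-size-result style with a placeholder checksum:

#include <array>
#include <cstddef>

typedef std::array<unsigned char, 16> md5_like_t;   // assumed shape of md5_t

// Fills the caller's fixed-size buffer and reports success; nothing to free.
// The additive "checksum" is a placeholder, not a real MD5.
static bool toy_checksum(const unsigned char* data, size_t datalen, md5_like_t* result)
{
    if(!data || !result){
        return false;
    }
    result->fill(0);
    for(size_t i = 0; i < datalen; ++i){
        size_t pos = i % result->size();
        (*result)[pos] = static_cast<unsigned char>((*result)[pos] + data[i]);
    }
    return true;
}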
@ -276,30 +287,21 @@ unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
//-------------------------------------------------------------------
// Utility Function for SHA256
//-------------------------------------------------------------------
size_t get_sha256_digest_length()
{
return 32;
}
#ifdef USE_GNUTLS_NETTLE
bool s3fs_sha256(const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
bool s3fs_sha256(const unsigned char* data, size_t datalen, sha256_t* digest)
{
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
*digest = new unsigned char[*digestlen];
struct sha256_ctx ctx_sha256;
sha256_init(&ctx_sha256);
sha256_update(&ctx_sha256, datalen, data);
sha256_digest(&ctx_sha256, *digestlen, *digest);
sha256_digest(&ctx_sha256, digest->size(), digest->data());
return true;
}
unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
{
struct sha256_ctx ctx_sha256;
off_t bytes;
unsigned char* result;
sha256_init(&ctx_sha256);
@ -314,55 +316,49 @@ unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
}else if(-1 == bytes){
// error
S3FS_PRN_ERR("file read error(%d)", errno);
return NULL;
return false;
}
sha256_update(&ctx_sha256, bytes, buf);
}
result = new unsigned char[get_sha256_digest_length()];
sha256_digest(&ctx_sha256, get_sha256_digest_length(), result);
sha256_digest(&ctx_sha256, result->size(), result->data());
return result;
return true;
}
#else // USE_GNUTLS_NETTLE
bool s3fs_sha256(const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
bool s3fs_sha256(const unsigned char* data, size_t datalen, sha256_t* digest)
{
size_t len = (*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
*digest = new unsigned char[len];
gcry_md_hd_t ctx_sha256;
gcry_error_t err;
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
delete[] *digest;
return false;
}
gcry_md_write(ctx_sha256, data, datalen);
memcpy(*digest, gcry_md_read(ctx_sha256, 0), *digestlen);
memcpy(digest->data(), gcry_md_read(ctx_sha256, 0), digest->size());
gcry_md_close(ctx_sha256);
return true;
}
unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
{
gcry_md_hd_t ctx_sha256;
gcry_error_t err;
off_t bytes;
unsigned char* result;
if(-1 == size){
struct stat st;
if(-1 == fstat(fd, &st)){
return NULL;
return false;
}
size = st.st_size;
}
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
return NULL;
return false;
}
for(off_t total = 0; total < size; total += bytes){
@ -377,15 +373,14 @@ unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
// error
S3FS_PRN_ERR("file read error(%d)", errno);
gcry_md_close(ctx_sha256);
return NULL;
return false;
}
gcry_md_write(ctx_sha256, buf, bytes);
}
result = new unsigned char[get_sha256_digest_length()];
memcpy(result, gcry_md_read(ctx_sha256, 0), get_sha256_digest_length());
memcpy(result->data(), gcry_md_read(ctx_sha256, 0), result->size());
gcry_md_close(ctx_sha256);
return result;
return true;
}
#endif // USE_GNUTLS_NETTLE

View File

@ -27,7 +27,7 @@
#include "metaheader.h"
#include "string_util.h"
static const struct timespec DEFAULT_TIMESPEC = {-1, 0};
static constexpr struct timespec DEFAULT_TIMESPEC = {-1, 0};
//-------------------------------------------------------------------
// Utility functions for convert

View File

@ -32,12 +32,12 @@
//-------------------------------------------------------------------
// Global variables
//-------------------------------------------------------------------
utility_incomp_type utility_mode = NO_UTILITY_MODE;
utility_incomp_type utility_mode = utility_incomp_type::NO_UTILITY_MODE;
//-------------------------------------------------------------------
// Functions
//-------------------------------------------------------------------
static void print_incomp_mpu_list(incomp_mpu_list_t& list)
static void print_incomp_mpu_list(const incomp_mpu_list_t& list)
{
printf("\n");
printf("Lists the parts that have been uploaded for a specific multipart upload.\n");
@ -47,7 +47,7 @@ static void print_incomp_mpu_list(incomp_mpu_list_t& list)
printf("---------------------------------------------------------------\n");
int cnt = 0;
for(incomp_mpu_list_t::iterator iter = list.begin(); iter != list.end(); ++iter, ++cnt){
for(incomp_mpu_list_t::const_iterator iter = list.begin(); iter != list.end(); ++iter, ++cnt){
printf(" Path : %s\n", (*iter).key.c_str());
printf(" UploadId : %s\n", (*iter).id.c_str());
printf(" Date : %s\n", (*iter).date.c_str());
@ -60,17 +60,17 @@ static void print_incomp_mpu_list(incomp_mpu_list_t& list)
}
}
static bool abort_incomp_mpu_list(incomp_mpu_list_t& list, time_t abort_time)
static bool abort_incomp_mpu_list(const incomp_mpu_list_t& list, time_t abort_time)
{
if(list.empty()){
return true;
}
time_t now_time = time(NULL);
time_t now_time = time(nullptr);
// do removing.
S3fsCurl s3fscurl;
bool result = true;
for(incomp_mpu_list_t::iterator iter = list.begin(); iter != list.end(); ++iter){
for(incomp_mpu_list_t::const_iterator iter = list.begin(); iter != list.end(); ++iter){
const char* tpath = (*iter).key.c_str();
std::string upload_id = (*iter).id;
@ -100,7 +100,7 @@ static bool abort_incomp_mpu_list(incomp_mpu_list_t& list, time_t abort_time)
int s3fs_utility_processing(time_t abort_time)
{
if(NO_UTILITY_MODE == utility_mode){
if(utility_incomp_type::NO_UTILITY_MODE == utility_mode){
return EXIT_FAILURE;
}
printf("\n*** s3fs run as utility mode.\n\n");
@ -115,23 +115,23 @@ int s3fs_utility_processing(time_t abort_time)
// parse result(incomplete multipart upload information)
S3FS_PRN_DBG("response body = {\n%s\n}", body.c_str());
xmlDocPtr doc;
if(NULL == (doc = xmlReadMemory(body.c_str(), static_cast<int>(body.size()), "", NULL, 0))){
std::unique_ptr<xmlDoc, decltype(&xmlFreeDoc)> doc(xmlReadMemory(body.c_str(), static_cast<int>(body.size()), "", nullptr, 0), xmlFreeDoc);
if(nullptr == doc){
S3FS_PRN_DBG("xmlReadMemory exited with error.");
result = EXIT_FAILURE;
}else{
// make incomplete uploads list
incomp_mpu_list_t list;
if(!get_incomp_mpu_list(doc, list)){
if(!get_incomp_mpu_list(doc.get(), list)){
S3FS_PRN_DBG("get_incomp_mpu_list exited with error.");
result = EXIT_FAILURE;
}else{
if(INCOMP_TYPE_LIST == utility_mode){
if(utility_incomp_type::INCOMP_TYPE_LIST == utility_mode){
// print list
print_incomp_mpu_list(list);
}else if(INCOMP_TYPE_ABORT == utility_mode){
}else if(utility_incomp_type::INCOMP_TYPE_ABORT == utility_mode){
// remove
if(!abort_incomp_mpu_list(list, abort_time)){
S3FS_PRN_DBG("an error occurred during removal process.");
@ -139,7 +139,6 @@ int s3fs_utility_processing(time_t abort_time)
}
}
}
S3FS_XMLFREEDOC(doc);
}
}
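The unique_ptr with a custom deleter shown above replaces the removed S3FS_XMLFREEDOC cleanup. As a minimal standalone sketch (illustrative only, assuming a hypothetical parse_body helper and the libxml2 calls used in this hunk), the same pattern looks like this:

    #include <libxml/parser.h>
    #include <memory>
    #include <string>

    // The deleter type is the function-pointer type of xmlFreeDoc, so the
    // document is released on every return path without an explicit free call.
    typedef std::unique_ptr<xmlDoc, decltype(&xmlFreeDoc)> xmldoc_ptr;

    static bool parse_body(const std::string& body)
    {
        xmldoc_ptr doc(xmlReadMemory(body.c_str(), static_cast<int>(body.size()), "", nullptr, 0), xmlFreeDoc);
        if(nullptr == doc){
            return false;   // nothing allocated, nothing to free
        }
        // ... walk the tree through doc.get() ...
        return true;        // xmlFreeDoc(doc.get()) runs when doc goes out of scope
    }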

View File

@ -22,7 +22,7 @@
#define S3FS_MPU_UTIL_H_
#include <string>
#include <list>
#include <vector>
//-------------------------------------------------------------------
// Structure / Typedef
@ -34,12 +34,12 @@ typedef struct incomplete_multipart_upload_info
std::string date;
}INCOMP_MPU_INFO;
typedef std::list<INCOMP_MPU_INFO> incomp_mpu_list_t;
typedef std::vector<INCOMP_MPU_INFO> incomp_mpu_list_t;
//-------------------------------------------------------------------
// enum for utility process mode
//-------------------------------------------------------------------
enum utility_incomp_type{
enum class utility_incomp_type{
NO_UTILITY_MODE = 0, // not utility mode
INCOMP_TYPE_LIST, // list of incomplete mpu
INCOMP_TYPE_ABORT // delete incomplete mpu
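Because utility_incomp_type is now a scoped enum, its enumerators no longer leak into the surrounding namespace and no longer convert implicitly to int, which is why the .cpp hunks above qualify every use. A small sketch of the difference (illustrative code, not taken from the diff):

    enum class utility_incomp_type{
        NO_UTILITY_MODE = 0,
        INCOMP_TYPE_LIST,
        INCOMP_TYPE_ABORT
    };

    // Unscoped "NO_UTILITY_MODE" no longer compiles; the enumerator must be qualified.
    utility_incomp_type utility_mode = utility_incomp_type::NO_UTILITY_MODE;

    static bool is_list_mode(utility_incomp_type mode)
    {
        return utility_incomp_type::INCOMP_TYPE_LIST == mode;
    }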

View File

@ -1,139 +0,0 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "s3fs.h"
#include "mvnode.h"
//-------------------------------------------------------------------
// Utility functions for moving objects
//-------------------------------------------------------------------
MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, bool normdir)
{
MVNODE *p;
char *p_old_path;
char *p_new_path;
if(NULL == (p_old_path = strdup(old_path))){
printf("create_mvnode: could not allocation memory for p_old_path\n");
S3FS_FUSE_EXIT();
return NULL;
}
if(NULL == (p_new_path = strdup(new_path))){
free(p_old_path);
printf("create_mvnode: could not allocation memory for p_new_path\n");
S3FS_FUSE_EXIT();
return NULL;
}
p = new MVNODE();
p->old_path = p_old_path;
p->new_path = p_new_path;
p->is_dir = is_dir;
p->is_normdir = normdir;
p->prev = NULL;
p->next = NULL;
return p;
}
//
// Add sorted MVNODE data(Ascending order)
//
MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const char *new_path, bool is_dir, bool normdir)
{
if(!head || !tail){
return NULL;
}
MVNODE* cur;
MVNODE* mvnew;
for(cur = *head; cur; cur = cur->next){
if(cur->is_dir == is_dir){
int nResult = strcmp(cur->old_path, old_path);
if(0 == nResult){
// Found same old_path.
return cur;
}else if(0 > nResult){
// next check.
// ex: cur("abc"), mvnew("abcd")
// ex: cur("abc"), mvnew("abd")
continue;
}else{
// Add into before cur-pos.
// ex: cur("abc"), mvnew("ab")
// ex: cur("abc"), mvnew("abb")
if(NULL == (mvnew = create_mvnode(old_path, new_path, is_dir, normdir))){
return NULL;
}
if(cur->prev){
(cur->prev)->next = mvnew;
}else{
*head = mvnew;
}
mvnew->prev = cur->prev;
mvnew->next = cur;
cur->prev = mvnew;
return mvnew;
}
}
}
// Add into tail.
if(NULL == (mvnew = create_mvnode(old_path, new_path, is_dir, normdir))){
return NULL;
}
mvnew->prev = (*tail);
if(*tail){
(*tail)->next = mvnew;
}
(*tail) = mvnew;
if(!(*head)){
(*head) = mvnew;
}
return mvnew;
}
void free_mvnodes(MVNODE *head)
{
MVNODE *my_head;
MVNODE *next;
for(my_head = head, next = NULL; my_head; my_head = next){
next = my_head->next;
free(my_head->old_path);
free(my_head->new_path);
delete my_head;
}
}
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/

View File

@ -1,53 +0,0 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_MVNODE_H_
#define S3FS_MVNODE_H_
//-------------------------------------------------------------------
// Structure
//-------------------------------------------------------------------
typedef struct mvnode
{
char* old_path;
char* new_path;
bool is_dir;
bool is_normdir;
struct mvnode* prev;
struct mvnode* next;
} MVNODE;
//-------------------------------------------------------------------
// Utility functions for moving objects
//-------------------------------------------------------------------
MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, bool normdir = false);
MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const char *new_path, bool is_dir, bool normdir = false);
void free_mvnodes(MVNODE *head);
#endif // S3FS_MVNODE_H_
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/

View File

@ -18,15 +18,15 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <pthread.h>
#include <unistd.h>
#include <syslog.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
#include <nss.h>
#include <pk11pub.h>
#include <hasht.h>
@ -44,7 +44,7 @@
//-------------------------------------------------------------------
const char* s3fs_crypt_lib_name()
{
static const char version[] = "NSS";
static constexpr char version[] = "NSS";
return version;
}
@ -56,7 +56,7 @@ bool s3fs_init_global_ssl()
{
PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);
if(SECSuccess != NSS_NoDB_Init(NULL)){
if(SECSuccess != NSS_NoDB_Init(nullptr)){
S3FS_PRN_ERR("Failed NSS_NoDB_Init call.");
return false;
}
@ -87,10 +87,10 @@ bool s3fs_destroy_crypt_mutex()
//-------------------------------------------------------------------
// Utility Function for HMAC
//-------------------------------------------------------------------
static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool is_sha256)
static std::unique_ptr<unsigned char[]> s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen, bool is_sha256)
{
if(!key || !data || !digest || !digestlen){
return false;
if(!key || !data || !digestlen){
return nullptr;
}
PK11SlotInfo* Slot;
@ -98,19 +98,19 @@ static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* d
PK11Context* Context;
unsigned char tmpdigest[64];
SECItem KeySecItem = {siBuffer, reinterpret_cast<unsigned char*>(const_cast<void*>(key)), static_cast<unsigned int>(keylen)};
SECItem NullSecItem = {siBuffer, NULL, 0};
SECItem NullSecItem = {siBuffer, nullptr, 0};
if(NULL == (Slot = PK11_GetInternalKeySlot())){
return false;
if(nullptr == (Slot = PK11_GetInternalKeySlot())){
return nullptr;
}
if(NULL == (pKey = PK11_ImportSymKey(Slot, (is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), PK11_OriginUnwrap, CKA_SIGN, &KeySecItem, NULL))){
if(nullptr == (pKey = PK11_ImportSymKey(Slot, (is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), PK11_OriginUnwrap, CKA_SIGN, &KeySecItem, nullptr))){
PK11_FreeSlot(Slot);
return false;
return nullptr;
}
if(NULL == (Context = PK11_CreateContextBySymKey((is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), CKA_SIGN, pKey, &NullSecItem))){
if(nullptr == (Context = PK11_CreateContextBySymKey((is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), CKA_SIGN, pKey, &NullSecItem))){
PK11_FreeSymKey(pKey);
PK11_FreeSlot(Slot);
return false;
return nullptr;
}
*digestlen = 0;
@ -121,47 +121,54 @@ static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* d
PK11_DestroyContext(Context, PR_TRUE);
PK11_FreeSymKey(pKey);
PK11_FreeSlot(Slot);
return false;
return nullptr;
}
PK11_DestroyContext(Context, PR_TRUE);
PK11_FreeSymKey(pKey);
PK11_FreeSlot(Slot);
*digest = new unsigned char[*digestlen];
memcpy(*digest, tmpdigest, *digestlen);
std::unique_ptr<unsigned char[]> digest(new unsigned char[*digestlen]);
memcpy(digest.get(), tmpdigest, *digestlen);
return true;
return digest;
}
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
std::unique_ptr<unsigned char[]> s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
{
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false);
return s3fs_HMAC_RAW(key, keylen, data, datalen, digestlen, false);
}
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
std::unique_ptr<unsigned char[]> s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
{
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true);
return s3fs_HMAC_RAW(key, keylen, data, datalen, digestlen, true);
}
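With the new signature the HMAC buffer is owned by the returned std::unique_ptr and only the digest length travels through the out parameter, so callers no longer pair new[] with delete[]. A caller-side sketch under those assumptions (sign_example is an invented name; s3fs_HMAC256 is the function declared in s3fs_auth.h):

    #include <memory>
    #include <string>
    #include "s3fs_auth.h"

    static bool sign_example(const std::string& key, const std::string& data)
    {
        unsigned int digestlen = 0;
        std::unique_ptr<unsigned char[]> digest = s3fs_HMAC256(key.data(), key.size(),
            reinterpret_cast<const unsigned char*>(data.data()), data.size(), &digestlen);
        if(!digest){
            return false;   // a null pointer now signals the failure formerly reported as "false"
        }
        // digest.get() holds digestlen bytes; the memory is released automatically.
        return true;
    }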
//-------------------------------------------------------------------
// Utility Function for MD5
//-------------------------------------------------------------------
size_t get_md5_digest_length()
bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* result)
{
return MD5_LENGTH;
PK11Context* md5ctx;
unsigned int md5outlen;
md5ctx = PK11_CreateDigestContext(SEC_OID_MD5);
PK11_DigestOp(md5ctx, data, datalen);
PK11_DigestFinal(md5ctx, result->data(), &md5outlen, result->size());
PK11_DestroyContext(md5ctx, PR_TRUE);
return true;
}
unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
{
PK11Context* md5ctx;
off_t bytes;
unsigned char* result;
unsigned int md5outlen;
if(-1 == size){
struct stat st;
if(-1 == fstat(fd, &st)){
return NULL;
return false;
}
size = st.st_size;
}
@ -180,53 +187,42 @@ unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
// error
S3FS_PRN_ERR("file read error(%d)", errno);
PK11_DestroyContext(md5ctx, PR_TRUE);
return NULL;
return false;
}
PK11_DigestOp(md5ctx, buf, bytes);
}
result = new unsigned char[get_md5_digest_length()];
PK11_DigestFinal(md5ctx, result, &md5outlen, get_md5_digest_length());
PK11_DigestFinal(md5ctx, result->data(), &md5outlen, result->size());
PK11_DestroyContext(md5ctx, PR_TRUE);
return result;

return result;
return true;
}
//-------------------------------------------------------------------
// Utility Function for SHA256
//-------------------------------------------------------------------
size_t get_sha256_digest_length()
bool s3fs_sha256(const unsigned char* data, size_t datalen, sha256_t* digest)
{
return SHA256_LENGTH;
}
bool s3fs_sha256(const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
*digest = new unsigned char[*digestlen];
PK11Context* sha256ctx;
unsigned int sha256outlen;
sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256);
PK11_DigestOp(sha256ctx, data, datalen);
PK11_DigestFinal(sha256ctx, *digest, &sha256outlen, *digestlen);
PK11_DigestFinal(sha256ctx, digest->data(), &sha256outlen, digest->size());
PK11_DestroyContext(sha256ctx, PR_TRUE);
*digestlen = sha256outlen;
return true;
}
unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
{
PK11Context* sha256ctx;
off_t bytes;
unsigned char* result;
unsigned int sha256outlen;
if(-1 == size){
struct stat st;
if(-1 == fstat(fd, &st)){
return NULL;
return false;
}
size = st.st_size;
}
@ -245,15 +241,14 @@ unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
// error
S3FS_PRN_ERR("file read error(%d)", errno);
PK11_DestroyContext(sha256ctx, PR_TRUE);
return NULL;
return false;
}
PK11_DigestOp(sha256ctx, buf, bytes);
}
result = new unsigned char[get_sha256_digest_length()];
PK11_DigestFinal(sha256ctx, result, &sha256outlen, get_sha256_digest_length());
PK11_DigestFinal(sha256ctx, result->data(), &sha256outlen, result->size());
PK11_DestroyContext(sha256ctx, PR_TRUE);
return result;
return true;
}
/*

View File

@ -18,6 +18,10 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifdef __clang__
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#endif
#include <cstdio>
#include <cstdlib>
#include <cerrno>
@ -40,7 +44,7 @@
//-------------------------------------------------------------------
const char* s3fs_crypt_lib_name()
{
static const char version[] = "OpenSSL";
static constexpr char version[] = "OpenSSL";
return version;
}
@ -79,7 +83,7 @@ struct CRYPTO_dynlock_value
pthread_mutex_t dyn_mutex;
};
static pthread_mutex_t* s3fs_crypt_mutex = NULL;
static pthread_mutex_t* s3fs_crypt_mutex = nullptr;
static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line) __attribute__ ((unused));
static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line)
@ -120,7 +124,7 @@ static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int l
int result;
if(0 != (result = pthread_mutex_init(&(dyndata->dyn_mutex), &attr))){
S3FS_PRN_CRIT("pthread_mutex_init returned: %d", result);
return NULL;
return nullptr;
}
return dyndata;
}
@ -160,7 +164,10 @@ static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, c
bool s3fs_init_crypt_mutex()
{
if(s3fs_crypt_mutex){
S3FS_PRN_DBG("s3fs_crypt_mutex is not NULL, destroy it.");
S3FS_PRN_DBG("s3fs_crypt_mutex is not nullptr, destroy it.");
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownConditionTrueFalse
if(!s3fs_destroy_crypt_mutex()){
S3FS_PRN_ERR("Failed to s3fs_crypt_mutex");
return false;
@ -196,11 +203,11 @@ bool s3fs_destroy_crypt_mutex()
return true;
}
CRYPTO_set_dynlock_destroy_callback(NULL);
CRYPTO_set_dynlock_lock_callback(NULL);
CRYPTO_set_dynlock_create_callback(NULL);
CRYPTO_set_id_callback(NULL);
CRYPTO_set_locking_callback(NULL);
CRYPTO_set_dynlock_destroy_callback(nullptr);
CRYPTO_set_dynlock_lock_callback(nullptr);
CRYPTO_set_dynlock_create_callback(nullptr);
CRYPTO_set_id_callback(nullptr);
CRYPTO_set_locking_callback(nullptr);
for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
int result = pthread_mutex_destroy(&s3fs_crypt_mutex[cnt]);
@ -211,7 +218,7 @@ bool s3fs_destroy_crypt_mutex()
}
CRYPTO_cleanup_all_ex_data();
delete[] s3fs_crypt_mutex;
s3fs_crypt_mutex = NULL;
s3fs_crypt_mutex = nullptr;
return true;
}
@ -219,30 +226,30 @@ bool s3fs_destroy_crypt_mutex()
//-------------------------------------------------------------------
// Utility Function for HMAC
//-------------------------------------------------------------------
static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool is_sha256)
static std::unique_ptr<unsigned char[]> s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen, bool is_sha256)
{
if(!key || !data || !digest || !digestlen){
return false;
if(!key || !data || !digestlen){
return nullptr;
}
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
*digest = new unsigned char[*digestlen];
std::unique_ptr<unsigned char[]> digest(new unsigned char[*digestlen]);
if(is_sha256){
HMAC(EVP_sha256(), key, static_cast<int>(keylen), data, datalen, *digest, digestlen);
HMAC(EVP_sha256(), key, static_cast<int>(keylen), data, datalen, digest.get(), digestlen);
}else{
HMAC(EVP_sha1(), key, static_cast<int>(keylen), data, datalen, *digest, digestlen);
HMAC(EVP_sha1(), key, static_cast<int>(keylen), data, datalen, digest.get(), digestlen);
}
return true;
return digest;
}
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
std::unique_ptr<unsigned char[]> s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
{
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false);
return s3fs_HMAC_RAW(key, keylen, data, datalen, digestlen, false);
}
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
std::unique_ptr<unsigned char[]> s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
{
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true);
return s3fs_HMAC_RAW(key, keylen, data, datalen, digestlen, true);
}
#ifdef USE_OPENSSL_30
@ -253,29 +260,38 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
// OpenSSL 3.0 deprecated the MD5_*** low-level encryption functions,
// so we should use the high-level EVP API instead.
//
size_t get_md5_digest_length()
bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* digest)
{
return EVP_MD_size(EVP_md5());
unsigned int digestlen = static_cast<unsigned int>(digest->size());
const EVP_MD* md = EVP_get_digestbyname("md5");
EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
EVP_DigestInit_ex(mdctx, md, nullptr);
EVP_DigestUpdate(mdctx, data, datalen);
EVP_DigestFinal_ex(mdctx, digest->data(), &digestlen);
EVP_MD_CTX_destroy(mdctx);
return true;
}
unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
{
EVP_MD_CTX* mdctx;
unsigned char* md5_digest;
unsigned int md5_digest_len = get_md5_digest_length();
unsigned int md5_digest_len = static_cast<unsigned int>(result->size());
off_t bytes;
if(-1 == size){
struct stat st;
if(-1 == fstat(fd, &st)){
return NULL;
return false;
}
size = st.st_size;
}
// instead of MD5_Init
mdctx = EVP_MD_CTX_new();
EVP_DigestInit_ex(mdctx, EVP_md5(), NULL);
EVP_DigestInit_ex(mdctx, EVP_md5(), nullptr);
for(off_t total = 0; total < size; total += bytes){
const off_t len = 512;
@ -289,39 +305,48 @@ unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
// error
S3FS_PRN_ERR("file read error(%d)", errno);
EVP_MD_CTX_free(mdctx);
return NULL;
return false;
}
// instead of MD5_Update
EVP_DigestUpdate(mdctx, buf, bytes);
}
// instead of MD5_Final
md5_digest = new unsigned char[md5_digest_len];
EVP_DigestFinal_ex(mdctx, md5_digest, &md5_digest_len);
EVP_DigestFinal_ex(mdctx, result->data(), &md5_digest_len);
EVP_MD_CTX_free(mdctx);
return md5_digest;
return true;
}
#else
//-------------------------------------------------------------------
// Utility Function for MD5 (OpenSSL < 3.0)
//-------------------------------------------------------------------
size_t get_md5_digest_length()
// TODO: Does this fail on OpenSSL < 3.0 and we need to use MD5_CTX functions?
bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* digest)
{
return MD5_DIGEST_LENGTH;
unsigned int digestlen = digest->size();
const EVP_MD* md = EVP_get_digestbyname("md5");
EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
EVP_DigestInit_ex(mdctx, md, nullptr);
EVP_DigestUpdate(mdctx, data, datalen);
EVP_DigestFinal_ex(mdctx, digest->data(), &digestlen);
EVP_MD_CTX_destroy(mdctx);
return true;
}
unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
{
MD5_CTX md5ctx;
off_t bytes;
unsigned char* result;
if(-1 == size){
struct stat st;
if(-1 == fstat(fd, &st)){
return NULL;
return false;
}
size = st.st_size;
}
@ -339,62 +364,53 @@ unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
}else if(-1 == bytes){
// error
S3FS_PRN_ERR("file read error(%d)", errno);
return NULL;
return false;
}
MD5_Update(&md5ctx, buf, bytes);
}
result = new unsigned char[get_md5_digest_length()];
MD5_Final(result, &md5ctx);
MD5_Final(result->data(), &md5ctx);
return result;
return true;
}
#endif
//-------------------------------------------------------------------
// Utility Function for SHA256
//-------------------------------------------------------------------
size_t get_sha256_digest_length()
bool s3fs_sha256(const unsigned char* data, size_t datalen, sha256_t* digest)
{
return SHA256_DIGEST_LENGTH;
}
bool s3fs_sha256(const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
*digest = new unsigned char[*digestlen];
const EVP_MD* md = EVP_get_digestbyname("sha256");
EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
EVP_DigestInit_ex(mdctx, md, NULL);
EVP_DigestInit_ex(mdctx, md, nullptr);
EVP_DigestUpdate(mdctx, data, datalen);
EVP_DigestFinal_ex(mdctx, *digest, digestlen);
unsigned int digestlen = static_cast<unsigned int>(digest->size());
EVP_DigestFinal_ex(mdctx, digest->data(), &digestlen);
EVP_MD_CTX_destroy(mdctx);
return true;
}
unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
{
const EVP_MD* md = EVP_get_digestbyname("sha256");
EVP_MD_CTX* sha256ctx;
off_t bytes;
unsigned char* result;
if(-1 == fd){
return NULL;
return false;
}
if(-1 == size){
struct stat st;
if(-1 == fstat(fd, &st)){
S3FS_PRN_ERR("fstat error(%d)", errno);
return NULL;
return false;
}
size = st.st_size;
}
sha256ctx = EVP_MD_CTX_create();
EVP_DigestInit_ex(sha256ctx, md, NULL);
EVP_DigestInit_ex(sha256ctx, md, nullptr);
for(off_t total = 0; total < size; total += bytes){
const off_t len = 512;
@ -408,15 +424,14 @@ unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
// error
S3FS_PRN_ERR("file read error(%d)", errno);
EVP_MD_CTX_destroy(sha256ctx);
return NULL;
return false;
}
EVP_DigestUpdate(sha256ctx, buf, bytes);
}
result = new unsigned char[get_sha256_digest_length()];
EVP_DigestFinal_ex(sha256ctx, result, NULL);
EVP_DigestFinal_ex(sha256ctx, result->data(), nullptr);
EVP_MD_CTX_destroy(sha256ctx);
return result;
return true;
}
/*

View File

@ -41,6 +41,11 @@ class Semaphore
}
dispatch_release(sem);
}
Semaphore(const Semaphore&) = delete;
Semaphore(Semaphore&&) = delete;
Semaphore& operator=(const Semaphore&) = delete;
Semaphore& operator=(Semaphore&&) = delete;
void wait() { dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER); }
bool try_wait()
{

File diff suppressed because it is too large

View File

@ -59,27 +59,6 @@
#define S3FS_MALLOCTRIM(pad)
#endif // S3FS_MALLOC_TRIM
#define S3FS_XMLFREEDOC(doc) \
do{ \
xmlFreeDoc(doc); \
S3FS_MALLOCTRIM(0); \
}while(0)
#define S3FS_XMLFREE(ptr) \
do{ \
xmlFree(ptr); \
S3FS_MALLOCTRIM(0); \
}while(0)
#define S3FS_XMLXPATHFREECONTEXT(ctx) \
do{ \
xmlXPathFreeContext(ctx); \
S3FS_MALLOCTRIM(0); \
}while(0)
#define S3FS_XMLXPATHFREEOBJECT(obj) \
do{ \
xmlXPathFreeObject(obj); \
S3FS_MALLOCTRIM(0); \
}while(0)
#endif // S3FS_S3FS_H_
/*

View File

@ -21,9 +21,14 @@
#ifndef S3FS_AUTH_H_
#define S3FS_AUTH_H_
#include <array>
#include <memory>
#include <string>
#include <sys/types.h>
typedef std::array<unsigned char, 16> md5_t;
typedef std::array<unsigned char, 32> sha256_t;
//-------------------------------------------------------------------
// Utility functions for Authentication
//-------------------------------------------------------------------
@ -41,13 +46,12 @@ bool s3fs_init_global_ssl();
bool s3fs_destroy_global_ssl();
bool s3fs_init_crypt_mutex();
bool s3fs_destroy_crypt_mutex();
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen);
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen);
size_t get_md5_digest_length();
unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size);
bool s3fs_sha256(const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen);
size_t get_sha256_digest_length();
unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size);
std::unique_ptr<unsigned char[]> s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen);
std::unique_ptr<unsigned char[]> s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen);
bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* result);
bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result);
bool s3fs_sha256(const unsigned char* data, size_t datalen, sha256_t* digest);
bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result);
#endif // S3FS_AUTH_H_
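As a rough usage sketch of the new array-based digest interface (the print_sha256_of_fd helper below is hypothetical), callers can keep the digest on the stack and check only the bool result:

    #include <cstdio>
    #include "s3fs_auth.h"   // declares md5_t, sha256_t and the *_fd helpers

    static bool print_sha256_of_fd(int fd)
    {
        sha256_t digest{};                        // std::array<unsigned char, 32>
        if(!s3fs_sha256_fd(fd, 0, -1, &digest)){  // size of -1 hashes to end of file, as in the helpers above
            return false;
        }
        for(unsigned char byte : digest){
            printf("%02x", byte);
        }
        printf("\n");
        return true;
    }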

View File

@ -18,7 +18,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <errno.h>
#include <cerrno>
#include <unistd.h>
#include <pwd.h>
#include <sys/stat.h>
@ -29,6 +29,7 @@
#include "common.h"
#include "s3fs_cred.h"
#include "s3fs_help.h"
#include "s3fs_logger.h"
#include "curl.h"
#include "string_util.h"
@ -37,7 +38,7 @@
//-------------------------------------------------------------------
// Symbols
//-------------------------------------------------------------------
#define DEFAULT_AWS_PROFILE_NAME "default"
static constexpr char DEFAULT_AWS_PROFILE_NAME[] = "default";
//-------------------------------------------------------------------
// External Credential dummy function
@ -52,13 +53,11 @@
//
const char* VersionS3fsCredential(bool detail)
{
static const char version[] = "built-in";
static const char detail_version[] =
static constexpr char version[] = "built-in";
static constexpr char detail_version[] =
"s3fs-fuse built-in Credential I/F Function\n"
"Copyright(C) 2007 s3fs-fuse\n";
S3FS_PRN_CRIT("Check why built-in function was called, the external credential library must have VersionS3fsCredential function.");
if(detail){
return detail_version;
}else{
@ -100,13 +99,13 @@ bool UpdateS3fsCredential(char** ppaccess_key_id, char** ppserect_access_key, ch
}
if(ppaccess_key_id){
*ppaccess_key_id = NULL;
*ppaccess_key_id = nullptr;
}
if(ppserect_access_key){
*ppserect_access_key = NULL;
*ppserect_access_key = nullptr;
}
if(ppaccess_token){
*ppaccess_token = NULL;
*ppaccess_token = nullptr;
}
return false; // always false
}
@ -114,21 +113,19 @@ bool UpdateS3fsCredential(char** ppaccess_key_id, char** ppserect_access_key, ch
//-------------------------------------------------------------------
// Class Variables
//-------------------------------------------------------------------
const char* S3fsCred::ALLBUCKET_FIELDS_TYPE = "";
const char* S3fsCred::KEYVAL_FIELDS_TYPE = "\t";
const char* S3fsCred::AWS_ACCESSKEYID = "AWSAccessKeyId";
const char* S3fsCred::AWS_SECRETKEY = "AWSSecretKey";
constexpr char S3fsCred::ALLBUCKET_FIELDS_TYPE[];
constexpr char S3fsCred::KEYVAL_FIELDS_TYPE[];
constexpr char S3fsCred::AWS_ACCESSKEYID[];
constexpr char S3fsCred::AWS_SECRETKEY[];
const int S3fsCred::IAM_EXPIRE_MERGIN = 20 * 60; // update timing
const char* S3fsCred::ECS_IAM_ENV_VAR = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI";
const char* S3fsCred::IAMCRED_ACCESSKEYID = "AccessKeyId";
const char* S3fsCred::IAMCRED_SECRETACCESSKEY = "SecretAccessKey";
const char* S3fsCred::IAMCRED_ROLEARN = "RoleArn";
constexpr char S3fsCred::ECS_IAM_ENV_VAR[];
constexpr char S3fsCred::IAMCRED_ACCESSKEYID[];
constexpr char S3fsCred::IAMCRED_SECRETACCESSKEY[];
constexpr char S3fsCred::IAMCRED_ROLEARN[];
const char* S3fsCred::IAMv2_token_url = "http://169.254.169.254/latest/api/token";
int S3fsCred::IAMv2_token_ttl = 21600;
const char* S3fsCred::IAMv2_token_ttl_hdr = "X-aws-ec2-metadata-token-ttl-seconds";
const char* S3fsCred::IAMv2_token_hdr = "X-aws-ec2-metadata-token";
constexpr char S3fsCred::IAMv2_token_url[];
constexpr char S3fsCred::IAMv2_token_ttl_hdr[];
constexpr char S3fsCred::IAMv2_token_hdr[];
std::string S3fsCred::bucket_name;
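The pairing above follows the pre-C++17 rule for constexpr static data members: the value lives in the in-class initializer, but an odr-used array member still needs one out-of-class definition. A minimal sketch with an invented class name:

    // Header: value supplied in class.
    struct Config
    {
        static constexpr char DEFAULT_PROFILE[] = "default";
    };

    // Exactly one translation unit: definition without an initializer (redundant from
    // C++17 onward, where static constexpr members are implicitly inline).
    constexpr char Config::DEFAULT_PROFILE[];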
@ -185,7 +182,7 @@ S3fsCred::S3fsCred() :
IAM_token_field("Token"),
IAM_expiry_field("Expiration"),
set_builtin_cred_opts(false),
hExtCredLib(NULL),
hExtCredLib(nullptr),
pFuncCredVersion(VersionS3fsCredential),
pFuncCredInit(InitS3fsCredential),
pFuncCredFree(FreeS3fsCredential),
@ -376,7 +373,7 @@ bool S3fsCred::GetIAMCredentialsURL(std::string& url, bool check_iam_role, AutoL
if(is_ecs){
const char *env = std::getenv(S3fsCred::ECS_IAM_ENV_VAR);
if(env == NULL){
if(env == nullptr){
S3FS_PRN_ERR("%s is not set.", S3fsCred::ECS_IAM_ENV_VAR);
return false;
}
@ -481,14 +478,14 @@ bool S3fsCred::LoadIAMCredentials(AutoLock::Type type)
return false;
}
const char* iam_v2_token = NULL;
const char* iam_v2_token = nullptr;
std::string str_iam_v2_token;
if(GetIMDSVersion(AutoLock::ALREADY_LOCKED) > 1){
str_iam_v2_token = GetIAMv2APIToken(AutoLock::ALREADY_LOCKED);
iam_v2_token = str_iam_v2_token.c_str();
}
const char* ibm_secret_access_key = NULL;
const char* ibm_secret_access_key = nullptr;
std::string str_ibm_secret_access_key;
if(IsIBMIAMAuth()){
str_ibm_secret_access_key = AWSSecretAccessKey;
@ -523,7 +520,7 @@ bool S3fsCred::LoadIAMRoleFromMetaData()
return false;
}
const char* iam_v2_token = NULL;
const char* iam_v2_token = nullptr;
std::string str_iam_v2_token;
if(GetIMDSVersion(AutoLock::ALREADY_LOCKED) > 1){
str_iam_v2_token = GetIAMv2APIToken(AutoLock::ALREADY_LOCKED);
@ -570,8 +567,8 @@ bool S3fsCred::SetIAMCredentials(const char* response, AutoLock::Type type)
}
AWSAccessTokenExpire = static_cast<time_t>(tmp_expire);
}else{
AWSAccessKeyId = keyval[std::string(S3fsCred::IAMCRED_ACCESSKEYID)];
AWSSecretAccessKey = keyval[std::string(S3fsCred::IAMCRED_SECRETACCESSKEY)];
AWSAccessKeyId = keyval[S3fsCred::IAMCRED_ACCESSKEYID];
AWSSecretAccessKey = keyval[S3fsCred::IAMCRED_SECRETACCESSKEY];
AWSAccessTokenExpire = cvtIAMExpireStringToTime(keyval[IAM_expiry_field].c_str());
}
return true;
@ -628,7 +625,7 @@ bool S3fsCred::CheckS3fsPasswdFilePerms()
// let's get the file info
if(stat(passwd_file.c_str(), &info) != 0){
S3FS_PRN_EXIT("unexpected error from stat(%s).", passwd_file.c_str());
S3FS_PRN_EXIT("unexpected error from stat(%s): %s", passwd_file.c_str(), strerror(errno));
return false;
}
@ -997,18 +994,18 @@ bool S3fsCred::InitialS3fsCredentials()
}
// 3 - environment variables
char* AWSACCESSKEYID = getenv("AWS_ACCESS_KEY_ID") ? getenv("AWS_ACCESS_KEY_ID") : getenv("AWSACCESSKEYID");
char* AWSSECRETACCESSKEY = getenv("AWS_SECRET_ACCESS_KEY") ? getenv("AWS_SECRET_ACCESS_KEY") : getenv("AWSSECRETACCESSKEY");
char* AWSSESSIONTOKEN = getenv("AWS_SESSION_TOKEN") ? getenv("AWS_SESSION_TOKEN") : getenv("AWSSESSIONTOKEN");
const char* AWSACCESSKEYID = getenv("AWS_ACCESS_KEY_ID") ? getenv("AWS_ACCESS_KEY_ID") : getenv("AWSACCESSKEYID");
const char* AWSSECRETACCESSKEY = getenv("AWS_SECRET_ACCESS_KEY") ? getenv("AWS_SECRET_ACCESS_KEY") : getenv("AWSSECRETACCESSKEY");
const char* AWSSESSIONTOKEN = getenv("AWS_SESSION_TOKEN") ? getenv("AWS_SESSION_TOKEN") : getenv("AWSSESSIONTOKEN");
if(AWSACCESSKEYID != NULL || AWSSECRETACCESSKEY != NULL){
if( (AWSACCESSKEYID == NULL && AWSSECRETACCESSKEY != NULL) ||
(AWSACCESSKEYID != NULL && AWSSECRETACCESSKEY == NULL) ){
if(AWSACCESSKEYID != nullptr || AWSSECRETACCESSKEY != nullptr){
if( (AWSACCESSKEYID == nullptr && AWSSECRETACCESSKEY != nullptr) ||
(AWSACCESSKEYID != nullptr && AWSSECRETACCESSKEY == nullptr) ){
S3FS_PRN_EXIT("both environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY must be set together.");
return false;
}
S3FS_PRN_INFO2("access key from env variables");
if(AWSSESSIONTOKEN != NULL){
if(AWSSESSIONTOKEN != nullptr){
S3FS_PRN_INFO2("session token is available");
if(!SetAccessKeyWithSessionToken(AWSACCESSKEYID, AWSSECRETACCESSKEY, AWSSESSIONTOKEN, AutoLock::NONE)){
S3FS_PRN_EXIT("session token is invalid.");
@ -1030,7 +1027,7 @@ bool S3fsCred::InitialS3fsCredentials()
// 3a - from the AWS_CREDENTIAL_FILE environment variable
char* AWS_CREDENTIAL_FILE = getenv("AWS_CREDENTIAL_FILE");
if(AWS_CREDENTIAL_FILE != NULL){
if(AWS_CREDENTIAL_FILE != nullptr){
passwd_file = AWS_CREDENTIAL_FILE;
if(IsSetPasswdFile()){
if(!IsReadableS3fsPasswdFile()){
@ -1055,7 +1052,7 @@ bool S3fsCred::InitialS3fsCredentials()
// 4 - from the default location in the users home directory
char* HOME = getenv("HOME");
if(HOME != NULL){
if(HOME != nullptr){
passwd_file = HOME;
passwd_file += "/.passwd-s3fs";
if(IsReadableS3fsPasswdFile()){
@ -1148,7 +1145,7 @@ bool S3fsCred::CheckIAMCredentialUpdate(std::string* access_key_id, std::string*
AutoLock auto_lock(&token_lock);
if(IsIBMIAMAuth() || IsSetExtCredLib() || is_ecs || IsSetIAMRole(AutoLock::ALREADY_LOCKED)){
if(AWSAccessTokenExpire < (time(NULL) + S3fsCred::IAM_EXPIRE_MERGIN)){
if(AWSAccessTokenExpire < (time(nullptr) + S3fsCred::IAM_EXPIRE_MERGIN)){
S3FS_PRN_INFO("IAM Access Token refreshing...");
// update
@ -1187,7 +1184,7 @@ bool S3fsCred::CheckIAMCredentialUpdate(std::string* access_key_id, std::string*
const char* S3fsCred::GetCredFuncVersion(bool detail) const
{
static const char errVersion[] = "unknown";
static constexpr char errVersion[] = "unknown";
if(!pFuncCredVersion){
return errVersion;
@ -1235,13 +1232,13 @@ bool S3fsCred::InitExtCredLib()
}
// Initialize library
if(!pFuncCredInit){
S3FS_PRN_CRIT("\"InitS3fsCredential\" function pointer is NULL, why?");
S3FS_PRN_CRIT("\"InitS3fsCredential\" function pointer is nullptr, why?");
UnloadExtCredLib();
return false;
}
const char* popts = credlib_opts.empty() ? NULL : credlib_opts.c_str();
char* perrstr = NULL;
const char* popts = credlib_opts.empty() ? nullptr : credlib_opts.c_str();
char* perrstr = nullptr;
if(!(*pFuncCredInit)(popts, &perrstr)){
S3FS_PRN_ERR("Could not initialize %s(external credential library) by \"InitS3fsCredential\" function : %s", credlib.c_str(), perrstr ? perrstr : "unknown");
// cppcheck-suppress unmatchedSuppression
@ -1274,28 +1271,28 @@ bool S3fsCred::LoadExtCredLib()
//
// Search Library: (RPATH ->) LD_LIBRARY_PATH -> (RUNPATH ->) /etc/ld.so.cache -> /lib -> /usr/lib
//
if(NULL == (hExtCredLib = dlopen(credlib.c_str(), RTLD_LAZY))){
if(nullptr == (hExtCredLib = dlopen(credlib.c_str(), RTLD_LAZY))){
const char* preason = dlerror();
S3FS_PRN_ERR("Could not load %s(external credential library) by error : %s", credlib.c_str(), preason ? preason : "unknown");
return false;
}
// Set function pointers
if(NULL == (pFuncCredVersion = reinterpret_cast<fp_VersionS3fsCredential>(dlsym(hExtCredLib, "VersionS3fsCredential")))){
if(nullptr == (pFuncCredVersion = reinterpret_cast<fp_VersionS3fsCredential>(dlsym(hExtCredLib, "VersionS3fsCredential")))){
S3FS_PRN_ERR("%s(external credential library) does not have \"VersionS3fsCredential\" function which is required.", credlib.c_str());
UnloadExtCredLib();
return false;
}
if(NULL == (pFuncCredUpdate = reinterpret_cast<fp_UpdateS3fsCredential>(dlsym(hExtCredLib, "UpdateS3fsCredential")))){
if(nullptr == (pFuncCredUpdate = reinterpret_cast<fp_UpdateS3fsCredential>(dlsym(hExtCredLib, "UpdateS3fsCredential")))){
S3FS_PRN_ERR("%s(external credential library) does not have \"UpdateS3fsCredential\" function which is required.", credlib.c_str());
UnloadExtCredLib();
return false;
}
if(NULL == (pFuncCredInit = reinterpret_cast<fp_InitS3fsCredential>(dlsym(hExtCredLib, "InitS3fsCredential")))){
if(nullptr == (pFuncCredInit = reinterpret_cast<fp_InitS3fsCredential>(dlsym(hExtCredLib, "InitS3fsCredential")))){
S3FS_PRN_INFO("%s(external credential library) does not have \"InitS3fsCredential\" function which is optional.", credlib.c_str());
pFuncCredInit = InitS3fsCredential; // set built-in function
}
if(NULL == (pFuncCredFree = reinterpret_cast<fp_FreeS3fsCredential>(dlsym(hExtCredLib, "FreeS3fsCredential")))){
if(nullptr == (pFuncCredFree = reinterpret_cast<fp_FreeS3fsCredential>(dlsym(hExtCredLib, "FreeS3fsCredential")))){
S3FS_PRN_INFO("%s(external credential library) does not have \"FreeS3fsCredential\" function which is optional.", credlib.c_str());
pFuncCredFree = FreeS3fsCredential; // set built-in function
}
@ -1311,9 +1308,9 @@ bool S3fsCred::UnloadExtCredLib()
// Uninitialize library
if(!pFuncCredFree){
S3FS_PRN_CRIT("\"FreeS3fsCredential\" function pointer is NULL, why?");
S3FS_PRN_CRIT("\"FreeS3fsCredential\" function pointer is nullptr, why?");
}else{
char* perrstr = NULL;
char* perrstr = nullptr;
if(!(*pFuncCredFree)(&perrstr)){
S3FS_PRN_ERR("Could not uninitialize by \"FreeS3fsCredential\" function : %s", perrstr ? perrstr : "unknown");
}
@ -1332,7 +1329,7 @@ bool S3fsCred::UnloadExtCredLib()
// close
dlclose(hExtCredLib);
hExtCredLib = NULL;
hExtCredLib = nullptr;
}
return true;
}
@ -1346,10 +1343,10 @@ bool S3fsCred::UpdateExtCredentials(AutoLock::Type type)
AutoLock auto_lock(&token_lock, type);
char* paccess_key_id = NULL;
char* pserect_access_key = NULL;
char* paccess_token = NULL;
char* perrstr = NULL;
char* paccess_key_id = nullptr;
char* pserect_access_key = nullptr;
char* paccess_token = nullptr;
char* perrstr = nullptr;
long long token_expire = 0;
bool result = (*pFuncCredUpdate)(&paccess_key_id, &pserect_access_key, &paccess_token, &token_expire, &perrstr);
@ -1524,6 +1521,7 @@ bool S3fsCred::CheckForbiddenBucketParams()
// The first plain argument is the bucket
if(bucket_name.empty()){
S3FS_PRN_EXIT("missing BUCKET argument.");
show_usage();
return false;
}
@ -1536,7 +1534,7 @@ bool S3fsCred::CheckForbiddenBucketParams()
// check bucket name for illegal characters
size_t found = bucket_name.find_first_of("/:\\;!@#$%^&*?|+=");
if(found != std::string::npos){
S3FS_PRN_EXIT("BUCKET %s -- bucket name contains an illegal character.", bucket_name.c_str());
S3FS_PRN_EXIT("BUCKET %s -- bucket name contains an illegal character: '%c' at position %zu", bucket_name.c_str(), bucket_name[found], found);
return false;
}

View File

@ -40,16 +40,16 @@ typedef std::map<std::string, std::string> iamcredmap_t;
class S3fsCred
{
private:
static const char* ALLBUCKET_FIELDS_TYPE; // special key for mapping(This name is absolutely not used as a bucket name)
static const char* KEYVAL_FIELDS_TYPE; // special key for mapping(This name is absolutely not used as a bucket name)
static const char* AWS_ACCESSKEYID;
static const char* AWS_SECRETKEY;
static constexpr char ALLBUCKET_FIELDS_TYPE[] = ""; // special key for mapping(This name is absolutely not used as a bucket name)
static constexpr char KEYVAL_FIELDS_TYPE[] = "\t"; // special key for mapping(This name is absolutely not used as a bucket name)
static constexpr char AWS_ACCESSKEYID[] = "AWSAccessKeyId";
static constexpr char AWS_SECRETKEY[] = "AWSSecretKey";
static const int IAM_EXPIRE_MERGIN;
static const char* ECS_IAM_ENV_VAR;
static const char* IAMCRED_ACCESSKEYID;
static const char* IAMCRED_SECRETACCESSKEY;
static const char* IAMCRED_ROLEARN;
static constexpr int IAM_EXPIRE_MERGIN = 20 * 60; // update timing
static constexpr char ECS_IAM_ENV_VAR[] = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI";
static constexpr char IAMCRED_ACCESSKEYID[] = "AccessKeyId";
static constexpr char IAMCRED_SECRETACCESSKEY[] = "SecretAccessKey";
static constexpr char IAMCRED_ROLEARN[] = "RoleArn";
static std::string bucket_name;
@ -89,10 +89,10 @@ class S3fsCred
fp_UpdateS3fsCredential pFuncCredUpdate;
public:
static const char* IAMv2_token_url;
static int IAMv2_token_ttl;
static const char* IAMv2_token_ttl_hdr;
static const char* IAMv2_token_hdr;
static constexpr char IAMv2_token_url[] = "http://169.254.169.254/latest/api/token";
static constexpr int IAMv2_token_ttl = 21600;
static constexpr char IAMv2_token_ttl_hdr[] = "X-aws-ec2-metadata-token-ttl-seconds";
static constexpr char IAMv2_token_hdr[] = "X-aws-ec2-metadata-token";
private:
static bool ParseIAMRoleFromMetaDataResponse(const char* response, std::string& rolename);
@ -159,12 +159,16 @@ class S3fsCred
S3fsCred();
~S3fsCred();
S3fsCred(const S3fsCred&) = delete;
S3fsCred(S3fsCred&&) = delete;
S3fsCred& operator=(const S3fsCred&) = delete;
S3fsCred& operator=(S3fsCred&&) = delete;
bool IsIBMIAMAuth() const { return is_ibm_iam_auth; }
bool LoadIAMRoleFromMetaData();
bool CheckIAMCredentialUpdate(std::string* access_key_id = NULL, std::string* secret_access_key = NULL, std::string* access_token = NULL);
bool CheckIAMCredentialUpdate(std::string* access_key_id = nullptr, std::string* secret_access_key = nullptr, std::string* access_token = nullptr);
const char* GetCredFuncVersion(bool detail) const;
int DetectParam(const char* arg);

View File

@ -55,11 +55,11 @@ extern const char* VersionS3fsCredential(bool detail) S3FS_FUNCATTR_WEAK;
// implemented, it will not be called.
//
// const char* popts : String passed with the credlib_opts option. If the
// credlib_opts option is not specified, NULL will be
// credlib_opts option is not specified, nullptr will be
// passed.
// char** pperrstr : pperrstr is used to pass the error message to the
// caller when an error occurs.
// If this pointer is not NULL, you can allocate memory
// If this pointer is not nullptr, you can allocate memory
// and set an error message to it. The allocated memory
// area is freed by the caller.
//
@ -75,7 +75,7 @@ extern bool InitS3fsCredential(const char* popts, char** pperrstr) S3FS_FUNCATTR
//
// char** pperrstr : pperrstr is used to pass the error message to the
// caller when an error occurs.
// If this pointer is not NULL, you can allocate memory
// If this pointer is not nullptr, you can allocate memory
// and set an error message to it. The allocated memory
// area is freed by the caller.
//

View File

@ -29,7 +29,7 @@
//-------------------------------------------------------------------
// Contents
//-------------------------------------------------------------------
static const char help_string[] =
static constexpr char help_string[] =
"\n"
"Mount an Amazon S3 bucket as a file system.\n"
"\n"
@ -128,6 +128,8 @@ static const char help_string[] =
" environment which value is <kms id>. You must be careful\n"
" about that you can not use the KMS id which is not same EC2\n"
" region.\n"
" Additionally, if you specify SSE-KMS, your endpoints must use\n"
" Secure Sockets Layer(SSL) or Transport Layer Security(TLS).\n"
"\n"
" load_sse_c - specify SSE-C keys\n"
" Specify the custom-provided encryption keys file path for decrypting\n"
@ -215,6 +217,21 @@ static const char help_string[] =
" ssl_verify_hostname (default=\"2\")\n"
" - When 0, do not verify the SSL certificate against the hostname.\n"
"\n"
" ssl_client_cert (default=\"\")\n"
" - Specify an SSL client certificate.\n"
" Specify this optional parameter in the following format:\n"
" \"<SSL Cert>[:<Cert Type>[:<Private Key>[:<Key Type>\n"
" [:<Password>]]]]\"\n"
" <SSL Cert>: Client certificate.\n"
" Specify the file path or NickName(for NSS, etc.).\n"
" <Cert Type>: Type of certificate, default is \"PEM\"(optional).\n"
" <Private Key>: Certificate's private key file(optional).\n"
" <Key Type>: Type of private key, default is \"PEM\"(optional).\n"
" <Password>: Passphrase of the private key(optional).\n"
" It is also possible to omit this value and specify\n"
" it using the environment variable\n"
" \"S3FS_SSL_PRIVKEY_PASSWORD\".\n"
"\n"
" nodnscache (disable DNS cache)\n"
" - s3fs is always using DNS cache, this option make DNS cache disable.\n"
"\n"
@ -270,6 +287,15 @@ static const char help_string[] =
" space is smaller than this value, s3fs do not use disk space\n"
" as possible in exchange for the performance.\n"
"\n"
" free_space_ratio (default=\"10\")\n"
" - sets min free space ratio of the disk.\n"
" The value of this option can be between 0 and 100. It will control\n"
" the size of the cache according to this ratio to ensure that the\n"
" idle ratio of the disk is greater than this value.\n"
" For example, when the disk space is 50GB, the default value will\n"
" ensure that the disk will reserve at least 50GB * 10%% = 5GB of\n"
" remaining space.\n"
"\n"
" multipart_threshold (default=\"25\")\n"
" - threshold, in MB, to use multipart upload instead of\n"
" single-part. Must be at least 5 MB.\n"
@ -516,6 +542,14 @@ static const char help_string[] =
" Separate the username and passphrase with a ':' character and\n"
" specify each as a URL-encoded string.\n"
"\n"
" ipresolve (default=\"whatever\")\n"
" Select what type of IP addresses to use when establishing a\n"
" connection.\n"
" Default('whatever') can use addresses of all IP versions(IPv4 and\n"
" IPv6) that your system allows. If you specify 'IPv4', only IPv4\n"
" addresses are used. And when 'IPv6'is specified, only IPv6 addresses\n"
" will be used.\n"
"\n"
" logfile - specify the log output file.\n"
" s3fs outputs the log file to syslog. Alternatively, if s3fs is\n"
" started with the \"-f\" option specified, the log will be output\n"
@ -629,7 +663,7 @@ void show_version()
const char* short_version()
{
static const char short_ver[] = "s3fs version " VERSION "(" COMMIT_HASH_VAL ")";
static constexpr char short_ver[] = "s3fs version " VERSION "(" COMMIT_HASH_VAL ")";
return short_ver;
}

View File

@ -20,6 +20,7 @@
#include <cstdlib>
#include <iomanip>
#include <memory>
#include <sstream>
#include <string>
@ -29,14 +30,13 @@
//-------------------------------------------------------------------
// S3fsLog class : variables
//-------------------------------------------------------------------
const int S3fsLog::NEST_MAX;
const char* const S3fsLog::nest_spaces[S3fsLog::NEST_MAX] = {"", " ", " ", " "};
const char S3fsLog::LOGFILEENV[] = "S3FS_LOGFILE";
const char S3fsLog::MSGTIMESTAMP[] = "S3FS_MSGTIMESTAMP";
S3fsLog* S3fsLog::pSingleton = NULL;
constexpr char S3fsLog::LOGFILEENV[];
constexpr const char* S3fsLog::nest_spaces[];
constexpr char S3fsLog::MSGTIMESTAMP[];
S3fsLog* S3fsLog::pSingleton = nullptr;
S3fsLog::s3fs_log_level S3fsLog::debug_level = S3fsLog::LEVEL_CRIT;
FILE* S3fsLog::logfp = NULL;
std::string* S3fsLog::plogfile = NULL;
FILE* S3fsLog::logfp = nullptr;
std::string S3fsLog::logfile;
bool S3fsLog::time_stamp = true;
//-------------------------------------------------------------------
@ -59,7 +59,7 @@ std::string S3fsLog::GetCurrentTime()
now.tv_sec = tsnow.tv_sec;
now.tv_usec = (tsnow.tv_nsec / 1000);
}else{
gettimeofday(&now, NULL);
gettimeofday(&now, nullptr);
}
strftime(tmp, sizeof(tmp), "%Y-%m-%dT%H:%M:%S", gmtime_r(&now.tv_sec, &res));
current_time << tmp << "." << std::setfill('0') << std::setw(3) << (now.tv_usec / 1000) << "Z ";
@ -70,7 +70,7 @@ std::string S3fsLog::GetCurrentTime()
bool S3fsLog::SetLogfile(const char* pfile)
{
if(!S3fsLog::pSingleton){
S3FS_PRN_CRIT("S3fsLog::pSingleton is NULL.");
S3FS_PRN_CRIT("S3fsLog::pSingleton is nullptr.");
return false;
}
return S3fsLog::pSingleton->LowSetLogfile(pfile);
@ -79,25 +79,25 @@ bool S3fsLog::SetLogfile(const char* pfile)
bool S3fsLog::ReopenLogfile()
{
if(!S3fsLog::pSingleton){
S3FS_PRN_CRIT("S3fsLog::pSingleton is NULL.");
S3FS_PRN_CRIT("S3fsLog::pSingleton is nullptr.");
return false;
}
if(!S3fsLog::logfp){
S3FS_PRN_INFO("Currently the log file is output to stdout/stderr.");
return true;
}
if(!S3fsLog::plogfile){
S3FS_PRN_ERR("There is a problem with the path to the log file being NULL.");
if(S3fsLog::logfile.empty()){
S3FS_PRN_ERR("There is a problem with the path to the log file being empty.");
return false;
}
std::string tmp = *(S3fsLog::plogfile);
std::string tmp = S3fsLog::logfile;
return S3fsLog::pSingleton->LowSetLogfile(tmp.c_str());
}
S3fsLog::s3fs_log_level S3fsLog::SetLogLevel(s3fs_log_level level)
{
if(!S3fsLog::pSingleton){
S3FS_PRN_CRIT("S3fsLog::pSingleton is NULL.");
S3FS_PRN_CRIT("S3fsLog::pSingleton is nullptr.");
return S3fsLog::debug_level; // Although it is an error, it returns the current value.
}
return S3fsLog::pSingleton->LowSetLogLevel(level);
@ -106,7 +106,7 @@ S3fsLog::s3fs_log_level S3fsLog::SetLogLevel(s3fs_log_level level)
S3fsLog::s3fs_log_level S3fsLog::BumpupLogLevel()
{
if(!S3fsLog::pSingleton){
S3FS_PRN_CRIT("S3fsLog::pSingleton is NULL.");
S3FS_PRN_CRIT("S3fsLog::pSingleton is nullptr.");
return S3fsLog::debug_level; // Although it is an error, it returns the current value.
}
return S3fsLog::pSingleton->LowBumpupLogLevel();
@ -139,15 +139,12 @@ S3fsLog::~S3fsLog()
{
if(S3fsLog::pSingleton == this){
FILE* oldfp = S3fsLog::logfp;
S3fsLog::logfp = NULL;
S3fsLog::logfp = nullptr;
if(oldfp && 0 != fclose(oldfp)){
S3FS_PRN_ERR("Could not close old log file(%s), but continue...", (S3fsLog::plogfile ? S3fsLog::plogfile->c_str() : "null"));
S3FS_PRN_ERR("Could not close old log file(%s), but continue...", (S3fsLog::logfile.empty() ? S3fsLog::logfile.c_str() : "null"));
}
if(S3fsLog::plogfile){
delete S3fsLog::plogfile;
S3fsLog::plogfile = NULL;
}
S3fsLog::pSingleton = NULL;
S3fsLog::logfile.clear();
S3fsLog::pSingleton = nullptr;
S3fsLog::debug_level = S3fsLog::LEVEL_CRIT;
closelog();
@ -163,12 +160,12 @@ bool S3fsLog::LowLoadEnv()
return false;
}
char* pEnvVal;
if(NULL != (pEnvVal = getenv(S3fsLog::LOGFILEENV))){
if(nullptr != (pEnvVal = getenv(S3fsLog::LOGFILEENV))){
if(!SetLogfile(pEnvVal)){
return false;
}
}
if(NULL != (pEnvVal = getenv(S3fsLog::MSGTIMESTAMP))){
if(nullptr != (pEnvVal = getenv(S3fsLog::MSGTIMESTAMP))){
if(0 == strcasecmp(pEnvVal, "true") || 0 == strcasecmp(pEnvVal, "yes") || 0 == strcasecmp(pEnvVal, "1")){
S3fsLog::time_stamp = true;
}else if(0 == strcasecmp(pEnvVal, "false") || 0 == strcasecmp(pEnvVal, "no") || 0 == strcasecmp(pEnvVal, "0")){
@ -190,14 +187,11 @@ bool S3fsLog::LowSetLogfile(const char* pfile)
if(!pfile){
// close log file if it is opened
if(S3fsLog::logfp && 0 != fclose(S3fsLog::logfp)){
S3FS_PRN_ERR("Could not close log file(%s).", (S3fsLog::plogfile ? S3fsLog::plogfile->c_str() : "null"));
S3FS_PRN_ERR("Could not close log file(%s).", (S3fsLog::logfile.empty() ? S3fsLog::logfile.c_str() : "null"));
return false;
}
S3fsLog::logfp = NULL;
if(S3fsLog::plogfile){
delete S3fsLog::plogfile;
S3fsLog::plogfile = NULL;
}
S3fsLog::logfp = nullptr;
S3fsLog::logfile.clear();
}else{
// open new log file
//
@ -205,7 +199,7 @@ bool S3fsLog::LowSetLogfile(const char* pfile)
// It will reopen even if it is the same file.
//
FILE* newfp;
if(NULL == (newfp = fopen(pfile, "a+"))){
if(nullptr == (newfp = fopen(pfile, "a+"))){
S3FS_PRN_ERR("Could not open log file(%s).", pfile);
return false;
}
@ -213,13 +207,12 @@ bool S3fsLog::LowSetLogfile(const char* pfile)
// switch new log file and close old log file if it is opened
FILE* oldfp = S3fsLog::logfp;
if(oldfp && 0 != fclose(oldfp)){
S3FS_PRN_ERR("Could not close old log file(%s).", (S3fsLog::plogfile ? S3fsLog::plogfile->c_str() : "null"));
S3FS_PRN_ERR("Could not close old log file(%s).", (!S3fsLog::logfile.empty() ? S3fsLog::logfile.c_str() : "null"));
fclose(newfp);
return false;
}
S3fsLog::logfp = newfp;
delete S3fsLog::plogfile;
S3fsLog::plogfile = new std::string(pfile);
S3fsLog::logfile = pfile;
}
return true;
}
@ -261,24 +254,22 @@ void s3fs_low_logprn(S3fsLog::s3fs_log_level level, const char* file, const char
if(S3fsLog::IsS3fsLogLevel(level)){
va_list va;
va_start(va, fmt);
size_t len = vsnprintf(NULL, 0, fmt, va) + 1;
size_t len = vsnprintf(nullptr, 0, fmt, va) + 1;
va_end(va);
char *message = new char[len];
std::unique_ptr<char[]> message(new char[len]);
va_start(va, fmt);
vsnprintf(message, len, fmt, va);
vsnprintf(message.get(), len, fmt, va);
va_end(va);
if(foreground || S3fsLog::IsSetLogFile()){
S3fsLog::SeekEnd();
fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s:%s(%d): %s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(level), file, func, line, message);
fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s:%s(%d): %s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(level), file, func, line, message.get());
S3fsLog::Flush();
}else{
// TODO: why does this differ from s3fs_low_logprn2?
syslog(S3fsLog::GetSyslogLevel(level), "%s%s:%s(%d): %s", instance_name.c_str(), file, func, line, message);
syslog(S3fsLog::GetSyslogLevel(level), "%s%s:%s(%d): %s", instance_name.c_str(), file, func, line, message.get());
}
delete[] message;
}
}
@ -287,23 +278,21 @@ void s3fs_low_logprn2(S3fsLog::s3fs_log_level level, int nest, const char* file,
if(S3fsLog::IsS3fsLogLevel(level)){
va_list va;
va_start(va, fmt);
size_t len = vsnprintf(NULL, 0, fmt, va) + 1;
size_t len = vsnprintf(nullptr, 0, fmt, va) + 1;
va_end(va);
char *message = new char[len];
std::unique_ptr<char[]> message(new char[len]);
va_start(va, fmt);
vsnprintf(message, len, fmt, va);
vsnprintf(message.get(), len, fmt, va);
va_end(va);
if(foreground || S3fsLog::IsSetLogFile()){
S3fsLog::SeekEnd();
fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s%s:%s(%d): %s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(level), S3fsLog::GetS3fsLogNest(nest), file, func, line, message);
fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s%s:%s(%d): %s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(level), S3fsLog::GetS3fsLogNest(nest), file, func, line, message.get());
S3fsLog::Flush();
}else{
syslog(S3fsLog::GetSyslogLevel(level), "%s%s%s", instance_name.c_str(), S3fsLog::GetS3fsLogNest(nest), message);
syslog(S3fsLog::GetSyslogLevel(level), "%s%s%s", instance_name.c_str(), S3fsLog::GetS3fsLogNest(nest), message.get());
}
delete[] message;
}
}
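
A minimal, self-contained sketch of the measure-then-format idiom these hunks switch to; format_message is a hypothetical helper, not part of s3fs:

#include <cstdarg>
#include <cstdio>
#include <memory>
#include <string>

// The first vsnprintf() call only measures the output; the second writes into
// a unique_ptr<char[]> buffer, so no delete[] is needed on any exit path.
static std::string format_message(const char* fmt, ...)
{
    va_list va;
    va_start(va, fmt);
    size_t len = vsnprintf(nullptr, 0, fmt, va) + 1;   // +1 for the terminating NUL
    va_end(va);

    std::unique_ptr<char[]> message(new char[len]);
    va_start(va, fmt);
    vsnprintf(message.get(), len, fmt, va);
    va_end(va);

    return std::string(message.get());
}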

View File

@ -51,15 +51,15 @@ class S3fsLog
};
protected:
static const int NEST_MAX = 4;
static const char* const nest_spaces[NEST_MAX];
static const char LOGFILEENV[];
static const char MSGTIMESTAMP[];
static constexpr int NEST_MAX = 4;
static constexpr const char* nest_spaces[NEST_MAX] = {"", " ", " ", " "};
static constexpr char LOGFILEENV[] = "S3FS_LOGFILE";
static constexpr char MSGTIMESTAMP[] = "S3FS_MSGTIMESTAMP";
static S3fsLog* pSingleton;
static s3fs_log_level debug_level;
static FILE* logfp;
static std::string* plogfile;
static std::string logfile;
static bool time_stamp;
protected:
@ -76,7 +76,7 @@ class S3fsLog
static bool IsS3fsLogInfo() { return IsS3fsLogLevel(LEVEL_INFO); }
static bool IsS3fsLogDbg() { return IsS3fsLogLevel(LEVEL_DBG); }
static int GetSyslogLevel(s3fs_log_level level)
static constexpr int GetSyslogLevel(s3fs_log_level level)
{
return ( LEVEL_DBG == (level & LEVEL_DBG) ? LOG_DEBUG :
LEVEL_INFO == (level & LEVEL_DBG) ? LOG_INFO :
@ -86,7 +86,7 @@ class S3fsLog
static std::string GetCurrentTime();
static const char* GetLevelString(s3fs_log_level level)
static constexpr const char* GetLevelString(s3fs_log_level level)
{
return ( LEVEL_DBG == (level & LEVEL_DBG) ? "[DBG] " :
LEVEL_INFO == (level & LEVEL_DBG) ? "[INF] " :
@ -94,18 +94,14 @@ class S3fsLog
LEVEL_ERR == (level & LEVEL_DBG) ? "[ERR] " : "[CRT] " );
}
static const char* GetS3fsLogNest(int nest)
static constexpr const char* GetS3fsLogNest(int nest)
{
if(nest < NEST_MAX){
return nest_spaces[nest];
}else{
return nest_spaces[NEST_MAX - 1];
}
return nest_spaces[nest < NEST_MAX ? nest : NEST_MAX - 1];
}
static bool IsSetLogFile()
{
return (NULL != logfp);
return (nullptr != logfp);
}
static FILE* GetOutputLogFile()
@ -140,6 +136,10 @@ class S3fsLog
explicit S3fsLog();
~S3fsLog();
S3fsLog(const S3fsLog&) = delete;
S3fsLog(S3fsLog&&) = delete;
S3fsLog& operator=(const S3fsLog&) = delete;
S3fsLog& operator=(S3fsLog&&) = delete;
};
//-------------------------------------------------------------------
@ -230,6 +230,34 @@ void s3fs_low_logprn2(S3fsLog::s3fs_log_level level, int nest, const char* file,
#define S3FS_PRN_CURL(fmt, ...) S3FS_LOW_CURLDBG(fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CACHE(fp, ...) S3FS_LOW_CACHE(fp, ##__VA_ARGS__, "")
// Macros to print log with fuse context
#define PRINT_FUSE_CTX(level, indent, fmt, ...) do { \
if(S3fsLog::IsS3fsLogLevel(level)){ \
struct fuse_context *ctx = fuse_get_context(); \
if(ctx == NULL){ \
S3FS_LOW_LOGPRN2(level, indent, fmt, ##__VA_ARGS__); \
}else{ \
S3FS_LOW_LOGPRN2(level, indent, fmt"[pid=%u,uid=%u,gid=%u]",\
##__VA_ARGS__, \
(unsigned int)(ctx->pid), \
(unsigned int)(ctx->uid), \
(unsigned int)(ctx->gid)); \
} \
} \
} while (0)
#define FUSE_CTX_INFO(fmt, ...) do { \
PRINT_FUSE_CTX(S3fsLog::LEVEL_INFO, 0, fmt, ##__VA_ARGS__); \
} while (0)
#define FUSE_CTX_INFO1(fmt, ...) do { \
PRINT_FUSE_CTX(S3fsLog::LEVEL_INFO, 1, fmt, ##__VA_ARGS__); \
} while (0)
#define FUSE_CTX_DBG(fmt, ...) do { \
PRINT_FUSE_CTX(S3fsLog::LEVEL_DBG, 0, fmt, ##__VA_ARGS__); \
} while (0)
#endif // S3FS_LOGGER_H_
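
The header also makes the nest helper constexpr and replaces the if/else with a clamp. A stand-alone illustration of that clamping behaviour, with names and spacing reused here purely for illustration:

#include <cstdio>

static constexpr int         NEST_MAX = 4;
static constexpr const char* nest_spaces[NEST_MAX] = {"", "  ", "    ", "      "};

static constexpr const char* nest_indent(int nest)
{
    // nest levels at or beyond NEST_MAX reuse the deepest indent string
    return nest_spaces[nest < NEST_MAX ? nest : NEST_MAX - 1];
}

int main()
{
    printf("[%s]level 1\n", nest_indent(1));
    printf("[%s]level 9 is clamped\n", nest_indent(9));
    return 0;
}
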
/*

View File

@ -23,6 +23,7 @@
#include <unistd.h>
#include <cerrno>
#include <grp.h>
#include <memory>
#include <pwd.h>
#include <libgen.h>
#include <dirent.h>
@ -94,32 +95,27 @@ std::string get_username(uid_t uid)
{
size_t maxlen = max_password_size;
int result;
char* pbuf;
struct passwd pwinfo;
struct passwd* ppwinfo = NULL;
struct passwd* ppwinfo = nullptr;
// make buffer
pbuf = new char[maxlen];
std::unique_ptr<char[]> pbuf(new char[maxlen]);
// get pw information
while(ERANGE == (result = getpwuid_r(uid, &pwinfo, pbuf, maxlen, &ppwinfo))){
delete[] pbuf;
while(ERANGE == (result = getpwuid_r(uid, &pwinfo, pbuf.get(), maxlen, &ppwinfo))){
maxlen *= 2;
pbuf = new char[maxlen];
pbuf.reset(new char[maxlen]);
}
if(0 != result){
S3FS_PRN_ERR("could not get pw information(%d).", result);
delete[] pbuf;
return std::string("");
return "";
}
// check pw
if(NULL == ppwinfo){
delete[] pbuf;
return std::string("");
if(nullptr == ppwinfo){
return "";
}
std::string name = SAFESTRPTR(ppwinfo->pw_name);
delete[] pbuf;
return name;
}
@ -127,29 +123,25 @@ int is_uid_include_group(uid_t uid, gid_t gid)
{
size_t maxlen = max_group_name_length;
int result;
char* pbuf;
struct group ginfo;
struct group* pginfo = NULL;
struct group* pginfo = nullptr;
// make buffer
pbuf = new char[maxlen];
std::unique_ptr<char[]> pbuf(new char[maxlen]);
// get group information
while(ERANGE == (result = getgrgid_r(gid, &ginfo, pbuf, maxlen, &pginfo))){
delete[] pbuf;
while(ERANGE == (result = getgrgid_r(gid, &ginfo, pbuf.get(), maxlen, &pginfo))){
maxlen *= 2;
pbuf = new char[maxlen];
pbuf.reset(new char[maxlen]);
}
if(0 != result){
S3FS_PRN_ERR("could not get group information(%d).", result);
delete[] pbuf;
return -result;
}
// check group
if(NULL == pginfo){
if(nullptr == pginfo){
// there is not gid in group.
delete[] pbuf;
return -EINVAL;
}
@ -159,11 +151,9 @@ int is_uid_include_group(uid_t uid, gid_t gid)
for(ppgr_mem = pginfo->gr_mem; ppgr_mem && *ppgr_mem; ppgr_mem++){
if(username == *ppgr_mem){
// Found username in group.
delete[] pbuf;
return 1;
}
}
delete[] pbuf;
return 0;
}
@ -177,7 +167,7 @@ int is_uid_include_group(uid_t uid, gid_t gid)
// conflicts.
// To avoid this, exclusive control is performed by mutex.
//
static pthread_mutex_t* pbasename_lock = NULL;
static pthread_mutex_t* pbasename_lock = nullptr;
bool init_basename_lock()
{
@ -197,7 +187,7 @@ bool init_basename_lock()
if(0 != (result = pthread_mutex_init(pbasename_lock, &attr))){
S3FS_PRN_ERR("failed to init pbasename_lock: %d.", result);
delete pbasename_lock;
pbasename_lock = NULL;
pbasename_lock = nullptr;
return false;
}
return true;
@ -215,7 +205,7 @@ bool destroy_basename_lock()
return false;
}
delete pbasename_lock;
pbasename_lock = NULL;
pbasename_lock = nullptr;
return true;
}
@ -232,7 +222,7 @@ std::string mydirname(const std::string& path)
std::string mydirname(const char* path)
{
if(!path || '\0' == path[0]){
return std::string("");
return "";
}
char *buf = strdup(path);
@ -253,7 +243,7 @@ std::string mybasename(const std::string& path)
std::string mybasename(const char* path)
{
if(!path || '\0' == path[0]){
return std::string("");
return "";
}
char *buf = strdup(path);
@ -359,7 +349,7 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own)
DIR* dp;
struct dirent* dent;
if(NULL == (dp = opendir(dir))){
if(nullptr == (dp = opendir(dir))){
S3FS_PRN_ERR("could not open dir(%s) - errno(%d)", dir, errno);
return false;
}
@ -410,7 +400,7 @@ bool compare_sysname(const char* target)
// The buffer size of sysname member in struct utsname is
// OS dependent, but 512 bytes is sufficient for now.
//
static char* psysname = NULL;
static const char* psysname = nullptr;
static char sysname[512];
if(!psysname){
struct utsname sysinfo;
@ -454,11 +444,6 @@ void print_launch_message(int argc, char** argv)
S3FS_PRN_LAUNCH_INFO("%s", message.c_str());
}
//-------------------------------------------------------------------
// Utility for nanosecond time(timespec)
//-------------------------------------------------------------------
const struct timespec S3FS_OMIT_TS = {0, UTIME_OMIT};
//
// result: -1 ts1 < ts2
// 0 ts1 == ts2
@ -495,7 +480,7 @@ int compare_timespec(const struct stat& st, stat_time_type type, const struct ti
void set_timespec_to_stat(struct stat& st, stat_time_type type, const struct timespec& ts)
{
if(ST_TYPE_ATIME == type){
if(stat_time_type::ATIME == type){
#if defined(__APPLE__)
st.st_atime = ts.tv_sec;
st.st_atimespec.tv_nsec = ts.tv_nsec;
@ -503,7 +488,7 @@ void set_timespec_to_stat(struct stat& st, stat_time_type type, const struct tim
st.st_atim.tv_sec = ts.tv_sec;
st.st_atim.tv_nsec = ts.tv_nsec;
#endif
}else if(ST_TYPE_MTIME == type){
}else if(stat_time_type::MTIME == type){
#if defined(__APPLE__)
st.st_mtime = ts.tv_sec;
st.st_mtimespec.tv_nsec = ts.tv_nsec;
@ -511,7 +496,7 @@ void set_timespec_to_stat(struct stat& st, stat_time_type type, const struct tim
st.st_mtim.tv_sec = ts.tv_sec;
st.st_mtim.tv_nsec = ts.tv_nsec;
#endif
}else if(ST_TYPE_CTIME == type){
}else if(stat_time_type::CTIME == type){
#if defined(__APPLE__)
st.st_ctime = ts.tv_sec;
st.st_ctimespec.tv_nsec = ts.tv_nsec;
@ -520,27 +505,27 @@ void set_timespec_to_stat(struct stat& st, stat_time_type type, const struct tim
st.st_ctim.tv_nsec = ts.tv_nsec;
#endif
}else{
S3FS_PRN_ERR("unknown type(%d), so skip to set value.", type);
S3FS_PRN_ERR("unknown type(%d), so skip to set value.", static_cast<int>(type));
}
}
struct timespec* set_stat_to_timespec(const struct stat& st, stat_time_type type, struct timespec& ts)
{
if(ST_TYPE_ATIME == type){
if(stat_time_type::ATIME == type){
#if defined(__APPLE__)
ts.tv_sec = st.st_atime;
ts.tv_nsec = st.st_atimespec.tv_nsec;
#else
ts = st.st_atim;
#endif
}else if(ST_TYPE_MTIME == type){
}else if(stat_time_type::MTIME == type){
#if defined(__APPLE__)
ts.tv_sec = st.st_mtime;
ts.tv_nsec = st.st_mtimespec.tv_nsec;
#else
ts = st.st_mtim;
#endif
}else if(ST_TYPE_CTIME == type){
}else if(stat_time_type::CTIME == type){
#if defined(__APPLE__)
ts.tv_sec = st.st_ctime;
ts.tv_nsec = st.st_ctimespec.tv_nsec;
@ -548,7 +533,7 @@ struct timespec* set_stat_to_timespec(const struct stat& st, stat_time_type type
ts = st.st_ctim;
#endif
}else{
S3FS_PRN_ERR("unknown type(%d), so use 0 as timespec.", type);
S3FS_PRN_ERR("unknown type(%d), so use 0 as timespec.", static_cast<int>(type));
ts.tv_sec = 0;
ts.tv_nsec = 0;
}
@ -565,7 +550,7 @@ struct timespec* s3fs_realtime(struct timespec& ts)
{
if(-1 == clock_gettime(static_cast<clockid_t>(CLOCK_REALTIME), &ts)){
S3FS_PRN_WARN("failed to clock_gettime by errno(%d)", errno);
ts.tv_sec = time(NULL);
ts.tv_sec = time(nullptr);
ts.tv_nsec = 0;
}
return &ts;
@ -574,7 +559,15 @@ struct timespec* s3fs_realtime(struct timespec& ts)
std::string s3fs_str_realtime()
{
struct timespec ts;
return str(*(s3fs_realtime(ts)));
return str(*s3fs_realtime(ts));
}
int s3fs_fclose(FILE* fp)
{
if(fp == nullptr){
return 0;
}
return fclose(fp);
}
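
A hypothetical call site, not taken from this change, showing the scoped-enum migration at the helpers above (assumes s3fs_util.h is included):

#include <sys/stat.h>
#include <ctime>
// assumes: #include "s3fs_util.h" for stat_time_type and the helpers

// Every caller switches from ST_TYPE_MTIME to the scoped stat_time_type::MTIME.
static void copy_mtime(const struct stat& src, struct stat& dst)
{
    struct timespec ts;
    set_stat_to_timespec(src, stat_time_type::MTIME, ts);
    set_timespec_to_stat(dst, stat_time_type::MTIME, ts);
}
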
/*

View File

@ -21,6 +21,8 @@
#ifndef S3FS_S3FS_UTIL_H_
#define S3FS_S3FS_UTIL_H_
#include <functional>
#ifndef CLOCK_REALTIME
#define CLOCK_REALTIME 0
#endif
@ -59,12 +61,16 @@ void print_launch_message(int argc, char** argv);
//
// Utility for nanosecond time(timespec)
//
enum stat_time_type{
ST_TYPE_ATIME,
ST_TYPE_MTIME,
ST_TYPE_CTIME
enum class stat_time_type{
ATIME,
MTIME,
CTIME
};
extern const struct timespec S3FS_OMIT_TS;
//-------------------------------------------------------------------
// Utility for nanosecond time(timespec)
//-------------------------------------------------------------------
static constexpr struct timespec S3FS_OMIT_TS = {0, UTIME_OMIT};
int compare_timespec(const struct timespec& ts1, const struct timespec& ts2);
int compare_timespec(const struct stat& st, stat_time_type type, const struct timespec& ts);
@ -74,6 +80,35 @@ std::string str_stat_time(const struct stat& st, stat_time_type type);
struct timespec* s3fs_realtime(struct timespec& ts);
std::string s3fs_str_realtime();
// Wrap fclose since it is illegal to take the address of a stdlib function
int s3fs_fclose(FILE* fp);
class scope_guard {
public:
template<class Callable>
explicit scope_guard(Callable&& undo_func)
: func(std::forward<Callable>(undo_func))
{}
~scope_guard() {
if(func != nullptr) {
func();
}
}
void dismiss() {
func = nullptr;
}
scope_guard(const scope_guard&) = delete;
scope_guard(scope_guard&& other) = delete;
scope_guard& operator=(const scope_guard&) = delete;
scope_guard& operator=(scope_guard&&) = delete;
private:
std::function<void()> func;
};
#endif // S3FS_S3FS_UTIL_H_
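
A usage sketch for the new scope_guard, with hypothetical file names and logic: the registered callable runs on every return path unless dismiss() is called first.

#include <cstdio>
// assumes: #include "s3fs_util.h" for scope_guard

static bool copy_first_line(const char* src, const char* dst)
{
    FILE* fin = fopen(src, "r");
    if(nullptr == fin){
        return false;
    }
    scope_guard guard([&]() { fclose(fin); });   // also closes fin on early returns

    char line[256];
    if(nullptr == fgets(line, sizeof(line), fin)){
        return false;                            // guard closes fin here
    }
    FILE* fout = fopen(dst, "w");
    if(nullptr == fout){
        return false;                            // and here
    }
    fputs(line, fout);
    fclose(fout);
    return true;                                 // guard still closes fin
}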
/*

View File

@ -34,12 +34,12 @@
//-------------------------------------------------------------------
// Variables
//-------------------------------------------------------------------
static const char c_strErrorObjectName[] = "FILE or SUBDIR in DIR";
static constexpr char c_strErrorObjectName[] = "FILE or SUBDIR in DIR";
// [NOTE]
// mutex for static variables in GetXmlNsUrl
//
static pthread_mutex_t* pxml_parser_mutex = NULL;
static pthread_mutex_t* pxml_parser_mutex = nullptr;
//-------------------------------------------------------------------
// Functions
@ -59,21 +59,20 @@ static bool GetXmlNsUrl(xmlDocPtr doc, std::string& nsurl)
AutoLock lock(pxml_parser_mutex);
if((tmLast + 60) < time(NULL)){
if((tmLast + 60) < time(nullptr)){
// refresh
tmLast = time(NULL);
tmLast = time(nullptr);
strNs = "";
xmlNodePtr pRootNode = xmlDocGetRootElement(doc);
if(pRootNode){
xmlNsPtr* nslist = xmlGetNsList(doc, pRootNode);
std::unique_ptr<xmlNsPtr, decltype(xmlFree)> nslist(xmlGetNsList(doc, pRootNode), xmlFree);
if(nslist){
if(nslist[0] && nslist[0]->href){
int len = xmlStrlen(nslist[0]->href);
if(*nslist && (*nslist)[0].href){
int len = xmlStrlen((*nslist)[0].href);
if(0 < len){
strNs = std::string(reinterpret_cast<const char*>(nslist[0]->href), len);
strNs = std::string(reinterpret_cast<const char*>((*nslist)[0].href), len);
}
}
S3FS_XMLFREE(nslist);
}
}
}
@ -86,19 +85,18 @@ static bool GetXmlNsUrl(xmlDocPtr doc, std::string& nsurl)
return result;
}
static xmlChar* get_base_exp(xmlDocPtr doc, const char* exp)
static unique_ptr_xmlChar get_base_exp(xmlDocPtr doc, const char* exp)
{
xmlXPathObjectPtr marker_xp;
std::string xmlnsurl;
std::string exp_string;
if(!doc){
return NULL;
return {nullptr, xmlFree};
}
xmlXPathContextPtr ctx = xmlXPathNewContext(doc);
unique_ptr_xmlXPathContext ctx(xmlXPathNewContext(doc), xmlXPathFreeContext);
if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){
xmlXPathRegisterNs(ctx, reinterpret_cast<const xmlChar*>("s3"), reinterpret_cast<const xmlChar*>(xmlnsurl.c_str()));
xmlXPathRegisterNs(ctx.get(), reinterpret_cast<const xmlChar*>("s3"), reinterpret_cast<const xmlChar*>(xmlnsurl.c_str()));
exp_string = "/s3:ListBucketResult/s3:";
} else {
exp_string = "/ListBucketResult/";
@ -106,67 +104,60 @@ static xmlChar* get_base_exp(xmlDocPtr doc, const char* exp)
exp_string += exp;
if(NULL == (marker_xp = xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(exp_string.c_str()), ctx))){
xmlXPathFreeContext(ctx);
return NULL;
unique_ptr_xmlXPathObject marker_xp(xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(exp_string.c_str()), ctx.get()), xmlXPathFreeObject);
if(nullptr == marker_xp){
return {nullptr, xmlFree};
}
if(xmlXPathNodeSetIsEmpty(marker_xp->nodesetval)){
S3FS_PRN_ERR("marker_xp->nodesetval is empty.");
xmlXPathFreeObject(marker_xp);
xmlXPathFreeContext(ctx);
return NULL;
S3FS_PRN_INFO("marker_xp->nodesetval is empty.");
return {nullptr, xmlFree};
}
xmlNodeSetPtr nodes = marker_xp->nodesetval;
xmlChar* result = xmlNodeListGetString(doc, nodes->nodeTab[0]->xmlChildrenNode, 1);
xmlXPathFreeObject(marker_xp);
xmlXPathFreeContext(ctx);
unique_ptr_xmlChar result(xmlNodeListGetString(doc, nodes->nodeTab[0]->xmlChildrenNode, 1), xmlFree);
return result;
}
static xmlChar* get_prefix(xmlDocPtr doc)
static unique_ptr_xmlChar get_prefix(xmlDocPtr doc)
{
return get_base_exp(doc, "Prefix");
}
xmlChar* get_next_continuation_token(xmlDocPtr doc)
unique_ptr_xmlChar get_next_continuation_token(xmlDocPtr doc)
{
return get_base_exp(doc, "NextContinuationToken");
}
xmlChar* get_next_marker(xmlDocPtr doc)
unique_ptr_xmlChar get_next_marker(xmlDocPtr doc)
{
return get_base_exp(doc, "NextMarker");
}
// return: the pointer to object name on allocated memory.
// the pointer to "c_strErrorObjectName".(not allocated)
// NULL(a case of something error occurred)
// nullptr(a case of something error occurred)
static char* get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path)
{
// Get full path
xmlChar* fullpath = xmlNodeListGetString(doc, node, 1);
unique_ptr_xmlChar fullpath(xmlNodeListGetString(doc, node, 1), xmlFree);
if(!fullpath){
S3FS_PRN_ERR("could not get object full path name..");
return NULL;
return nullptr;
}
// basepath(path) is the same as fullpath.

if(0 == strcmp(reinterpret_cast<char*>(fullpath), path)){
xmlFree(fullpath);
if(0 == strcmp(reinterpret_cast<char*>(fullpath.get()), path)){
return const_cast<char*>(c_strErrorObjectName);
}
// Make dir path and filename
std::string strdirpath = mydirname(std::string(reinterpret_cast<char*>(fullpath)));
std::string strmybpath = mybasename(std::string(reinterpret_cast<char*>(fullpath)));
std::string strdirpath = mydirname(reinterpret_cast<const char*>(fullpath.get()));
std::string strmybpath = mybasename(reinterpret_cast<const char*>(fullpath.get()));
const char* dirpath = strdirpath.c_str();
const char* mybname = strmybpath.c_str();
const char* basepath= (path && '/' == path[0]) ? &path[1] : path;
xmlFree(fullpath);
if('\0' == mybname[0]){
return NULL;
return nullptr;
}
// check subdir & file in subdir
@ -210,35 +201,32 @@ static char* get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path)
return const_cast<char*>(c_strErrorObjectName);
}
static xmlChar* get_exp_value_xml(xmlDocPtr doc, xmlXPathContextPtr ctx, const char* exp_key)
static unique_ptr_xmlChar get_exp_value_xml(xmlDocPtr doc, xmlXPathContextPtr ctx, const char* exp_key)
{
if(!doc || !ctx || !exp_key){
return NULL;
return {nullptr, xmlFree};
}
xmlXPathObjectPtr exp;
xmlNodeSetPtr exp_nodes;
xmlChar* exp_value;
// search exp_key tag
if(NULL == (exp = xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(exp_key), ctx))){
unique_ptr_xmlXPathObject exp(xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(exp_key), ctx), xmlXPathFreeObject);
if(nullptr == exp){
S3FS_PRN_ERR("Could not find key(%s).", exp_key);
return NULL;
return {nullptr, xmlFree};
}
if(xmlXPathNodeSetIsEmpty(exp->nodesetval)){
S3FS_PRN_ERR("Key(%s) node is empty.", exp_key);
S3FS_XMLXPATHFREEOBJECT(exp);
return NULL;
return {nullptr, xmlFree};
}
// get exp_key value & set in struct
exp_nodes = exp->nodesetval;
if(NULL == (exp_value = xmlNodeListGetString(doc, exp_nodes->nodeTab[0]->xmlChildrenNode, 1))){
unique_ptr_xmlChar exp_value(xmlNodeListGetString(doc, exp_nodes->nodeTab[0]->xmlChildrenNode, 1), xmlFree);
if(nullptr == exp_value){
S3FS_PRN_ERR("Key(%s) value is empty.", exp_key);
S3FS_XMLXPATHFREEOBJECT(exp);
return NULL;
return {nullptr, xmlFree};
}
S3FS_XMLXPATHFREEOBJECT(exp);
return exp_value;
}
@ -248,7 +236,7 @@ bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list)
return false;
}
xmlXPathContextPtr ctx = xmlXPathNewContext(doc);;
unique_ptr_xmlXPathContext ctx(xmlXPathNewContext(doc), xmlXPathFreeContext);
std::string xmlnsurl;
std::string ex_upload = "//";
@ -257,7 +245,7 @@ bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list)
std::string ex_date;
if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){
xmlXPathRegisterNs(ctx, reinterpret_cast<const xmlChar*>("s3"), reinterpret_cast<const xmlChar*>(xmlnsurl.c_str()));
xmlXPathRegisterNs(ctx.get(), reinterpret_cast<const xmlChar*>("s3"), reinterpret_cast<const xmlChar*>(xmlnsurl.c_str()));
ex_upload += "s3:";
ex_key += "s3:";
ex_id += "s3:";
@ -269,15 +257,13 @@ bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list)
ex_date += "Initiated";
// get "Upload" Tags
xmlXPathObjectPtr upload_xp;
if(NULL == (upload_xp = xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(ex_upload.c_str()), ctx))){
unique_ptr_xmlXPathObject upload_xp(xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(ex_upload.c_str()), ctx.get()), xmlXPathFreeObject);
if(nullptr == upload_xp){
S3FS_PRN_ERR("xmlXPathEvalExpression returns null.");
return false;
}
if(xmlXPathNodeSetIsEmpty(upload_xp->nodesetval)){
S3FS_PRN_INFO("upload_xp->nodesetval is empty.");
S3FS_XMLXPATHFREEOBJECT(upload_xp);
S3FS_XMLXPATHFREECONTEXT(ctx);
return true;
}
@ -289,70 +275,57 @@ bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list)
ctx->node = upload_nodes->nodeTab[cnt];
INCOMP_MPU_INFO part;
xmlChar* ex_value;
// search "Key" tag
if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_key.c_str()))){
unique_ptr_xmlChar ex_value(get_exp_value_xml(doc, ctx.get(), ex_key.c_str()));
if(nullptr == ex_value){
continue;
}
if('/' != *(reinterpret_cast<char*>(ex_value))){
if('/' != *(reinterpret_cast<char*>(ex_value.get()))){
part.key = "/";
}else{
part.key = "";
}
part.key += reinterpret_cast<char*>(ex_value);
S3FS_XMLFREE(ex_value);
part.key += reinterpret_cast<char*>(ex_value.get());
// search "UploadId" tag
if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_id.c_str()))){
if(nullptr == (ex_value = get_exp_value_xml(doc, ctx.get(), ex_id.c_str()))){
continue;
}
part.id = reinterpret_cast<char*>(ex_value);
S3FS_XMLFREE(ex_value);
part.id = reinterpret_cast<char*>(ex_value.get());
// search "Initiated" tag
if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_date.c_str()))){
if(nullptr == (ex_value = get_exp_value_xml(doc, ctx.get(), ex_date.c_str()))){
continue;
}
part.date = reinterpret_cast<char*>(ex_value);
S3FS_XMLFREE(ex_value);
part.date = reinterpret_cast<char*>(ex_value.get());
list.push_back(part);
}
S3FS_XMLXPATHFREEOBJECT(upload_xp);
S3FS_XMLXPATHFREECONTEXT(ctx);
return true;
}
bool is_truncated(xmlDocPtr doc)
{
bool result = false;
xmlChar* strTruncate = get_base_exp(doc, "IsTruncated");
unique_ptr_xmlChar strTruncate(get_base_exp(doc, "IsTruncated"));
if(!strTruncate){
return false;
}
if(0 == strcasecmp(reinterpret_cast<const char*>(strTruncate), "true")){
result = true;
}
xmlFree(strTruncate);
return result;
return 0 == strcasecmp(reinterpret_cast<const char*>(strTruncate.get()), "true");
}
int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head)
int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head, bool prefix)
{
xmlXPathObjectPtr contents_xp;
xmlNodeSetPtr content_nodes;
if(NULL == (contents_xp = xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(ex_contents), ctx))){
unique_ptr_xmlXPathObject contents_xp(xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(ex_contents), ctx), xmlXPathFreeObject);
if(nullptr == contents_xp){
S3FS_PRN_ERR("xmlXPathEvalExpression returns null.");
return -1;
}
if(xmlXPathNodeSetIsEmpty(contents_xp->nodesetval)){
S3FS_PRN_DBG("contents_xp->nodesetval is empty.");
S3FS_XMLXPATHFREEOBJECT(contents_xp);
return 0;
}
content_nodes = contents_xp->nodesetval;
@ -364,14 +337,13 @@ int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextP
ctx->node = content_nodes->nodeTab[i];
// object name
xmlXPathObjectPtr key;
if(NULL == (key = xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(ex_key), ctx))){
unique_ptr_xmlXPathObject key(xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(ex_key), ctx), xmlXPathFreeObject);
if(nullptr == key){
S3FS_PRN_WARN("key is null. but continue.");
continue;
}
if(xmlXPathNodeSetIsEmpty(key->nodesetval)){
S3FS_PRN_WARN("node is empty. but continue.");
xmlXPathFreeObject(key);
continue;
}
xmlNodeSetPtr key_nodes = key->nodesetval;
@ -386,19 +358,17 @@ int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextP
if(!isCPrefix && ex_etag){
// Get ETag
xmlXPathObjectPtr ETag;
if(NULL != (ETag = xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(ex_etag), ctx))){
unique_ptr_xmlXPathObject ETag(xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(ex_etag), ctx), xmlXPathFreeObject);
if(nullptr != ETag){
if(xmlXPathNodeSetIsEmpty(ETag->nodesetval)){
S3FS_PRN_INFO("ETag->nodesetval is empty.");
}else{
xmlNodeSetPtr etag_nodes = ETag->nodesetval;
xmlChar* petag = xmlNodeListGetString(doc, etag_nodes->nodeTab[0]->xmlChildrenNode, 1);
unique_ptr_xmlChar petag(xmlNodeListGetString(doc, etag_nodes->nodeTab[0]->xmlChildrenNode, 1), xmlFree);
if(petag){
stretag = reinterpret_cast<char*>(petag);
xmlFree(petag);
stretag = reinterpret_cast<const char*>(petag.get());
}
}
xmlXPathFreeObject(ETag);
}
}
@ -409,19 +379,17 @@ int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextP
std::string decname = get_decoded_cr_code(name);
free(name);
if(!head.insert(decname.c_str(), (!stretag.empty() ? stretag.c_str() : NULL), is_dir)){
if(prefix){
head.common_prefixes.push_back(decname);
}
if(!head.insert(decname.c_str(), (!stretag.empty() ? stretag.c_str() : nullptr), is_dir)){
S3FS_PRN_ERR("insert_object returns with error.");
xmlXPathFreeObject(key);
xmlXPathFreeObject(contents_xp);
S3FS_MALLOCTRIM(0);
return -1;
}
}else{
S3FS_PRN_DBG("name is file or subdir in dir. but continue.");
}
xmlXPathFreeObject(key);
}
S3FS_XMLXPATHFREEOBJECT(contents_xp);
return 0;
}
@ -440,16 +408,13 @@ int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head)
}
// If there is not <Prefix>, use path instead of it.
xmlChar* pprefix = get_prefix(doc);
std::string prefix = (pprefix ? reinterpret_cast<char*>(pprefix) : path ? path : "");
if(pprefix){
xmlFree(pprefix);
}
auto pprefix = get_prefix(doc);
std::string prefix = (pprefix ? reinterpret_cast<char*>(pprefix.get()) : path ? path : "");
xmlXPathContextPtr ctx = xmlXPathNewContext(doc);
unique_ptr_xmlXPathContext ctx(xmlXPathNewContext(doc), xmlXPathFreeContext);
if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){
xmlXPathRegisterNs(ctx, reinterpret_cast<const xmlChar*>("s3"), reinterpret_cast<const xmlChar*>(xmlnsurl.c_str()));
xmlXPathRegisterNs(ctx.get(), reinterpret_cast<const xmlChar*>("s3"), reinterpret_cast<const xmlChar*>(xmlnsurl.c_str()));
ex_contents+= "s3:";
ex_key += "s3:";
ex_cprefix += "s3:";
@ -462,14 +427,12 @@ int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head)
ex_prefix += "Prefix";
ex_etag += "ETag";
if(-1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_contents.c_str(), ex_key.c_str(), ex_etag.c_str(), 0, head) ||
-1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_cprefix.c_str(), ex_prefix.c_str(), NULL, 1, head) )
if(-1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx.get(), ex_contents.c_str(), ex_key.c_str(), ex_etag.c_str(), 0, head, /*prefix=*/ false) ||
-1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx.get(), ex_cprefix.c_str(), ex_prefix.c_str(), nullptr, 1, head, /*prefix=*/ true) )
{
S3FS_PRN_ERR("append_objects_from_xml_ex returns with error.");
S3FS_XMLXPATHFREECONTEXT(ctx);
return -1;
}
S3FS_XMLXPATHFREECONTEXT(ctx);
return 0;
}
@ -486,16 +449,15 @@ bool simple_parse_xml(const char* data, size_t len, const char* key, std::string
}
value.clear();
xmlDocPtr doc;
if(NULL == (doc = xmlReadMemory(data, static_cast<int>(len), "", NULL, 0))){
std::unique_ptr<xmlDoc, decltype(&xmlFreeDoc)> doc(xmlReadMemory(data, static_cast<int>(len), "", nullptr, 0), xmlFreeDoc);
if(nullptr == doc){
return false;
}
if(NULL == doc->children){
S3FS_XMLFREEDOC(doc);
if(nullptr == doc->children){
return false;
}
for(xmlNodePtr cur_node = doc->children->children; NULL != cur_node; cur_node = cur_node->next){
for(xmlNodePtr cur_node = doc->children->children; nullptr != cur_node; cur_node = cur_node->next){
// For DEBUG
// std::string cur_node_name(reinterpret_cast<const char *>(cur_node->name));
// printf("cur_node_name: %s\n", cur_node_name.c_str());
@ -516,7 +478,6 @@ bool simple_parse_xml(const char* data, size_t len, const char* key, std::string
}
}
}
S3FS_XMLFREEDOC(doc);
return result;
}
@ -539,7 +500,7 @@ bool init_parser_xml_lock()
if(0 != pthread_mutex_init(pxml_parser_mutex, &attr)){
delete pxml_parser_mutex;
pxml_parser_mutex = NULL;
pxml_parser_mutex = nullptr;
return false;
}
return true;
@ -554,7 +515,7 @@ bool destroy_parser_xml_lock()
return false;
}
delete pxml_parser_mutex;
pxml_parser_mutex = NULL;
pxml_parser_mutex = nullptr;
return true;
}
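
A stand-alone sketch of the ownership pattern adopted throughout this file: each libxml2 allocation is wrapped immediately in a unique_ptr whose deleter is the matching free function, so early returns no longer leak the document or the XPath context (has_xpath_context is an illustrative name, not part of s3fs).

#include <libxml/parser.h>
#include <libxml/xpath.h>
#include <memory>

static bool has_xpath_context(const char* data, int len)
{
    std::unique_ptr<xmlDoc, decltype(&xmlFreeDoc)> doc(xmlReadMemory(data, len, "", nullptr, 0), xmlFreeDoc);
    if(!doc){
        return false;
    }
    std::unique_ptr<xmlXPathContext, decltype(&xmlXPathFreeContext)> ctx(xmlXPathNewContext(doc.get()), xmlXPathFreeContext);
    return nullptr != ctx;    // both objects are freed automatically
}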

View File

@ -22,21 +22,27 @@
#define S3FS_S3FS_XML_H_
#include <libxml/xpath.h>
#include <libxml/parser.h> // [NOTE] this header must be included explicitly in some environments
#include <memory>
#include <string>
#include "mpu_util.h"
class S3ObjList;
typedef std::unique_ptr<xmlChar, decltype(xmlFree)> unique_ptr_xmlChar;
typedef std::unique_ptr<xmlXPathObject, decltype(&xmlXPathFreeObject)> unique_ptr_xmlXPathObject;
typedef std::unique_ptr<xmlXPathContext, decltype(&xmlXPathFreeContext)> unique_ptr_xmlXPathContext;
typedef std::unique_ptr<xmlDoc, decltype(&xmlFreeDoc)> unique_ptr_xmlDoc;
//-------------------------------------------------------------------
// Functions
//-------------------------------------------------------------------
bool is_truncated(xmlDocPtr doc);
int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head);
int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head, bool prefix);
int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head);
xmlChar* get_next_continuation_token(xmlDocPtr doc);
xmlChar* get_next_marker(xmlDocPtr doc);
unique_ptr_xmlChar get_next_continuation_token(xmlDocPtr doc);
unique_ptr_xmlChar get_next_marker(xmlDocPtr doc);
bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list);
bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value);
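
A hypothetical caller of the declarations above (assumes this header is included): the unique_ptr_xmlChar return value owns the xmlChar buffer, so the explicit xmlFree() calls disappear at the call sites.

#include <string>
// assumes: #include "s3fs_xml.h"

static std::string next_token_or_empty(xmlDocPtr doc)
{
    unique_ptr_xmlChar token = get_next_continuation_token(doc);
    if(!token){
        return "";
    }
    return reinterpret_cast<const char*>(token.get());
}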

View File

@ -84,7 +84,7 @@ bool S3ObjList::insert(const char* name, const char* etag, bool is_dir)
(*iter).second.orgname = orgname;
(*iter).second.is_dir = is_dir;
if(etag){
(*iter).second.etag = std::string(etag); // over write
(*iter).second.etag = etag; // over write
}
}else{
// add new object
@ -132,10 +132,10 @@ const s3obj_entry* S3ObjList::GetS3Obj(const char* name) const
s3obj_t::const_iterator iter;
if(!name || '\0' == name[0]){
return NULL;
return nullptr;
}
if(objects.end() == (iter = objects.find(name))){
return NULL;
return nullptr;
}
return &((*iter).second);
}
@ -145,10 +145,10 @@ std::string S3ObjList::GetOrgName(const char* name) const
const s3obj_entry* ps3obj;
if(!name || '\0' == name[0]){
return std::string("");
return "";
}
if(NULL == (ps3obj = GetS3Obj(name))){
return std::string("");
if(nullptr == (ps3obj = GetS3Obj(name))){
return "";
}
return ps3obj->orgname;
}
@ -158,13 +158,13 @@ std::string S3ObjList::GetNormalizedName(const char* name) const
const s3obj_entry* ps3obj;
if(!name || '\0' == name[0]){
return std::string("");
return "";
}
if(NULL == (ps3obj = GetS3Obj(name))){
return std::string("");
if(nullptr == (ps3obj = GetS3Obj(name))){
return "";
}
if(ps3obj->normalname.empty()){
return std::string(name);
return name;
}
return ps3obj->normalname;
}
@ -174,10 +174,10 @@ std::string S3ObjList::GetETag(const char* name) const
const s3obj_entry* ps3obj;
if(!name || '\0' == name[0]){
return std::string("");
return "";
}
if(NULL == (ps3obj = GetS3Obj(name))){
return std::string("");
if(nullptr == (ps3obj = GetS3Obj(name))){
return "";
}
return ps3obj->etag;
}
@ -186,7 +186,7 @@ bool S3ObjList::IsDir(const char* name) const
{
const s3obj_entry* ps3obj;
if(NULL == (ps3obj = GetS3Obj(name))){
if(nullptr == (ps3obj = GetS3Obj(name))){
return false;
}
return ps3obj->is_dir;
@ -197,13 +197,13 @@ bool S3ObjList::GetLastName(std::string& lastname) const
bool result = false;
lastname = "";
for(s3obj_t::const_iterator iter = objects.begin(); iter != objects.end(); ++iter){
if((*iter).second.orgname.length()){
if(0 > strcmp(lastname.c_str(), (*iter).second.orgname.c_str())){
if(!iter->second.orgname.empty()){
if(lastname.compare(iter->second.orgname) < 0){
lastname = (*iter).second.orgname;
result = true;
}
}else{
if(0 > strcmp(lastname.c_str(), (*iter).second.normalname.c_str())){
if(lastname.compare(iter->second.normalname) < 0){
lastname = (*iter).second.normalname;
result = true;
}

View File

@ -21,9 +21,9 @@
#ifndef S3FS_S3OBJLIST_H_
#define S3FS_S3OBJLIST_H_
#include <list>
#include <map>
#include <string>
#include <vector>
//-------------------------------------------------------------------
// Structure / Typedef
@ -38,7 +38,7 @@ struct s3obj_entry{
};
typedef std::map<std::string, struct s3obj_entry> s3obj_t;
typedef std::list<std::string> s3obj_list_t;
typedef std::vector<std::string> s3obj_list_t;
//-------------------------------------------------------------------
// Class S3ObjList
@ -47,6 +47,8 @@ class S3ObjList
{
private:
s3obj_t objects;
public:
std::vector<std::string> common_prefixes;
private:
bool insert_normalized(const char* name, const char* normalized, bool is_dir);
@ -60,7 +62,7 @@ class S3ObjList
~S3ObjList() {}
bool IsEmpty() const { return objects.empty(); }
bool insert(const char* name, const char* etag = NULL, bool is_dir = false);
bool insert(const char* name, const char* etag = nullptr, bool is_dir = false);
std::string GetOrgName(const char* name) const;
std::string GetNormalizedName(const char* name) const;
std::string GetETag(const char* name) const;
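
An illustration of the public surface shown above, using made-up object names and ETag value: entries are keyed by name and, with this change, CommonPrefixes results can be kept in the new common_prefixes vector.

#include <string>
// assumes: #include "s3objlist.h"

static void example_listing()
{
    S3ObjList head;
    head.insert("dir/", nullptr, /*is_dir=*/ true);
    head.insert("dir/file.txt", "\"etag-value\"");
    head.common_prefixes.push_back("dir/");      // new: delimiter prefixes are kept too

    std::string etag = head.GetETag("dir/file.txt");
    bool is_dir      = head.IsDir("dir/");
    (void)etag;
    (void)is_dir;
}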

View File

@ -29,7 +29,7 @@
//-------------------------------------------------------------------
// Class S3fsSignals
//-------------------------------------------------------------------
S3fsSignals* S3fsSignals::pSingleton = NULL;
std::unique_ptr<S3fsSignals> S3fsSignals::pSingleton;
bool S3fsSignals::enableUsr1 = false;
//-------------------------------------------------------------------
@ -38,15 +38,14 @@ bool S3fsSignals::enableUsr1 = false;
bool S3fsSignals::Initialize()
{
if(!S3fsSignals::pSingleton){
S3fsSignals::pSingleton = new S3fsSignals;
S3fsSignals::pSingleton.reset(new S3fsSignals);
}
return true;
}
bool S3fsSignals::Destroy()
{
delete S3fsSignals::pSingleton;
S3fsSignals::pSingleton = NULL;
S3fsSignals::pSingleton.reset();
return true;
}
@ -91,10 +90,10 @@ void* S3fsSignals::CheckCacheWorker(void* arg)
{
Semaphore* pSem = static_cast<Semaphore*>(arg);
if(!pSem){
pthread_exit(NULL);
pthread_exit(nullptr);
}
if(!S3fsSignals::enableUsr1){
pthread_exit(NULL);
pthread_exit(nullptr);
}
// wait and loop
@ -118,7 +117,7 @@ void* S3fsSignals::CheckCacheWorker(void* arg)
pSem->wait();
}
}
return NULL;
return nullptr;
}
void S3fsSignals::HandlerUSR2(int sig)
@ -137,7 +136,7 @@ bool S3fsSignals::InitUsr2Handler()
memset(&sa, 0, sizeof(struct sigaction));
sa.sa_handler = S3fsSignals::HandlerUSR2;
sa.sa_flags = SA_RESTART;
if(0 != sigaction(SIGUSR2, &sa, NULL)){
if(0 != sigaction(SIGUSR2, &sa, nullptr)){
return false;
}
return true;
@ -159,7 +158,7 @@ bool S3fsSignals::InitHupHandler()
memset(&sa, 0, sizeof(struct sigaction));
sa.sa_handler = S3fsSignals::HandlerHUP;
sa.sa_flags = SA_RESTART;
if(0 != sigaction(SIGHUP, &sa, NULL)){
if(0 != sigaction(SIGHUP, &sa, nullptr)){
return false;
}
return true;
@ -168,7 +167,7 @@ bool S3fsSignals::InitHupHandler()
//-------------------------------------------------------------------
// Methods
//-------------------------------------------------------------------
S3fsSignals::S3fsSignals() : pThreadUsr1(NULL), pSemUsr1(NULL)
S3fsSignals::S3fsSignals()
{
if(S3fsSignals::enableUsr1){
if(!InitUsr1Handler()){
@ -201,23 +200,21 @@ bool S3fsSignals::InitUsr1Handler()
// create thread
int result;
pSemUsr1 = new Semaphore(0);
pThreadUsr1 = new pthread_t;
if(0 != (result = pthread_create(pThreadUsr1, NULL, S3fsSignals::CheckCacheWorker, static_cast<void*>(pSemUsr1)))){
std::unique_ptr<Semaphore> pSemUsr1_tmp(new Semaphore(0));
std::unique_ptr<pthread_t> pThreadUsr1_tmp(new pthread_t);
if(0 != (result = pthread_create(pThreadUsr1.get(), nullptr, S3fsSignals::CheckCacheWorker, static_cast<void*>(pSemUsr1_tmp.get())))){
S3FS_PRN_ERR("Could not create thread for SIGUSR1 by %d", result);
delete pSemUsr1;
delete pThreadUsr1;
pSemUsr1 = NULL;
pThreadUsr1 = NULL;
return false;
}
pSemUsr1 = std::move(pSemUsr1_tmp);
pThreadUsr1 = std::move(pThreadUsr1_tmp);
// set handler
struct sigaction sa;
memset(&sa, 0, sizeof(struct sigaction));
sa.sa_handler = S3fsSignals::HandlerUSR1;
sa.sa_flags = SA_RESTART;
if(0 != sigaction(SIGUSR1, &sa, NULL)){
if(0 != sigaction(SIGUSR1, &sa, nullptr)){
S3FS_PRN_ERR("Could not set signal handler for SIGUSR1");
DestroyUsr1Handler();
return false;
@ -238,16 +235,14 @@ bool S3fsSignals::DestroyUsr1Handler()
pSemUsr1->post();
// wait for thread exiting
void* retval = NULL;
void* retval = nullptr;
int result;
if(0 != (result = pthread_join(*pThreadUsr1, &retval))){
S3FS_PRN_ERR("Could not stop thread for SIGUSR1 by %d", result);
return false;
}
delete pSemUsr1;
delete pThreadUsr1;
pSemUsr1 = NULL;
pThreadUsr1 = NULL;
pSemUsr1.reset();
pThreadUsr1.reset();
return true;
}
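
A stand-alone sketch, with hypothetical names, of the commit-on-success pattern used in InitUsr1Handler above: resources are built into unique_ptr temporaries and only moved into the long-lived members after pthread_create() succeeds, so the failure path cleans up automatically.

#include <memory>
#include <pthread.h>

struct worker_slot
{
    std::unique_ptr<pthread_t> thread;

    bool start(void* (*fn)(void*), void* arg)
    {
        std::unique_ptr<pthread_t> tmp(new pthread_t);
        if(0 != pthread_create(tmp.get(), nullptr, fn, arg)){
            return false;              // tmp is released here, nothing leaks
        }
        thread = std::move(tmp);       // commit only on success
        return true;
    }
};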

View File

@ -21,6 +21,8 @@
#ifndef S3FS_SIGHANDLERS_H_
#define S3FS_SIGHANDLERS_H_
#include <memory>
class Semaphore;
//----------------------------------------------
@ -29,14 +31,14 @@ class Semaphore;
class S3fsSignals
{
private:
static S3fsSignals* pSingleton;
static std::unique_ptr<S3fsSignals> pSingleton;
static bool enableUsr1;
pthread_t* pThreadUsr1;
Semaphore* pSemUsr1;
std::unique_ptr<pthread_t> pThreadUsr1;
std::unique_ptr<Semaphore> pSemUsr1;
protected:
static S3fsSignals* get() { return pSingleton; }
static S3fsSignals* get() { return pSingleton.get(); }
static void HandlerUSR1(int sig);
static void* CheckCacheWorker(void* arg);
@ -48,13 +50,17 @@ class S3fsSignals
static bool InitHupHandler();
S3fsSignals();
~S3fsSignals();
S3fsSignals(const S3fsSignals&) = delete;
S3fsSignals(S3fsSignals&&) = delete;
S3fsSignals& operator=(const S3fsSignals&) = delete;
S3fsSignals& operator=(S3fsSignals&&) = delete;
bool InitUsr1Handler();
bool DestroyUsr1Handler();
bool WakeupUsr1Thread();
public:
~S3fsSignals();
static bool Initialize();
static bool Destroy();

View File

@ -32,28 +32,12 @@
//-------------------------------------------------------------------
// Global variables
//-------------------------------------------------------------------
const char SPACES[] = " \t\r\n";
//-------------------------------------------------------------------
// Templates
// Functions
//-------------------------------------------------------------------
template <class T> std::string str(T value)
{
std::ostringstream s;
s << value;
return s.str();
}
template std::string str(short value);
template std::string str(unsigned short value);
template std::string str(int value);
template std::string str(unsigned int value);
template std::string str(long value);
template std::string str(unsigned long value);
template std::string str(long long value);
template std::string str(unsigned long long value);
template<> std::string str(const struct timespec value)
std::string str(const struct timespec value)
{
std::ostringstream s;
s << value.tv_sec;
@ -63,10 +47,6 @@ template<> std::string str(const struct timespec value)
return s.str();
}
//-------------------------------------------------------------------
// Functions
//-------------------------------------------------------------------
#ifdef __MSYS__
/*
* Polyfill for strptime function
@ -87,7 +67,7 @@ char* strptime(const char* s, const char* f, struct tm* tm)
bool s3fs_strtoofft(off_t* value, const char* str, int base)
{
if(value == NULL || str == NULL){
if(value == nullptr || str == nullptr){
return false;
}
errno = 0;
@ -124,15 +104,13 @@ std::string lower(std::string s)
return s;
}
std::string trim_left(const std::string &s, const char *t /* = SPACES */)
std::string trim_left(std::string d, const char *t /* = SPACES */)
{
std::string d(s);
return d.erase(0, s.find_first_not_of(t));
return d.erase(0, d.find_first_not_of(t));
}
std::string trim_right(const std::string &s, const char *t /* = SPACES */)
std::string trim_right(std::string d, const char *t /* = SPACES */)
{
std::string d(s);
std::string::size_type i(d.find_last_not_of(t));
if(i == std::string::npos){
return "";
@ -141,9 +119,17 @@ std::string trim_right(const std::string &s, const char *t /* = SPACES */)
}
}
std::string trim(const std::string &s, const char *t /* = SPACES */)
std::string trim(std::string s, const char *t /* = SPACES */)
{
return trim_left(trim_right(s, t), t);
return trim_left(trim_right(std::move(s), t), t);
}
std::string peeloff(const std::string& s)
{
if(s.size() < 2 || *s.begin() != '"' || *s.rbegin() != '"'){
return s;
}
return s.substr(1, s.size() - 2);
}
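
Illustration only, with made-up values (assumes string_util.h is included): trim() now takes its argument by value and moves it through trim_right/trim_left, and peeloff() strips exactly one pair of surrounding double quotes.

#include <cassert>
#include <string>
// assumes: #include "string_util.h"

static void example_trim_peeloff()
{
    std::string a = trim("  hello \t");        // -> "hello"
    std::string b = peeloff("\"quoted\"");     // -> "quoted"
    std::string c = peeloff("unquoted");       // unchanged: no surrounding quotes
    assert(a == "hello" && b == "quoted" && c == "unquoted");
}
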
//
@ -160,16 +146,16 @@ std::string trim(const std::string &s, const char *t /* = SPACES */)
// Therefore, it is a function to use as URL encoding
// for use in query strings.
//
static const char* encode_general_except_chars = ".-_~"; // For general URL encode
static const char* encode_path_except_chars = ".-_~/"; // For fuse(included path) URL encode
static const char* encode_query_except_chars = ".-_~=&%"; // For query params(and encoded string)
static constexpr char encode_general_except_chars[] = ".-_~"; // For general URL encode
static constexpr char encode_path_except_chars[] = ".-_~/"; // For fuse(included path) URL encode
static constexpr char encode_query_except_chars[] = ".-_~=&%"; // For query params(and encoded string)
static std::string rawUrlEncode(const std::string &s, const char* except_chars)
{
std::string result;
for (size_t i = 0; i < s.length(); ++i) {
unsigned char c = s[i];
if((except_chars && NULL != strchr(except_chars, c)) ||
if((except_chars && nullptr != strchr(except_chars, c)) ||
(c >= 'a' && c <= 'z') ||
(c >= 'A' && c <= 'Z') ||
(c >= '0' && c <= '9') )
@ -274,7 +260,7 @@ bool get_keyword_value(const std::string& target, const char* keyword, std::stri
std::string get_date_rfc850()
{
char buf[100];
time_t t = time(NULL);
time_t t = time(nullptr);
struct tm res;
strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S GMT", gmtime_r(&t, &res));
return buf;
@ -282,7 +268,7 @@ std::string get_date_rfc850()
void get_date_sigv3(std::string& date, std::string& date8601)
{
time_t tm = time(NULL);
time_t tm = time(nullptr);
date = get_date_string(tm);
date8601 = get_date_iso8601(tm);
}
@ -310,7 +296,7 @@ bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime)
}
struct tm tm;
char* prest = strptime(pdate, "%Y-%m-%dT%T", &tm);
const char* prest = strptime(pdate, "%Y-%m-%dT%T", &tm);
if(prest == pdate){
// wrong format
return false;
@ -392,31 +378,26 @@ std::string s3fs_hex_upper(const unsigned char* input, size_t length)
return s3fs_hex(input, length, "0123456789ABCDEF");
}
char* s3fs_base64(const unsigned char* input, size_t length)
std::string s3fs_base64(const unsigned char* input, size_t length)
{
static const char base[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
char* result;
static constexpr char base[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
if(!input || 0 == length){
return NULL;
}
result = new char[((length + 3 - 1) / 3) * 4 + 1];
std::string result;
result.reserve(((length + 3 - 1) / 3) * 4 + 1);
unsigned char parts[4];
size_t rpos;
size_t wpos;
for(rpos = 0, wpos = 0; rpos < length; rpos += 3){
for(rpos = 0; rpos < length; rpos += 3){
parts[0] = (input[rpos] & 0xfc) >> 2;
parts[1] = ((input[rpos] & 0x03) << 4) | ((((rpos + 1) < length ? input[rpos + 1] : 0x00) & 0xf0) >> 4);
parts[2] = (rpos + 1) < length ? (((input[rpos + 1] & 0x0f) << 2) | ((((rpos + 2) < length ? input[rpos + 2] : 0x00) & 0xc0) >> 6)) : 0x40;
parts[3] = (rpos + 2) < length ? (input[rpos + 2] & 0x3f) : 0x40;
result[wpos++] = base[parts[0]];
result[wpos++] = base[parts[1]];
result[wpos++] = base[parts[2]];
result[wpos++] = base[parts[3]];
result += base[parts[0]];
result += base[parts[1]];
result += base[parts[2]];
result += base[parts[3]];
}
result[wpos] = '\0';
return result;
}
@ -442,34 +423,28 @@ inline unsigned char char_decode64(const char ch)
return by;
}
unsigned char* s3fs_decode64(const char* input, size_t input_len, size_t* plength)
std::string s3fs_decode64(const char* input, size_t input_len)
{
unsigned char* result;
if(!input || 0 == input_len || !plength){
return NULL;
}
result = new unsigned char[input_len / 4 * 3];
std::string result;
result.reserve(input_len / 4 * 3);
unsigned char parts[4];
size_t rpos;
size_t wpos;
for(rpos = 0, wpos = 0; rpos < input_len; rpos += 4){
for(rpos = 0; rpos < input_len; rpos += 4){
parts[0] = char_decode64(input[rpos]);
parts[1] = (rpos + 1) < input_len ? char_decode64(input[rpos + 1]) : 64;
parts[2] = (rpos + 2) < input_len ? char_decode64(input[rpos + 2]) : 64;
parts[3] = (rpos + 3) < input_len ? char_decode64(input[rpos + 3]) : 64;
result[wpos++] = ((parts[0] << 2) & 0xfc) | ((parts[1] >> 4) & 0x03);
result += static_cast<char>(((parts[0] << 2) & 0xfc) | ((parts[1] >> 4) & 0x03));
if(64 == parts[2]){
break;
}
result[wpos++] = ((parts[1] << 4) & 0xf0) | ((parts[2] >> 2) & 0x0f);
result += static_cast<char>(((parts[1] << 4) & 0xf0) | ((parts[2] >> 2) & 0x0f));
if(64 == parts[3]){
break;
}
result[wpos++] = ((parts[2] << 6) & 0xc0) | (parts[3] & 0x3f);
result += static_cast<char>(((parts[2] << 6) & 0xc0) | (parts[3] & 0x3f));
}
*plength = wpos;
return result;
}
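
A round-trip sketch of the new std::string-based base64 API (the encoded value matches the unit tests in this change set; no manual delete[] is needed any more):

#include <string>
// assumes: #include "string_util.h"

static bool base64_roundtrip()
{
    std::string encoded = s3fs_base64(reinterpret_cast<const unsigned char*>("1234"), 4);   // "MTIzNA=="
    std::string decoded = s3fs_decode64(encoded.c_str(), encoded.length());                 // "1234"
    return decoded == "1234";
}
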
@ -484,7 +459,7 @@ unsigned char* s3fs_decode64(const char* input, size_t input_len, size_t* plengt
// Base location for transform. The range 0xE000 - 0xF8ff
// is a private range, se use the start of this range.
static const unsigned int escape_base = 0xe000;
static constexpr unsigned int escape_base = 0xe000;
// encode bytes into wobbly utf8.
// 'result' can be null. returns true if transform was needed.

View File

@ -22,6 +22,7 @@
#define S3FS_STRING_UTIL_H_
#include <cstring>
#include <string>
//
// A collection of string utilities for manipulating URLs and HTTP responses.
@ -29,7 +30,7 @@
//-------------------------------------------------------------------
// Global variables
//-------------------------------------------------------------------
extern const char SPACES[];
static constexpr char SPACES[] = " \t\r\n";
//-------------------------------------------------------------------
// Inline functions
@ -37,11 +38,6 @@ extern const char SPACES[];
static inline int is_prefix(const char *str, const char *prefix) { return strncmp(str, prefix, strlen(prefix)) == 0; }
static inline const char* SAFESTRPTR(const char *strptr) { return strptr ? strptr : ""; }
//-------------------------------------------------------------------
// Templates
//-------------------------------------------------------------------
template <class T> std::string str(T value);
//-------------------------------------------------------------------
// Macros(WTF8)
//-------------------------------------------------------------------
@ -56,6 +52,9 @@ template <class T> std::string str(T value);
//-------------------------------------------------------------------
// Utilities
//-------------------------------------------------------------------
// TODO: rename to to_string?
std::string str(const struct timespec value);
#ifdef __MSYS__
//
// Polyfill for strptime function.
@ -76,10 +75,11 @@ off_t cvt_strtoofft(const char* str, int base);
//
// String Manipulation
//
std::string trim_left(const std::string &s, const char *t = SPACES);
std::string trim_right(const std::string &s, const char *t = SPACES);
std::string trim(const std::string &s, const char *t = SPACES);
std::string trim_left(std::string s, const char *t = SPACES);
std::string trim_right(std::string s, const char *t = SPACES);
std::string trim(std::string s, const char *t = SPACES);
std::string lower(std::string s);
std::string peeloff(const std::string& s);
//
// Date string
@ -107,8 +107,8 @@ bool get_keyword_value(const std::string& target, const char* keyword, std::stri
//
std::string s3fs_hex_lower(const unsigned char* input, size_t length);
std::string s3fs_hex_upper(const unsigned char* input, size_t length);
char* s3fs_base64(const unsigned char* input, size_t length);
unsigned char* s3fs_decode64(const char* input, size_t input_len, size_t* plength);
std::string s3fs_base64(const unsigned char* input, size_t length);
std::string s3fs_decode64(const char* input, size_t input_len);
//
// WTF8

View File

@ -54,9 +54,9 @@ const std::string& S3fsCred::GetBucket()
#define ASSERT_IS_SORTED(x) assert_is_sorted((x), __FILE__, __LINE__)
void assert_is_sorted(struct curl_slist* list, const char *file, int line)
void assert_is_sorted(const struct curl_slist* list, const char *file, int line)
{
for(; list != NULL; list = list->next){
for(; list != nullptr; list = list->next){
std::string key1 = list->data;
key1.erase(key1.find(':'));
std::string key2 = list->data;
@ -74,7 +74,7 @@ void assert_is_sorted(struct curl_slist* list, const char *file, int line)
size_t curl_slist_length(const struct curl_slist* list)
{
size_t len = 0;
for(; list != NULL; list = list->next){
for(; list != nullptr; list = list->next){
++len;
}
return len;
@ -82,7 +82,7 @@ size_t curl_slist_length(const struct curl_slist* list)
void test_sort_insert()
{
struct curl_slist* list = NULL;
struct curl_slist* list = nullptr;
ASSERT_IS_SORTED(list);
// add to head
list = curl_slist_sort_insert(list, "2", "val");
@ -107,7 +107,7 @@ void test_sort_insert()
void test_slist_remove()
{
struct curl_slist* list = NULL;
struct curl_slist* list = nullptr;
// remove no elements
ASSERT_EQUALS(static_cast<size_t>(0), curl_slist_length(list));
@ -115,14 +115,14 @@ void test_slist_remove()
ASSERT_EQUALS(static_cast<size_t>(0), curl_slist_length(list));
// remove only element
list = NULL;
list = nullptr;
list = curl_slist_sort_insert(list, "1", "val");
ASSERT_EQUALS(static_cast<size_t>(1), curl_slist_length(list));
list = curl_slist_remove(list, "1");
ASSERT_EQUALS(static_cast<size_t>(0), curl_slist_length(list));
// remove head element
list = NULL;
list = nullptr;
list = curl_slist_sort_insert(list, "1", "val");
list = curl_slist_sort_insert(list, "2", "val");
ASSERT_EQUALS(static_cast<size_t>(2), curl_slist_length(list));
@ -131,7 +131,7 @@ void test_slist_remove()
curl_slist_free_all(list);
// remove tail element
list = NULL;
list = nullptr;
list = curl_slist_sort_insert(list, "1", "val");
list = curl_slist_sort_insert(list, "2", "val");
ASSERT_EQUALS(static_cast<size_t>(2), curl_slist_length(list));
@ -140,7 +140,7 @@ void test_slist_remove()
curl_slist_free_all(list);
// remove middle element
list = NULL;
list = nullptr;
list = curl_slist_sort_insert(list, "1", "val");
list = curl_slist_sort_insert(list, "2", "val");
list = curl_slist_sort_insert(list, "3", "val");

View File

@ -33,7 +33,7 @@ void test_compress()
ASSERT_EQUALS(off_t(42), list.Size());
ASSERT_FALSE(list.IsPageLoaded(0, 1));
list.SetPageLoadedStatus(0, 1, /*pstatus=*/ PageList::PAGE_LOADED);
list.SetPageLoadedStatus(0, 1, /*pstatus=*/ PageList::page_status::LOADED);
ASSERT_TRUE(list.IsPageLoaded(0, 1));
ASSERT_FALSE(list.IsPageLoaded(0, 2));
@ -44,7 +44,7 @@ void test_compress()
ASSERT_EQUALS(off_t(41), size);
// test adding subsequent page then compressing
list.SetPageLoadedStatus(1, 3, /*pstatus=*/ PageList::PAGE_LOADED);
list.SetPageLoadedStatus(1, 3, /*pstatus=*/ PageList::page_status::LOADED);
list.Compress();
ASSERT_TRUE(list.IsPageLoaded(0, 3));
@ -53,7 +53,7 @@ void test_compress()
ASSERT_EQUALS(off_t(38), size);
// test adding non-contiguous page then compressing
list.SetPageLoadedStatus(5, 1, /*pstatus=*/ PageList::PAGE_LOADED);
list.SetPageLoadedStatus(5, 1, /*pstatus=*/ PageList::page_status::LOADED);
list.Compress();
ASSERT_TRUE(list.FindUnloadedPage(0, start, size));
@ -63,7 +63,7 @@ void test_compress()
printf("\n");
// test adding page between two pages then compressing
list.SetPageLoadedStatus(4, 1, /*pstatus=*/ PageList::PAGE_LOADED);
list.SetPageLoadedStatus(4, 1, /*pstatus=*/ PageList::page_status::LOADED);
list.Compress();
list.Dump();

View File

@ -18,9 +18,9 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <stdint.h>
#include <string>
#include "s3fs_logger.h"
@ -50,47 +50,48 @@ void test_trim()
ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234"));
ASSERT_EQUALS(std::string("1234"), trim_right("1234"));
ASSERT_EQUALS(std::string("0"), str(0));
ASSERT_EQUALS(std::string("1"), str(1));
ASSERT_EQUALS(std::string("-1"), str(-1));
ASSERT_EQUALS(std::string("9223372036854775807"), str(std::numeric_limits<int64_t>::max()));
ASSERT_EQUALS(std::string("-9223372036854775808"), str(std::numeric_limits<int64_t>::min()));
ASSERT_EQUALS(std::string("0"), str(std::numeric_limits<uint64_t>::min()));
ASSERT_EQUALS(std::string("18446744073709551615"), str(std::numeric_limits<uint64_t>::max()));
ASSERT_EQUALS(std::string("1234"), peeloff("\"1234\"")); // "1234" -> 1234
ASSERT_EQUALS(std::string("\"1234\""), peeloff("\"\"1234\"\"")); // ""1234"" -> "1234"
ASSERT_EQUALS(std::string("\"1234"), peeloff("\"\"1234\"")); // ""1234" -> "1234
ASSERT_EQUALS(std::string("1234\""), peeloff("\"1234\"\"")); // "1234"" -> 1234"
ASSERT_EQUALS(std::string("\"1234"), peeloff("\"1234")); // "1234 -> "1234
ASSERT_EQUALS(std::string("1234\""), peeloff("1234\"")); // 1234" -> 1234"
ASSERT_EQUALS(std::string(" \"1234\""), peeloff(" \"1234\"")); // _"1234" -> _"1234"
ASSERT_EQUALS(std::string("\"1234\" "), peeloff("\"1234\" ")); // "1234"_ -> "1234"_
}
void test_base64()
{
unsigned char *buf;
size_t len;
std::string buf;
char tmpbuf = '\0';
ASSERT_STREQUALS(s3fs_base64(NULL, 0), NULL);
buf = s3fs_decode64(NULL, 0, &len);
ASSERT_BUFEQUALS(reinterpret_cast<const char *>(buf), len, NULL, 0);
ASSERT_EQUALS(s3fs_base64(nullptr, 0), std::string(""));
buf = s3fs_decode64(nullptr, 0);
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), &tmpbuf, 0);
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>(""), 0), NULL);
buf = s3fs_decode64("", 0, &len);
ASSERT_BUFEQUALS(reinterpret_cast<const char *>(buf), len, NULL, 0);
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>(""), 0), std::string(""));
buf = s3fs_decode64("", 0);
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), &tmpbuf, 0);
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1"), 1), "MQ==");
buf = s3fs_decode64("MQ==", 4, &len);
ASSERT_BUFEQUALS(reinterpret_cast<const char *>(buf), len, "1", 1);
ASSERT_EQUALS(len, static_cast<size_t>(1));
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1"), 1), std::string("MQ=="));
buf = s3fs_decode64("MQ==", 4);
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), "1", 1);
ASSERT_EQUALS(buf.length(), static_cast<size_t>(1));
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("12"), 2), "MTI=");
buf = s3fs_decode64("MTI=", 4, &len);
ASSERT_BUFEQUALS(reinterpret_cast<const char *>(buf), len, "12", 2);
ASSERT_EQUALS(len, static_cast<size_t>(2));
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("12"), 2), std::string("MTI="));
buf = s3fs_decode64("MTI=", 4);
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), "12", 2);
ASSERT_EQUALS(buf.length(), static_cast<size_t>(2));
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("123"), 3), "MTIz");
buf = s3fs_decode64("MTIz", 4, &len);
ASSERT_BUFEQUALS(reinterpret_cast<const char *>(buf), len, "123", 3);
ASSERT_EQUALS(len, static_cast<size_t>(3));
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("123"), 3), std::string("MTIz"));
buf = s3fs_decode64("MTIz", 4);
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), "123", 3);
ASSERT_EQUALS(buf.length(), static_cast<size_t>(3));
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1234"), 4), "MTIzNA==");
buf = s3fs_decode64("MTIzNA==", 8, &len);
ASSERT_BUFEQUALS(reinterpret_cast<const char *>(buf), len, "1234", 4);
ASSERT_EQUALS(len, static_cast<size_t>(4));
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1234"), 4), std::string("MTIzNA=="));
buf = s3fs_decode64("MTIzNA==", 8);
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), "1234", 4);
ASSERT_EQUALS(buf.length(), static_cast<size_t>(4));
// TODO: invalid input
}

View File

@ -32,7 +32,7 @@ template <typename T> void assert_equals(const T &x, const T &y, const char *fil
if (x != y) {
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
std::cerr << std::endl;
std::exit(1);
abort();
}
}
@ -42,7 +42,7 @@ template <> void assert_equals(const std::string &x, const std::string &y, const
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
std::cerr << s3fs_hex_lower(reinterpret_cast<const unsigned char *>(x.c_str()), x.size()) << std::endl;
std::cerr << s3fs_hex_lower(reinterpret_cast<const unsigned char *>(y.c_str()), y.size()) << std::endl;
std::exit(1);
abort();
}
}
@ -51,7 +51,7 @@ template <typename T> void assert_nequals(const T &x, const T &y, const char *fi
{
if (x == y) {
std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl;
std::exit(1);
abort();
}
}
@ -61,29 +61,29 @@ template <> void assert_nequals(const std::string &x, const std::string &y, cons
std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl;
std::cerr << s3fs_hex_lower(reinterpret_cast<const unsigned char *>(x.c_str()), x.size()) << std::endl;
std::cerr << s3fs_hex_lower(reinterpret_cast<const unsigned char *>(y.c_str()), y.size()) << std::endl;
std::exit(1);
abort();
}
}
void assert_strequals(const char *x, const char *y, const char *file, int line)
{
if(x == NULL && y == NULL){
if(x == nullptr && y == nullptr){
return;
// cppcheck-suppress nullPointerRedundantCheck
} else if(x == NULL || y == NULL || strcmp(x, y) != 0){
} else if(x == nullptr || y == nullptr || strcmp(x, y) != 0){
std::cerr << (x ? x : "null") << " != " << (y ? y : "null") << " at " << file << ":" << line << std::endl;
std::exit(1);
abort();
}
}
void assert_bufequals(const char *x, size_t len1, const char *y, size_t len2, const char *file, int line)
{
if(x == NULL && y == NULL){
if(x == nullptr && y == nullptr){
return;
// cppcheck-suppress nullPointerRedundantCheck
} else if(x == NULL || y == NULL || len1 != len2 || memcmp(x, y, len1) != 0){
} else if(x == nullptr || y == nullptr || len1 != len2 || memcmp(x, y, len1) != 0){
std::cerr << (x ? std::string(x, len1) : "null") << " != " << (y ? std::string(y, len2) : "null") << " at " << file << ":" << line << std::endl;
std::exit(1);
abort();
}
}

View File

@ -18,10 +18,10 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <errno.h>
#include <stdint.h>
#include "s3fs_logger.h"
#include "threadpoolman.h"
@ -30,7 +30,7 @@
//------------------------------------------------
// ThreadPoolMan class variables
//------------------------------------------------
ThreadPoolMan* ThreadPoolMan::singleton = NULL;
ThreadPoolMan* ThreadPoolMan::singleton = nullptr;
//------------------------------------------------
// ThreadPoolMan class methods
@ -49,17 +49,18 @@ void ThreadPoolMan::Destroy()
{
if(ThreadPoolMan::singleton){
delete ThreadPoolMan::singleton;
ThreadPoolMan::singleton = NULL;
ThreadPoolMan::singleton = nullptr;
}
}
bool ThreadPoolMan::Instruct(thpoolman_param* pparam)
bool ThreadPoolMan::Instruct(const thpoolman_param& param)
{
if(!ThreadPoolMan::singleton){
S3FS_PRN_WARN("The singleton object is not initialized yet.");
return false;
}
return ThreadPoolMan::singleton->SetInstruction(pparam);
ThreadPoolMan::singleton->SetInstruction(param);
return true;
}
//
@ -84,41 +85,35 @@ void* ThreadPoolMan::Worker(void* arg)
}
// get instruction
thpoolman_param* pparam;
thpoolman_param param;
{
AutoLock auto_lock(&(psingleton->thread_list_lock));
if(!psingleton->instruction_list.empty()){
pparam = psingleton->instruction_list.front();
psingleton->instruction_list.pop_front();
if(!pparam){
S3FS_PRN_WARN("Got a semaphore, but the instruction is empty.");
}
if(psingleton->instruction_list.empty()){
S3FS_PRN_DBG("Got a semaphore, but the instruction is empty.");
continue;
}else{
S3FS_PRN_WARN("Got a semaphore, but there is no instruction.");
pparam = NULL;
param = psingleton->instruction_list.front();
psingleton->instruction_list.pop_front();
}
}
if(pparam){
void* retval = pparam->pfunc(pparam->args);
if(NULL != retval){
S3FS_PRN_WARN("The instruction function returned with somthign error code(%ld).", reinterpret_cast<long>(retval));
}
if(pparam->psem){
pparam->psem->post();
}
delete pparam;
void* retval = param.pfunc(param.args);
if(nullptr != retval){
S3FS_PRN_WARN("The instruction function returned with somthign error code(%ld).", reinterpret_cast<long>(retval));
}
if(param.psem){
param.psem->post();
}
}
return NULL;
return nullptr;
}
//------------------------------------------------
// ThreadPoolMan methods
//------------------------------------------------
ThreadPoolMan::ThreadPoolMan(int count) : is_exit(false), thpoolman_sem(0), is_lock_init(false), is_exit_flag_init(false)
ThreadPoolMan::ThreadPoolMan(int count) : is_exit(false), thpoolman_sem(0), is_lock_init(false)
{
if(count < 1){
S3FS_PRN_CRIT("Failed to creating singleton for Thread Manager, because thread count(%d) is under 1.", count);
@ -142,12 +137,6 @@ ThreadPoolMan::ThreadPoolMan(int count) : is_exit(false), thpoolman_sem(0), is_l
}
is_lock_init = true;
if(0 != (result = pthread_mutex_init(&thread_exit_flag_lock, &attr))){
S3FS_PRN_CRIT("failed to init thread_exit_flag_lock: %d", result);
abort();
}
is_exit_flag_init = true;
// create threads
if(!StartThreads(count)){
S3FS_PRN_ERR("Failed starting threads at initializing.");
@ -167,25 +156,15 @@ ThreadPoolMan::~ThreadPoolMan()
}
is_lock_init = false;
}
if(is_exit_flag_init ){
int result;
if(0 != (result = pthread_mutex_destroy(&thread_exit_flag_lock))){
S3FS_PRN_CRIT("failed to destroy thread_exit_flag_lock: %d", result);
abort();
}
is_exit_flag_init = false;
}
}
bool ThreadPoolMan::IsExit() const
{
AutoLock auto_lock(&thread_exit_flag_lock);
return is_exit;
}
void ThreadPoolMan::SetExitFlag(bool exit_flag)
{
AutoLock auto_lock(&thread_exit_flag_lock);
is_exit = exit_flag;
}
@ -198,13 +177,13 @@ bool ThreadPoolMan::StopThreads()
// all threads to exit
SetExitFlag(true);
for(uint waitcnt = thread_list.size(); 0 < waitcnt; --waitcnt){
for(size_t waitcnt = thread_list.size(); 0 < waitcnt; --waitcnt){
thpoolman_sem.post();
}
// wait for threads exiting
for(thread_list_t::const_iterator iter = thread_list.begin(); iter != thread_list.end(); ++iter){
void* retval = NULL;
void* retval = nullptr;
int result = pthread_join(*iter, &retval);
if(result){
S3FS_PRN_ERR("failed pthread_join - result(%d)", result);
@ -218,13 +197,6 @@ bool ThreadPoolMan::StopThreads()
while(thpoolman_sem.try_wait()){
}
// clear instructions
for(thpoolman_params_t::const_iterator iter = instruction_list.begin(); iter != instruction_list.end(); ++iter){
thpoolman_param* pparam = *iter;
delete pparam;
}
instruction_list.clear();
return true;
}
@ -236,6 +208,8 @@ bool ThreadPoolMan::StartThreads(int count)
}
// stop all thread if they are running.
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownConditionTrueFalse
if(!StopThreads()){
S3FS_PRN_ERR("Failed to stop existed threads.");
return false;
@ -247,7 +221,7 @@ bool ThreadPoolMan::StartThreads(int count)
// run thread
pthread_t thread;
int result;
if(0 != (result = pthread_create(&thread, NULL, ThreadPoolMan::Worker, static_cast<void*>(this)))){
if(0 != (result = pthread_create(&thread, nullptr, ThreadPoolMan::Worker, static_cast<void*>(this)))){
S3FS_PRN_ERR("failed pthread_create with return code(%d)", result);
StopThreads(); // if possible, stop all threads
return false;
@ -257,23 +231,16 @@ bool ThreadPoolMan::StartThreads(int count)
return true;
}
bool ThreadPoolMan::SetInstruction(thpoolman_param* pparam)
void ThreadPoolMan::SetInstruction(const thpoolman_param& param)
{
if(!pparam){
S3FS_PRN_ERR("The parameter value is NULL.");
return false;
}
// set parameter to list
{
AutoLock auto_lock(&thread_list_lock);
instruction_list.push_back(pparam);
instruction_list.push_back(param);
}
// run thread
thpoolman_sem.post();
return true;
}
/*

View File

@ -21,6 +21,10 @@
#ifndef S3FS_THREADPOOLMAN_H_
#define S3FS_THREADPOOLMAN_H_
#include <atomic>
#include <list>
#include <vector>
#include "psemaphore.h"
//------------------------------------------------
@ -36,7 +40,7 @@ typedef void* (*thpoolman_worker)(void*); // same as start_routine
//
// [NOTE]
// The args member is a value that is an argument of the worker function.
// The psem member may be NULL. If it is not NULL, the post() method is
// The psem member may be nullptr. If it is not nullptr, the post() method is
// called when the function finishes.
//
struct thpoolman_param
@ -45,12 +49,12 @@ struct thpoolman_param
Semaphore* psem;
thpoolman_worker pfunc;
thpoolman_param() : args(NULL), psem(NULL), pfunc(NULL) {}
thpoolman_param() : args(nullptr), psem(nullptr), pfunc(nullptr) {}
};
typedef std::list<thpoolman_param*> thpoolman_params_t;
typedef std::list<thpoolman_param> thpoolman_params_t;
typedef std::list<pthread_t> thread_list_t;
typedef std::vector<pthread_t> thread_list_t;
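A minimal usage sketch of the value-based API above (illustrative only, not part of this change). It assumes ThreadPoolMan::Initialize() has already been called and that Semaphore from psemaphore.h exposes a blocking wait() to pair with the post() issued by the worker thread:

    // Sketch: queue one job through ThreadPoolMan::Instruct() and wait for it.
    // Semaphore's blocking wait() is assumed from psemaphore.h.
    static void* example_job(void* arg)
    {
        S3FS_PRN_DBG("running example job with arg %p", arg);
        return nullptr;                    // a non-null return is logged as a warning
    }

    static void enqueue_example_job()
    {
        Semaphore       finished(0);
        thpoolman_param param;             // copied by value into instruction_list
        param.args  = nullptr;
        param.pfunc = example_job;
        param.psem  = &finished;           // posted by the worker when the job ends
        if(ThreadPoolMan::Instruct(param)){
            finished.wait();               // assumed blocking wait
        }
    }

Passing the parameter by value lets the worker copy it out of instruction_list without any ownership handoff, which is why the delete calls disappear from the .cpp diff above.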
//------------------------------------------------
// Class ThreadPoolMan
@ -60,7 +64,7 @@ class ThreadPoolMan
private:
static ThreadPoolMan* singleton;
bool is_exit;
std::atomic<bool> is_exit;
Semaphore thpoolman_sem;
bool is_lock_init;
@ -69,26 +73,27 @@ class ThreadPoolMan
thpoolman_params_t instruction_list;
bool is_exit_flag_init;
mutable pthread_mutex_t thread_exit_flag_lock;
private:
static void* Worker(void* arg);
explicit ThreadPoolMan(int count = 1);
~ThreadPoolMan();
ThreadPoolMan(const ThreadPoolMan&) = delete;
ThreadPoolMan(ThreadPoolMan&&) = delete;
ThreadPoolMan& operator=(const ThreadPoolMan&) = delete;
ThreadPoolMan& operator=(ThreadPoolMan&&) = delete;
bool IsExit() const;
void SetExitFlag(bool exit_flag);
bool StopThreads();
bool StartThreads(int count);
bool SetInstruction(thpoolman_param* pparam);
void SetInstruction(const thpoolman_param& pparam);
public:
static bool Initialize(int count);
static void Destroy();
static bool Instruct(thpoolman_param* pparam);
static bool Instruct(const thpoolman_param& pparam);
};
#endif // S3FS_THREADPOOLMAN_H_

View File

@ -40,12 +40,6 @@
#include <sys/xattr.h>
#endif
#if __cplusplus < 201103L
#define OPERATOR_EXPLICIT
#else
#define OPERATOR_EXPLICIT explicit
#endif
//-------------------------------------------------------------------
// xattrs_t
//-------------------------------------------------------------------
@ -54,118 +48,82 @@
// This header is a URL-encoded string in JSON format.
// x-amz-meta-xattr:urlencode({"xattr-1":"base64(value-1)","xattr-2":"base64(value-2)","xattr-3":"base64(value-3)"})
//
typedef struct xattr_value
{
unsigned char* pvalue;
size_t length;
explicit xattr_value(unsigned char* pval = NULL, size_t len = 0) : pvalue(pval), length(len) {}
~xattr_value()
{
delete[] pvalue;
}
}XATTRVAL, *PXATTRVAL;
typedef std::map<std::string, PXATTRVAL> xattrs_t;
typedef std::map<std::string, std::string> xattrs_t;
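A rough sketch of what the x-amz-meta-xattr format described above amounts to (not taken from the s3fs sources): each map entry becomes a base64-encoded value inside a JSON object, and the whole object is URL-encoded. s3fs_base64() is the helper exercised by the unit tests earlier in this diff; url_encode() is a hypothetical stand-in for whatever encoder the real code uses:

    // Illustration only: serialize an xattrs_t map into the value that would
    // be stored in the x-amz-meta-xattr header.
    std::string build_xattr_header(const xattrs_t& xattrs)
    {
        std::string json = "{";
        for(xattrs_t::const_iterator it = xattrs.begin(); it != xattrs.end(); ++it){
            if(it != xattrs.begin()){
                json += ",";
            }
            json += "\"" + it->first + "\":\"";
            json += s3fs_base64(reinterpret_cast<const unsigned char*>(it->second.c_str()), it->second.size());
            json += "\"";
        }
        json += "}";
        return url_encode(json);           // hypothetical helper; result is the header value
    }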
//-------------------------------------------------------------------
// acl_t
//-------------------------------------------------------------------
class acl_t{
public:
enum Value{
PRIVATE,
PUBLIC_READ,
PUBLIC_READ_WRITE,
AWS_EXEC_READ,
AUTHENTICATED_READ,
BUCKET_OWNER_READ,
BUCKET_OWNER_FULL_CONTROL,
LOG_DELIVERY_WRITE,
UNKNOWN
};
// cppcheck-suppress noExplicitConstructor
acl_t(Value value) : value_(value) {}
operator Value() const { return value_; }
const char* str() const
{
switch(value_){
case PRIVATE:
return "private";
case PUBLIC_READ:
return "public-read";
case PUBLIC_READ_WRITE:
return "public-read-write";
case AWS_EXEC_READ:
return "aws-exec-read";
case AUTHENTICATED_READ:
return "authenticated-read";
case BUCKET_OWNER_READ:
return "bucket-owner-read";
case BUCKET_OWNER_FULL_CONTROL:
return "bucket-owner-full-control";
case LOG_DELIVERY_WRITE:
return "log-delivery-write";
case UNKNOWN:
return NULL;
}
abort();
}
static acl_t from_str(const char *acl)
{
if(0 == strcmp(acl, "private")){
return PRIVATE;
}else if(0 == strcmp(acl, "public-read")){
return PUBLIC_READ;
}else if(0 == strcmp(acl, "public-read-write")){
return PUBLIC_READ_WRITE;
}else if(0 == strcmp(acl, "aws-exec-read")){
return AWS_EXEC_READ;
}else if(0 == strcmp(acl, "authenticated-read")){
return AUTHENTICATED_READ;
}else if(0 == strcmp(acl, "bucket-owner-read")){
return BUCKET_OWNER_READ;
}else if(0 == strcmp(acl, "bucket-owner-full-control")){
return BUCKET_OWNER_FULL_CONTROL;
}else if(0 == strcmp(acl, "log-delivery-write")){
return LOG_DELIVERY_WRITE;
}else{
return UNKNOWN;
}
}
private:
OPERATOR_EXPLICIT operator bool();
Value value_;
enum class acl_t{
PRIVATE,
PUBLIC_READ,
PUBLIC_READ_WRITE,
AWS_EXEC_READ,
AUTHENTICATED_READ,
BUCKET_OWNER_READ,
BUCKET_OWNER_FULL_CONTROL,
LOG_DELIVERY_WRITE,
UNKNOWN
};
inline const char* str(acl_t value)
{
switch(value){
case acl_t::PRIVATE:
return "private";
case acl_t::PUBLIC_READ:
return "public-read";
case acl_t::PUBLIC_READ_WRITE:
return "public-read-write";
case acl_t::AWS_EXEC_READ:
return "aws-exec-read";
case acl_t::AUTHENTICATED_READ:
return "authenticated-read";
case acl_t::BUCKET_OWNER_READ:
return "bucket-owner-read";
case acl_t::BUCKET_OWNER_FULL_CONTROL:
return "bucket-owner-full-control";
case acl_t::LOG_DELIVERY_WRITE:
return "log-delivery-write";
case acl_t::UNKNOWN:
return nullptr;
}
abort();
}
inline acl_t to_acl(const char *acl)
{
if(0 == strcmp(acl, "private")){
return acl_t::PRIVATE;
}else if(0 == strcmp(acl, "public-read")){
return acl_t::PUBLIC_READ;
}else if(0 == strcmp(acl, "public-read-write")){
return acl_t::PUBLIC_READ_WRITE;
}else if(0 == strcmp(acl, "aws-exec-read")){
return acl_t::AWS_EXEC_READ;
}else if(0 == strcmp(acl, "authenticated-read")){
return acl_t::AUTHENTICATED_READ;
}else if(0 == strcmp(acl, "bucket-owner-read")){
return acl_t::BUCKET_OWNER_READ;
}else if(0 == strcmp(acl, "bucket-owner-full-control")){
return acl_t::BUCKET_OWNER_FULL_CONTROL;
}else if(0 == strcmp(acl, "log-delivery-write")){
return acl_t::LOG_DELIVERY_WRITE;
}else{
return acl_t::UNKNOWN;
}
}
//-------------------------------------------------------------------
// sse_type_t
//-------------------------------------------------------------------
class sse_type_t{
public:
enum Value{
SSE_DISABLE = 0, // do not use server-side encryption
SSE_S3, // server-side encryption with an S3-managed key
SSE_C, // server-side encryption with a customer-provided key
SSE_KMS // server-side encryption with a KMS key id
};
// cppcheck-suppress noExplicitConstructor
sse_type_t(Value value) : value_(value) {}
operator Value() const { return value_; }
private:
//OPERATOR_EXPLICIT operator bool();
Value value_;
enum class sse_type_t{
SSE_DISABLE = 0, // do not use server-side encryption
SSE_S3, // server-side encryption with an S3-managed key
SSE_C, // server-side encryption with a customer-provided key
SSE_KMS // server-side encryption with a KMS key id
};
enum signature_type_t {
enum class signature_type_t {
V2_ONLY,
V4_ONLY,
V2_OR_V4
@ -182,7 +140,7 @@ struct etagpair
std::string etag; // expected etag value
int part_num; // part number
explicit etagpair(const char* petag = NULL, int part = -1) : etag(petag ? petag : ""), part_num(part) {}
explicit etagpair(const char* petag = nullptr, int part = -1) : etag(petag ? petag : ""), part_num(part) {}
~etagpair()
{
@ -196,11 +154,13 @@ struct etagpair
}
};
// Requires pointer stability and thus must be a list not a vector
typedef std::list<etagpair> etaglist_t;
struct petagpool
{
std::list<etagpair*> petaglist;
// Requires pointer stability and thus must be a list not a vector
std::list<etagpair> petaglist;
~petagpool()
{
@ -209,19 +169,13 @@ struct petagpool
void clear()
{
for(std::list<etagpair*>::iterator it = petaglist.begin(); petaglist.end() != it; ++it){
if(*it){
delete (*it);
}
}
petaglist.clear();
}
etagpair* add(const etagpair& etag_entity)
{
etagpair* petag = new etagpair(etag_entity);
petaglist.push_back(petag);
return petag;
petaglist.push_back(etag_entity);
return &petaglist.back();
}
};
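The pointer-stability comments are the reason etaglist_t and petaglist stay std::list while the other container typedefs in this change move to std::vector; a small, self-contained standard-C++ illustration (not s3fs code):

    // push_back on a std::list never invalidates pointers to existing elements,
    // so a stored etagpair* (like filepart::petag) stays valid. A std::vector
    // may reallocate and leave such pointers dangling.
    #include <list>
    #include <vector>

    void pointer_stability_demo()
    {
        std::list<int> stable;
        stable.push_back(1);
        int* p = &stable.back();
        for(int i = 0; i < 1000; ++i){
            stable.push_back(i);           // p remains valid
        }
        *p = 42;                           // OK

        std::vector<int> unstable;
        unstable.push_back(1);
        int* q = &unstable.back();
        unstable.push_back(2);             // may reallocate
        // *q = 42;                        // undefined behavior if it did
    }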
@ -238,7 +192,7 @@ struct filepart
bool is_copy; // whether is copy multipart
etagpair* petag; // use only parallel upload
explicit filepart(bool is_uploaded = false, int _fd = -1, off_t part_start = 0, off_t part_size = -1, bool is_copy_part = false, etagpair* petagpair = NULL) : uploaded(false), fd(_fd), startpos(part_start), size(part_size), is_copy(is_copy_part), petag(petagpair) {}
explicit filepart(bool is_uploaded = false, int _fd = -1, off_t part_start = 0, off_t part_size = -1, bool is_copy_part = false, etagpair* petagpair = nullptr) : uploaded(false), fd(_fd), startpos(part_start), size(part_size), is_copy(is_copy_part), petag(petagpair) {}
~filepart()
{
@ -253,7 +207,7 @@ struct filepart
startpos = 0;
size = -1;
is_copy = false;
petag = NULL;
petag = nullptr;
}
void add_etag_list(etaglist_t& list, int partnum = -1)
@ -261,7 +215,7 @@ struct filepart
if(-1 == partnum){
partnum = static_cast<int>(list.size()) + 1;
}
list.push_back(etagpair(NULL, partnum));
list.push_back(etagpair(nullptr, partnum));
petag = &list.back();
}
@ -279,7 +233,7 @@ struct filepart
}
};
typedef std::list<filepart> filepart_list_t;
typedef std::vector<filepart> filepart_list_t;
//
// Each part information for Untreated parts
@ -337,7 +291,7 @@ struct untreatedpart
}
};
typedef std::list<untreatedpart> untreated_list_t;
typedef std::vector<untreatedpart> untreated_list_t;
//
// Information on each part of multipart upload
@ -351,7 +305,7 @@ struct mp_part
explicit mp_part(off_t set_start = 0, off_t set_size = 0, int part = 0) : start(set_start), size(set_size), part_num(part) {}
};
typedef std::list<struct mp_part> mp_part_list_t;
typedef std::vector<struct mp_part> mp_part_list_t;
inline off_t total_mp_part_list(const mp_part_list_t& mplist)
{
@ -362,6 +316,23 @@ inline off_t total_mp_part_list(const mp_part_list_t& mplist)
return size;
}
//
// Rename directory struct
//
struct mvnode
{
mvnode(std::string old_path, std::string new_path, bool is_dir, bool is_normdir)
: old_path(std::move(old_path))
, new_path(std::move(new_path))
, is_dir(is_dir)
, is_normdir(is_normdir)
{}
std::string old_path;
std::string new_path;
bool is_dir;
bool is_normdir;
};
//-------------------------------------------------------------------
// mimes_t
//-------------------------------------------------------------------
@ -376,7 +347,7 @@ typedef std::map<std::string, std::string, case_insensitive_compare_func> mimes_
//-------------------------------------------------------------------
// Typedefs specialized for use
//-------------------------------------------------------------------
typedef std::list<std::string> readline_t;
typedef std::vector<std::string> readline_t;
typedef std::map<std::string, std::string> kvmap_t;
typedef std::map<std::string, kvmap_t> bucketkvmap_t;

View File

@ -36,11 +36,20 @@ noinst_PROGRAMS = \
truncate_read_file \
cr_filename
junk_data_SOURCES = junk_data.c
junk_data_SOURCES = junk_data.cc
write_multiblock_SOURCES = write_multiblock.cc
mknod_test_SOURCES = mknod_test.c
truncate_read_file_SOURCES = truncate_read_file.c
cr_filename_SOURCES = cr_filename.c
mknod_test_SOURCES = mknod_test.cc
truncate_read_file_SOURCES = truncate_read_file.cc
cr_filename_SOURCES = cr_filename.cc
clang-tidy:
clang-tidy \
$(junk_data_SOURCES) \
$(write_multiblock_SOURCES) \
$(mknod_test_SOURCES) \
$(truncate_read_file_SOURCES) \
$(cr_filename_SOURCES) \
-- $(DEPS_CFLAGS) $(CPPFLAGS)
#
# Local variables:

test/compile_all_targets.sh Executable file
View File

@ -0,0 +1,63 @@
#!/bin/bash
#
# s3fs - FUSE-based file system backed by Amazon S3
#
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
set -o errexit
set -o nounset
set -o pipefail
COMMON_FLAGS='-O -Wall -Werror'
make clean
CXXFLAGS="$COMMON_FLAGS" ./configure --with-gnutls
make --jobs "$(nproc)"
make clean
CXXFLAGS="$COMMON_FLAGS" ./configure --with-gnutls --with-nettle
make --jobs "$(nproc)"
make clean
CXXFLAGS="$COMMON_FLAGS" ./configure --with-nss
make --jobs "$(nproc)"
make clean
CXXFLAGS="$COMMON_FLAGS" ./configure --with-openssl
make --jobs "$(nproc)"
make clean
CXXFLAGS="$COMMON_FLAGS -std=c++23" ./configure
make --jobs "$(nproc)"
make clean
CXXFLAGS="$COMMON_FLAGS -m32" ./configure
make --jobs "$(nproc)"
make clean
CXX=clang++ CXXFLAGS="$COMMON_FLAGS -Wshorten-64-to-32" ./configure
make --jobs "$(nproc)"
#
# Local variables:
# tab-width: 4
# c-basic-offset: 4
# End:
# vim600: expandtab sw=4 ts=4 fdm=marker
# vim<600: expandtab sw=4 ts=4
#

View File

@ -18,8 +18,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cstdio>
#include <cstdlib>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
@ -31,7 +31,7 @@
// This program truncates the file and reads the file in another process
// between truncate and flush(close file).
//
int main(int argc, char *argv[])
int main(int argc, const char *argv[])
{
if(argc != 2){
fprintf(stderr, "[ERROR] Wrong paraemters\n");
@ -41,7 +41,8 @@ int main(int argc, char *argv[])
int fd;
char filepath[4096];
sprintf(filepath, "%s\r", argv[1]);
snprintf(filepath, sizeof(filepath), "%s\r", argv[1]);
filepath[sizeof(filepath) - 1] = '\0'; // for safety
// create empty file
if(-1 == (fd = open(filepath, O_CREAT|O_RDWR, 0644))){

View File

@ -81,29 +81,29 @@ while read -r line; do
if [ "${prev_line_type}" -eq 1 ]; then
if [ "${number_type[1]}" -eq 2 ]; then
# if passed, cut s3fs information messages
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
elif [ "${number_type[1]}" -eq 3 ]; then
# if failed, print all
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+\%'
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%'
else
# there is start keyword but not end keyword, so print all
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+\%'
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%'
fi
elif [ "${prev_line_type}" -eq 2 ] || [ "${prev_line_type}" -eq 3 ]; then
if [ "${number_type[1]}" -eq 2 ] || [ "${number_type[1]}" -eq 3 ]; then
# previous is end of chmpx, but this type is end of chmpx without start keyword. then print all
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+\%'
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%'
else
# this area is not from start to end, cut s3fs information messages
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
fi
else
if [ "${number_type[1]}" -eq 2 ] || [ "${number_type[1]}" -eq 3 ]; then
# previous is normal, but this type is end of chmpx without start keyword. then print all
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+\%'
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%'
else
# this area is normal, cut s3fs information messages
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
fi
fi
if [ "${number_type[1]}" -eq 3 ]; then
@ -121,9 +121,9 @@ file_line_cnt=$(wc -l "${SUITELOG}" | awk '{print $1}')
tail_line_cnt=$((file_line_cnt - prev_line_number))
if [ "${prev_line_type}" -eq 1 ]; then
tail "-${tail_line_cnt}" "${SUITELOG}" | grep -v -e '[0-9]\+\%'
tail "-${tail_line_cnt}" "${SUITELOG}" | grep -v -e '[0-9]\+%'
else
tail "-${tail_line_cnt}" "${SUITELOG}" | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
tail "-${tail_line_cnt}" "${SUITELOG}" | grep -v -e '[0-9]\+%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
fi
#

View File

@ -168,7 +168,7 @@ function start_s3proxy {
if [ -z "${CHAOS_HTTP_PROXY}" ] && [ -z "${CHAOS_HTTP_PROXY_OPT}" ]; then
S3PROXY_CACERT_FILE="/tmp/keystore.pem"
rm -f /tmp/keystore.jks "${S3PROXY_CACERT_FILE}"
echo -e 'password\npassword\n\n\n\n\n\n\nyes' | keytool -genkey -keystore /tmp/keystore.jks -keyalg RSA -keysize 2048 -validity 365 -ext SAN=IP:127.0.0.1
printf 'password\npassword\n\n\n\n\n\n\ny' | keytool -genkey -keystore /tmp/keystore.jks -keyalg RSA -keysize 2048 -validity 365 -ext SAN=IP:127.0.0.1
echo password | keytool -exportcert -keystore /tmp/keystore.jks -rfc -file "${S3PROXY_CACERT_FILE}"
else
S3PROXY_CACERT_FILE=""
@ -229,8 +229,16 @@ function start_s3fs {
fi
# On OSX only, we need to specify the direct_io and auto_cache flag.
#
# Also turn off creation and use of the Spotlight index.
# (Leaving Spotlight on results in many wasted requests,
# which affects test execution time.)
#
if [ "$(uname)" = "Darwin" ]; then
local DIRECT_IO_OPT="-o direct_io -o auto_cache"
# disable spotlight
sudo mdutil -a -i off
else
local DIRECT_IO_OPT=""
fi
@ -248,14 +256,23 @@ function start_s3fs {
fi
# [NOTE]
# On macos, running s3fs via stdbuf will result in no response.
# Therefore, when it is macos, it is not executed via stdbuf.
# This patch may be temporary, but no other method has been found at this time.
# For macos fuse-t, we need to specify the "noattrcache" option to
# disable NFS caching.
#
if [ "$(uname)" = "Darwin" ]; then
local VIA_STDBUF_CMDLINE=""
local FUSE_T_ATTRCACHE_OPT="-o noattrcache"
else
local VIA_STDBUF_CMDLINE="${STDBUF_BIN} -oL -eL"
local FUSE_T_ATTRCACHE_OPT=""
fi
# [NOTE]
# On macOS we may get a VERIFY error for the self-signed certificate used by s3proxy.
# We can specify NO_CHECK_CERT=1 to avoid this.
#
if [ -n "${NO_CHECK_CERT}" ] && [ "${NO_CHECK_CERT}" -eq 1 ]; then
local NO_CHECK_CERT_OPT="-o no_check_certificate"
else
local NO_CHECK_CERT_OPT=""
fi
# Common s3fs options:
@ -279,7 +296,7 @@ function start_s3fs {
(
set -x
CURL_CA_BUNDLE="${S3PROXY_CACERT_FILE}" \
${VIA_STDBUF_CMDLINE} \
${STDBUF_BIN} -oL -eL \
${VALGRIND_EXEC} \
${S3FS} \
${TEST_BUCKET_1} \
@ -292,6 +309,8 @@ function start_s3fs {
${AUTH_OPT} \
${DIRECT_IO_OPT} \
${S3FS_HTTP_PROXY_OPT} \
${NO_CHECK_CERT_OPT} \
${FUSE_T_ATTRCACHE_OPT} \
-o stat_cache_expire=1 \
-o stat_cache_interval_expire=1 \
-o dbglevel="${DBGLEVEL:=info}" \
@ -309,15 +328,15 @@ function start_s3fs {
if [ "$(uname)" = "Darwin" ]; then
local TRYCOUNT=0
while [ "${TRYCOUNT}" -le "${RETRIES:=20}" ]; do
df | grep -q "${TEST_BUCKET_MOUNT_POINT_1}"
rc=$?
if [ "${rc}" -eq 0 ]; then
_DF_RESULT=$(df 2>/dev/null)
if echo "${_DF_RESULT}" | grep -q "${TEST_BUCKET_MOUNT_POINT_1}"; then
break;
fi
sleep 1
TRYCOUNT=$((TRYCOUNT + 1))
done
if [ "${rc}" -ne 0 ]; then
if [ "${TRYCOUNT}" -gt "${RETRIES}" ]; then
echo "Waited ${TRYCOUNT} seconds, but it could not be mounted."
exit 1
fi
else

View File

@ -320,11 +320,7 @@ function test_chown {
mk_test_file
local ORIGINAL_PERMISSIONS
if [ "$(uname)" = "Darwin" ]; then
ORIGINAL_PERMISSIONS=$(stat -f "%u:%g" "${TEST_TEXT_FILE}")
else
ORIGINAL_PERMISSIONS=$(stat --format=%u:%g "${TEST_TEXT_FILE}")
fi
ORIGINAL_PERMISSIONS=$(get_user_and_group "${TEST_TEXT_FILE}")
# [NOTE]
# Prevents test interruptions due to permission errors, etc.
@ -337,11 +333,7 @@ function test_chown {
# if they're the same, we have a problem.
local CHANGED_PERMISSIONS
if [ "$(uname)" = "Darwin" ]; then
CHANGED_PERMISSIONS=$(stat -f "%u:%g" "${TEST_TEXT_FILE}")
else
CHANGED_PERMISSIONS=$(stat --format=%u:%g "${TEST_TEXT_FILE}")
fi
CHANGED_PERMISSIONS=$(get_user_and_group "${TEST_TEXT_FILE}")
if [ "${CHANGED_PERMISSIONS}" = "${ORIGINAL_PERMISSIONS}" ]
then
if [ "${ORIGINAL_PERMISSIONS}" = "1000:1000" ]
@ -366,6 +358,7 @@ function test_list {
local file_cnt=${#file_list[@]}
if [ "${file_cnt}" -ne 2 ]; then
echo "Expected 2 file but got ${file_cnt}"
echo "Files: " "${file_list[@]}"
return 1
fi
@ -389,7 +382,9 @@ function test_external_directory_creation {
describe "Test external directory creation ..."
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/directory/"${TEST_TEXT_FILE}"
echo "data" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
ls directory >/dev/null 2>&1
# shellcheck disable=SC2010
ls | grep -q directory
stat directory >/dev/null 2>&1
get_permissions directory | grep -q 750$
ls directory
cmp <(echo "data") directory/"${TEST_TEXT_FILE}"
@ -412,6 +407,7 @@ function test_external_modification {
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo "new new" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
cmp "${TEST_TEXT_FILE}" <(echo "new new")
rm -f "${TEST_TEXT_FILE}"
}
@ -431,8 +427,10 @@ function test_external_creation {
#
echo "data" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
sleep 1
wait_ostype 1
[ -e "${TEST_TEXT_FILE}" ]
rm -f "${TEST_TEXT_FILE}"
}
@ -647,9 +645,6 @@ function test_multipart_copy {
function test_multipart_mix {
describe "Testing multi-part mix ..."
if [ "$(uname)" = "Darwin" ]; then
cat /dev/null > "${BIG_FILE}"
fi
../../junk_data $((BIG_FILE_BLOCK_SIZE * BIG_FILE_COUNT)) > "${TEMP_DIR}/${BIG_FILE}"
dd if="${TEMP_DIR}/${BIG_FILE}" of="${BIG_FILE}" bs="${BIG_FILE_BLOCK_SIZE}" count="${BIG_FILE_COUNT}"
@ -721,10 +716,29 @@ function test_multipart_mix {
return 1
fi
# [NOTE]
# For macos, in order to free up disk space for statvfs (or df command),
# it is necessary to zero out the file size, delete it, and sync it.
# In the case of macos, even if you delete a file, there seems to be a
# delay in the free space being reflected.
# Testing the ensure_diskfree option shows that if this is not done, free
# disk space will be exhausted.
#
if [ "$(uname)" = "Darwin" ]; then
cat /dev/null > "${TEMP_DIR}/${BIG_FILE}"
cat /dev/null > "${TEMP_DIR}/${BIG_FILE}-mix"
cat /dev/null > "${BIG_FILE}"
cat /dev/null > "${BIG_FILE}-mix"
fi
rm -f "${TEMP_DIR}/${BIG_FILE}"
rm -f "${TEMP_DIR}/${BIG_FILE}-mix"
rm_test_file "${BIG_FILE}"
rm_test_file "${BIG_FILE}-mix"
if [ "$(uname)" = "Darwin" ]; then
sync
fi
}
function test_utimens_during_multipart {
@ -770,8 +784,18 @@ function test_hardlink {
echo foo > "${TEST_TEXT_FILE}"
(
set +o pipefail
ln "${TEST_TEXT_FILE}" "${ALT_TEST_TEXT_FILE}" 2>&1 | grep -q -e 'Operation not supported' -e 'Not supported'
if ! uname | grep -q Darwin; then
set +o pipefail
ln "${TEST_TEXT_FILE}" "${ALT_TEST_TEXT_FILE}" 2>&1 | grep -q -e 'Operation not supported' -e 'Not supported'
else
# [macos] fuse-t
# ln does not return an error code and writes nothing to stderr
#
ln "${TEST_TEXT_FILE}" "${ALT_TEST_TEXT_FILE}"
if stat "${ALT_TEST_TEXT_FILE}" >/dev/null 2>&1; then
exit 1
fi
fi
)
rm_test_file
@ -813,9 +837,18 @@ function test_extended_attributes {
touch "${TEST_TEXT_FILE}"
# set value
set_xattr key1 value0 "${TEST_TEXT_FILE}"
get_xattr key1 "${TEST_TEXT_FILE}" | grep -q '^value0$'
# over write value
set_xattr key1 value1 "${TEST_TEXT_FILE}"
get_xattr key1 "${TEST_TEXT_FILE}" | grep -q '^value1$'
# [NOTE]
# macOS still caches extended attributes even when told not to.
# Thus we need to wait one second here.
wait_ostype 1 "Darwin"
# append value
set_xattr key2 value2 "${TEST_TEXT_FILE}"
get_xattr key1 "${TEST_TEXT_FILE}" | grep -q '^value1$'
@ -858,27 +891,7 @@ function test_mtime_file {
local altatime; altatime=$(get_atime "${ALT_TEST_TEXT_FILE}")
if [ "${testmtime}" != "${altmtime}" ] || [ "${testctime}" = "${altctime}" ] || [ "${testatime}" != "${altatime}" ]; then
# [NOTE]{FIXME]
# On macos10, the mtime of the file copied by "cp -p" is
# truncated to usec from nsec, and it cannot be solved.
# This is because the timespec.tv_sec value of the mtime
# of the original file is truncated in usec units at calling
# s3fs_utimens.
# (ex. "1658768609.505917125" vs "1658768609.505917000")
# Now this workaround is not found, so for macos compare
# mtime with only usec.
#
if ! uname | grep -q Darwin; then
echo "cp(-p) expected times: mtime( ${testmtime} == ${altmtime} ), ctime( ${testctime} != ${altctime} ), atime( ${testatime} == ${altatime} )"
return 1
else
testmtime=$(echo "${testmtime}" | cut -c 1-17)
altmtime=$(echo "${altmtime}" | cut -c 1-17)
if [ "${testmtime}" != "${altmtime}" ] || [ "${testctime}" = "${altctime}" ] || [ "${testatime}" != "${altatime}" ]; then
echo "cp(-p) expected times: mtime( ${testmtime} == ${altmtime} ), ctime( ${testctime} != ${altctime} ), atime( ${testatime} == ${altatime} )"
return 1
fi
fi
echo "cp(-p) expected times: mtime( ${testmtime} == ${altmtime} ), ctime( ${testctime} != ${altctime} ), atime( ${testatime} == ${altatime} )"
fi
rm_test_file
@ -938,13 +951,35 @@ function test_update_time_chown() {
local base_ctime; base_ctime=$(get_ctime "${TEST_TEXT_FILE}")
local base_mtime; base_mtime=$(get_mtime "${TEST_TEXT_FILE}")
chown $UID "${TEST_TEXT_FILE}"
# [NOTE]
# In this test, chown is called with the same UID.
#
chown "${UID}" "${TEST_TEXT_FILE}"
local atime; atime=$(get_atime "${TEST_TEXT_FILE}")
local ctime; ctime=$(get_ctime "${TEST_TEXT_FILE}")
local mtime; mtime=$(get_mtime "${TEST_TEXT_FILE}")
if [ "${base_atime}" != "${atime}" ] || [ "${base_ctime}" = "${ctime}" ] || [ "${base_mtime}" != "${mtime}" ]; then
echo "chown expected updated ctime: $base_ctime != $ctime and same mtime: $base_mtime == $mtime, atime: $base_atime == $atime"
return 1
if ! uname | grep -q Darwin; then
if [ "${base_atime}" != "${atime}" ] || [ "${base_ctime}" = "${ctime}" ] || [ "${base_mtime}" != "${mtime}" ]; then
echo "chown expected updated ctime: $base_ctime != $ctime and same mtime: $base_mtime == $mtime, atime: $base_atime == $atime"
return 1
fi
else
# [FIXME] macos fuse-t
# macos fuse-t doesn't update stat if UID doesn't change.
# There is a way to specify "uid=1000" with aws cli and use sudo when chown is executed, but the
# test is not finished.
# For now, we are just leaving the chown call with the same UID as the parameter.
# This test will be fixed in the future.
if [ "${base_atime}" = "${atime}" ] || [ "${base_ctime}" = "${ctime}" ] || [ "${base_mtime}" = "${mtime}" ]; then
if [ "${base_atime}" = "${atime}" ] && [ "${base_ctime}" = "${ctime}" ] && [ "${base_mtime}" = "${mtime}" ]; then
echo "[FIXME] Doing a temporary test bypass : same ctime $base_ctime = $ctime and same mtime: $base_mtime = $mtime and same atime: $base_atime = $atime"
else
echo "chown expected updated ctime: $base_ctime != $ctime and same mtime: $base_mtime != $mtime, atime: $base_atime != $atime"
return 1
fi
fi
fi
rm_test_file
}
@ -1011,12 +1046,24 @@ function test_update_time_touch_a() {
# "touch -a" -> update ctime/atime, not update mtime
#
touch -a "${TEST_TEXT_FILE}"
local atime; atime=$(get_atime "${TEST_TEXT_FILE}")
local ctime; ctime=$(get_ctime "${TEST_TEXT_FILE}")
local mtime; mtime=$(get_mtime "${TEST_TEXT_FILE}")
if [ "${base_atime}" = "${atime}" ] || [ "${base_ctime}" = "${ctime}" ] || [ "${base_mtime}" != "${mtime}" ]; then
echo "touch with -a option expected updated ctime: $base_ctime != $ctime, atime: $base_atime != $atime and same mtime: $base_mtime == $mtime"
return 1
if ! uname | grep -q Darwin; then
if [ "${base_atime}" = "${atime}" ] || [ "${base_ctime}" = "${ctime}" ] || [ "${base_mtime}" != "${mtime}" ]; then
echo "touch with -a option expected updated ctime: $base_ctime != $ctime, atime: $base_atime != $atime and same mtime: $base_mtime == $mtime"
return 1
fi
else
# [macos] fuse-t
# atime/ctime/mtime are all updated.
#
if [ "${base_atime}" = "${atime}" ] || [ "${base_ctime}" = "${ctime}" ] || [ "${base_mtime}" = "${mtime}" ]; then
echo "touch with -a option expected updated ctime: $base_ctime != $ctime, atime: $base_atime != $atime and same mtime: $base_mtime != $mtime"
return 1
fi
fi
rm_test_file
}
@ -1067,6 +1114,8 @@ function test_update_time_cp_p() {
echo "cp with -p option expected updated ctime: $base_ctime != $ctime and same mtime: $base_mtime == $mtime, atime: $base_atime == $atime"
return 1
fi
rm_test_file
rm_test_file "${TIME_TEST_TEXT_FILE}"
}
function test_update_time_mv() {
@ -1146,9 +1195,20 @@ function test_update_directory_time_chown {
local atime; atime=$(get_atime "${TEST_DIR}")
local ctime; ctime=$(get_ctime "${TEST_DIR}")
local mtime; mtime=$(get_mtime "${TEST_DIR}")
if [ "${base_atime}" != "${atime}" ] || [ "${base_ctime}" = "${ctime}" ] || [ "${base_mtime}" != "${mtime}" ]; then
echo "chown expected updated ctime: $base_ctime != $ctime and same mtime: $base_mtime == $mtime, atime: $base_atime == $atime"
return 1
if ! uname | grep -q Darwin; then
if [ "${base_atime}" != "${atime}" ] || [ "${base_ctime}" = "${ctime}" ] || [ "${base_mtime}" != "${mtime}" ]; then
echo "chown expected updated ctime: $base_ctime != $ctime and same mtime: $base_mtime == $mtime, atime: $base_atime == $atime"
return 1
fi
else
# [macos] fuse-t
# atime/ctime/mtime are not updated.
#
if [ "${base_atime}" != "${atime}" ] || [ "${base_ctime}" != "${ctime}" ] || [ "${base_mtime}" != "${mtime}" ]; then
echo "touch with -a option expected updated ctime: $base_ctime == $ctime, atime: $base_atime == $atime and same mtime: $base_mtime == $mtime"
return 1
fi
fi
rm -rf "${TEST_DIR}"
@ -1171,9 +1231,20 @@ function test_update_directory_time_set_xattr {
local atime; atime=$(get_atime "${TEST_DIR}")
local ctime; ctime=$(get_ctime "${TEST_DIR}")
local mtime; mtime=$(get_mtime "${TEST_DIR}")
if [ "${base_atime}" != "${atime}" ] || [ "${base_ctime}" = "${ctime}" ] || [ "${base_mtime}" != "${mtime}" ]; then
echo "set_xattr expected updated ctime: $base_ctime != $ctime and same mtime: $base_mtime == $mtime, atime: $base_atime == $atime"
return 1
if ! uname | grep -q Darwin; then
if [ "${base_atime}" != "${atime}" ] || [ "${base_ctime}" = "${ctime}" ] || [ "${base_mtime}" != "${mtime}" ]; then
echo "set_xattr expected updated ctime: $base_ctime != $ctime and same mtime: $base_mtime == $mtime, atime: $base_atime == $atime"
return 1
fi
else
# [macos] fuse-t
# atime/mtime are not updated.
#
if [ "${base_atime}" != "${atime}" ] || [ "${base_ctime}" = "${ctime}" ] || [ "${base_mtime}" != "${mtime}" ]; then
echo "set_xattr expected updated ctime: $base_ctime != $ctime and same mtime: $base_mtime == $mtime, atime: $base_atime == $atime"
return 1
fi
fi
rm -rf "${TEST_DIR}"
@ -1221,9 +1292,20 @@ function test_update_directory_time_touch_a {
local atime; atime=$(get_atime "${TEST_DIR}")
local ctime; ctime=$(get_ctime "${TEST_DIR}")
local mtime; mtime=$(get_mtime "${TEST_DIR}")
if [ "${base_atime}" = "${atime}" ] || [ "${base_ctime}" = "${ctime}" ] || [ "${base_mtime}" != "${mtime}" ]; then
echo "touch with -a option expected updated ctime: $base_ctime != $ctime, atime: $base_atime != $atime and same mtime: $base_mtime == $mtime"
return 1
if ! uname | grep -q Darwin; then
if [ "${base_atime}" = "${atime}" ] || [ "${base_ctime}" = "${ctime}" ] || [ "${base_mtime}" != "${mtime}" ]; then
echo "touch with -a option expected updated ctime: $base_ctime != $ctime, atime: $base_atime != $atime and same mtime: $base_mtime == $mtime"
return 1
fi
else
# [macos] fuse-t
# atime/ctime/mtime are all updated.
#
if [ "${base_atime}" = "${atime}" ] || [ "${base_ctime}" = "${ctime}" ] || [ "${base_mtime}" = "${mtime}" ]; then
echo "touch with -a option expected updated ctime: $base_ctime != $ctime, atime: $base_atime != $atime and same mtime: $base_mtime != $mtime"
return 1
fi
fi
rm -rf "${TEST_DIR}"
@ -1334,7 +1416,8 @@ function test_update_parent_directory_time_sub() {
local TEST_PARENTDIR_DIR_MV="${TEST_PARENTDIR_PARENT}/testdir2"
#
# Create file -> Update parent directory's mtime/ctime
# Create file -> Darwin: Not update any
# -> Others: Update parent directory's mtime/ctime
#
local base_atime; base_atime=$(get_atime "${TEST_PARENTDIR_PARENT}")
local base_ctime; base_ctime=$(get_ctime "${TEST_PARENTDIR_PARENT}")
@ -1864,12 +1947,28 @@ function test_concurrent_reads {
function test_concurrent_writes {
describe "Test concurrent writes to a file ..."
../../junk_data $((BIG_FILE_BLOCK_SIZE * BIG_FILE_COUNT)) > "${TEST_TEXT_FILE}"
for _ in $(seq 10); do
NUM_PROCS=10
PIDS=()
for _ in $(seq "${NUM_PROCS}"); do
dd if=/dev/zero of="${TEST_TEXT_FILE}" seek=$((RANDOM % BIG_FILE_LENGTH)) count=16 bs=1024 conv=notrunc &
PIDS+=($!)
done
GRC=0
for PID in "${PIDS[@]}"; do
wait "${PID}"
RC=$?
[ $RC -ne 0 ] && GRC="${RC}"
done
wait
rm_test_file
if [ "${GRC}" -ne 0 ]; then
echo "unexpected return code: $GRC"
return 1
fi
}
function test_open_second_fd {
@ -1913,6 +2012,7 @@ function test_clean_up_cache() {
local file_cnt="${#file_list[@]}"
if [ "${file_cnt}" != "${count}" ]; then
echo "Expected $count files but got ${file_cnt}"
echo "Files: " "${file_list[@]}"
rm -rf "${dir}"
return 1
fi
@ -1931,32 +2031,16 @@ function test_content_type() {
local DIR_NAME; DIR_NAME=$(basename "${PWD}")
touch "test.txt"
local CONTENT_TYPE; CONTENT_TYPE=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "${DIR_NAME}/test.txt" | grep "ContentType")
if ! echo "${CONTENT_TYPE}" | grep -q "text/plain"; then
echo "Unexpected Content-Type: ${CONTENT_TYPE}"
return 1;
fi
check_content_type "${DIR_NAME}/test.txt" "text/plain"
touch "test.jpg"
CONTENT_TYPE=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "${DIR_NAME}/test.jpg" | grep "ContentType")
if ! echo "${CONTENT_TYPE}" | grep -q "image/jpeg"; then
echo "Unexpected Content-Type: ${CONTENT_TYPE}"
return 1;
fi
check_content_type "${DIR_NAME}/test.jpg" "image/jpeg"
touch "test.bin"
CONTENT_TYPE=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "${DIR_NAME}/test.bin" | grep "ContentType")
if ! echo "${CONTENT_TYPE}" | grep -q "application/octet-stream"; then
echo "Unexpected Content-Type: ${CONTENT_TYPE}"
return 1;
fi
check_content_type "${DIR_NAME}/test.bin" "application/octet-stream"
mkdir "test.dir"
CONTENT_TYPE=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "${DIR_NAME}/test.dir/" | grep "ContentType")
if ! echo "${CONTENT_TYPE}" | grep -q "application/x-directory"; then
echo "Unexpected Content-Type: ${CONTENT_TYPE}"
return 1;
fi
check_content_type "${DIR_NAME}/test.dir/" "application/x-directory"
rm -f test.txt
rm -f test.jpg
@ -1994,8 +2078,7 @@ function test_cache_file_stat() {
# get cache file inode number
#
local CACHE_FILE_INODE
# shellcheck disable=SC2012
CACHE_FILE_INODE=$(ls -i "${CACHE_DIR}/${TEST_BUCKET_1}/${CACHE_TESTRUN_DIR}/${BIG_FILE}" 2>/dev/null | awk '{print $1}')
CACHE_FILE_INODE=$(get_inode "${CACHE_DIR}/${TEST_BUCKET_1}/${CACHE_TESTRUN_DIR}/${BIG_FILE}")
if [ -z "${CACHE_FILE_INODE}" ]; then
echo "Not found cache file or failed to get inode: ${CACHE_DIR}/${TEST_BUCKET_1}/${CACHE_TESTRUN_DIR}/${BIG_FILE}"
return 1;
@ -2038,8 +2121,7 @@ function test_cache_file_stat() {
#
# get cache file inode number
#
# shellcheck disable=SC2012
CACHE_FILE_INODE=$(ls -i "${CACHE_DIR}/${TEST_BUCKET_1}/${CACHE_TESTRUN_DIR}/${BIG_FILE}" 2>/dev/null | awk '{print $1}')
CACHE_FILE_INODE=$(get_inode "${CACHE_DIR}/${TEST_BUCKET_1}/${CACHE_TESTRUN_DIR}/${BIG_FILE}")
if [ -z "${CACHE_FILE_INODE}" ]; then
echo "Not found cache file or failed to get inode: ${CACHE_DIR}/${TEST_BUCKET_1}/${CACHE_TESTRUN_DIR}/${BIG_FILE}"
return 1;
@ -2184,11 +2266,7 @@ function test_ensurespace_move_file() {
# Backup file stat
#
local ORIGINAL_PERMISSIONS
if [ "$(uname)" = "Darwin" ]; then
ORIGINAL_PERMISSIONS=$(stat -f "%u:%g" "${CACHE_DIR}/.s3fs_test_tmpdir/${BIG_FILE}")
else
ORIGINAL_PERMISSIONS=$(stat --format=%u:%g "${CACHE_DIR}/.s3fs_test_tmpdir/${BIG_FILE}")
fi
ORIGINAL_PERMISSIONS=$(get_user_and_group "${CACHE_DIR}/.s3fs_test_tmpdir/${BIG_FILE}")
#
# Fill the disk size
@ -2214,14 +2292,9 @@ function test_ensurespace_move_file() {
# file stat
#
local MOVED_PERMISSIONS
if [ "$(uname)" = "Darwin" ]; then
MOVED_PERMISSIONS=$(stat -f "%u:%g" "${BIG_FILE}")
else
MOVED_PERMISSIONS=$(stat --format=%u:%g "${BIG_FILE}")
fi
MOVED_PERMISSIONS=$(get_user_and_group "${BIG_FILE}")
local MOVED_FILE_LENGTH
# shellcheck disable=SC2012
MOVED_FILE_LENGTH=$(ls -l "${BIG_FILE}" | awk '{print $5}')
MOVED_FILE_LENGTH=$(get_size "${BIG_FILE}")
#
# check
@ -2252,142 +2325,61 @@ function test_not_existed_dir_obj() {
echo data1 | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME_1}"
echo data2 | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME_2}"
# shellcheck disable=SC2009
if ps u -p "${S3FS_PID}" | grep -q compat_dir; then
#
# with "compat_dir", found directories and files
#
# Top directory
# shellcheck disable=SC2010
if ! ls -1 | grep -q '^not_existed_dir_single$'; then
echo "Expect to find \"not_existed_dir_single\" directory, but it is not found"
return 1;
fi
# shellcheck disable=SC2010
if ! ls -1 | grep -q '^not_existed_dir_parent$'; then
echo "Expect to find \"not_existed_dir_parent\" directory, but it is not found"
return 1;
fi
# Single nest directory
# shellcheck disable=SC2010
if ! ls -d not_existed_dir_single | grep -q '^not_existed_dir_single$'; then
echo "Expect to find \"not_existed_dir_single\" directory, but it is not found"
return 1;
fi
# shellcheck disable=SC2010
if ! ls -1 not_existed_dir_single | grep -q "^${TEST_TEXT_FILE}\$"; then
echo "Expect to find \"not_existed_dir_single/${TEST_TEXT_FILE}\" file, but it is not found"
return 1;
fi
# shellcheck disable=SC2010
if ! ls -1 "not_existed_dir_single/${TEST_TEXT_FILE}" | grep -q "^not_existed_dir_single/${TEST_TEXT_FILE}\$"; then
echo "Expect to find \"not_existed_dir_single/${TEST_TEXT_FILE}\" file, but it is not found"
return 1;
fi
# Double nest directory
# shellcheck disable=SC2010
if ! ls -d not_existed_dir_parent | grep -q '^not_existed_dir_parent'; then
echo "Expect to find \"not_existed_dir_parent\" directory, but it is not found"
return 1;
fi
# shellcheck disable=SC2010
if ! ls -1 not_existed_dir_parent | grep -q '^not_existed_dir_child'; then
echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child\" directory, but it is not found"
return 1;
fi
# shellcheck disable=SC2010
if ! ls -d not_existed_dir_parent/not_existed_dir_child | grep -q '^not_existed_dir_parent/not_existed_dir_child'; then
echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child\" directory, but it is not found"
return 1;
fi
# shellcheck disable=SC2010
if ! ls -1 not_existed_dir_parent/not_existed_dir_child | grep -q "^${TEST_TEXT_FILE}\$"; then
echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}\" directory, but it is not found"
return 1;
fi
# shellcheck disable=SC2010
if ! ls -1 "not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}" | grep -q "^not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}\$"; then
echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}\" directory, but it is not found"
return 1;
fi
rm -rf not_existed_dir_single
rm -rf not_existed_dir_parent
else
#
# without "compat_dir", found directories and files
#
# [NOTE]
# If specify a directory path, the file under that directory will be found.
# And if specify a file full path, it will be found.
#
# Top directory
# shellcheck disable=SC2010
if ls -1 | grep -q '^not_existed_dir_single$'; then
echo "Expect to not find \"not_existed_dir_single\" directory, but it is found"
return 1;
fi
# shellcheck disable=SC2010
if ls -1 | grep -q '^not_existed_dir_parent$'; then
echo "Expect to not find \"not_existed_dir_parent\" directory, but it is found"
return 1;
fi
# Single nest directory
# shellcheck disable=SC2010
if ! ls -d not_existed_dir_single | grep -q '^not_existed_dir_single$'; then
echo "Expect to find \"not_existed_dir_single\" directory, but it is not found"
return 1;
fi
# shellcheck disable=SC2010
if ! ls -1 not_existed_dir_single | grep -q "^${TEST_TEXT_FILE}\$"; then
echo "Expect to find \"not_existed_dir_single/${TEST_TEXT_FILE}\" file, but it is not found"
return 1;
fi
# shellcheck disable=SC2010
if ! ls -1 "not_existed_dir_single/${TEST_TEXT_FILE}" | grep -q "^not_existed_dir_single/${TEST_TEXT_FILE}\$"; then
echo "Expect to find \"not_existed_dir_single/${TEST_TEXT_FILE}\" file, but it is not found"
return 1;
fi
# Double nest directory
# shellcheck disable=SC2010
if ! ls -d not_existed_dir_parent | grep -q '^not_existed_dir_parent'; then
echo "Expect to find \"not_existed_dir_parent\" directory, but it is not found"
return 1;
fi
# shellcheck disable=SC2010
if ls -1 not_existed_dir_parent | grep -q '^not_existed_dir_child'; then
echo "Expect to not find \"not_existed_dir_parent/not_existed_dir_child\" directory, but it is found"
return 1;
fi
# shellcheck disable=SC2010
if ! ls -d not_existed_dir_parent/not_existed_dir_child | grep -q '^not_existed_dir_parent/not_existed_dir_child'; then
echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child\" directory, but it is not found"
return 1;
fi
# shellcheck disable=SC2010
if ! ls -1 not_existed_dir_parent/not_existed_dir_child | grep -q "^${TEST_TEXT_FILE}\$"; then
echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}\" directory, but it is not found"
return 1;
fi
# shellcheck disable=SC2010
if ! ls -1 "not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}" | grep -q "^not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}\$"; then
echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}\" directory, but it is not found"
return 1;
fi
rm -rf not_existed_dir_single
# [NOTE]
# This case could not remove sub directory, then below command will be failed.
#rm -rf not_existed_dir_parent
# Top directory
# shellcheck disable=SC2010
if ! ls -1 | grep -q '^not_existed_dir_single$'; then
echo "Expect to find \"not_existed_dir_single\" directory, but it is not found"
return 1
fi
# shellcheck disable=SC2010
if ! ls -1 | grep -q '^not_existed_dir_parent$'; then
echo "Expect to find \"not_existed_dir_parent\" directory, but it is not found"
return 1
fi
# Single nest directory
if ! stat not_existed_dir_single; then
echo "Expect to find \"not_existed_dir_single\" directory, but it is not found"
return 1
fi
# shellcheck disable=SC2010
if ! ls -1 not_existed_dir_single | grep -q "^${TEST_TEXT_FILE}\$"; then
echo "Expect to find \"not_existed_dir_single/${TEST_TEXT_FILE}\" file, but it is not found"
return 1
fi
# shellcheck disable=SC2010
if ! ls -1 "not_existed_dir_single/${TEST_TEXT_FILE}" | grep -q "^not_existed_dir_single/${TEST_TEXT_FILE}\$"; then
echo "Expect to find \"not_existed_dir_single/${TEST_TEXT_FILE}\" file, but it is not found"
return 1
fi
# Double nest directory
if ! stat not_existed_dir_parent; then
echo "Expect to find \"not_existed_dir_parent\" directory, but it is not found"
return 1
fi
# shellcheck disable=SC2010
if ! ls -1 not_existed_dir_parent | grep -q '^not_existed_dir_child'; then
echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child\" directory, but it is not found"
return 1
fi
if ! stat not_existed_dir_parent/not_existed_dir_child; then
echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child\" directory, but it is not found"
return 1
fi
# shellcheck disable=SC2010
if ! ls -1 not_existed_dir_parent/not_existed_dir_child | grep -q "^${TEST_TEXT_FILE}\$"; then
echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}\" directory, but it is not found"
return 1
fi
# shellcheck disable=SC2010
if ! ls -1 "not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}" | grep -q "^not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}\$"; then
echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}\" directory, but it is not found"
return 1
fi
rm -rf not_existed_dir_single
rm -rf not_existed_dir_parent
}
function test_ut_ossfs {
@ -2474,8 +2466,7 @@ function test_write_data_with_skip() {
#
# delete cache file if using cache
#
# shellcheck disable=SC2009
if ps u -p "${S3FS_PID}" | grep -q use_cache; then
if s3fs_args | grep -q use_cache; then
rm -f "${CACHE_DIR}/${TEST_BUCKET_1}/${CACHE_TESTRUN_DIR}/${_SKIPWRITE_FILE}"
rm -f "${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${CACHE_TESTRUN_DIR}/${_SKIPWRITE_FILE}"
fi
@ -2491,8 +2482,7 @@ function test_write_data_with_skip() {
# [NOTE]
# This test uses the file used in the previous test as an existing file.
#
# shellcheck disable=SC2009
if ps u -p "${S3FS_PID}" | grep -q use_cache; then
if s3fs_args | grep -q use_cache; then
rm -f "${CACHE_DIR}/${TEST_BUCKET_1}/${CACHE_TESTRUN_DIR}/${_SKIPWRITE_FILE}"
rm -f "${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${CACHE_TESTRUN_DIR}/${_SKIPWRITE_FILE}"
fi
@ -2529,8 +2519,7 @@ function test_write_data_with_skip() {
#
# delete cache file if using cache
#
# shellcheck disable=SC2009
if ps u -p "${S3FS_PID}" | grep -q use_cache; then
if s3fs_args | grep -q use_cache; then
rm -f "${CACHE_DIR}/${TEST_BUCKET_1}/${CACHE_TESTRUN_DIR}/${_SKIPWRITE_FILE}"
rm -f "${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${CACHE_TESTRUN_DIR}/${_SKIPWRITE_FILE}"
fi
@ -2547,6 +2536,76 @@ function test_write_data_with_skip() {
rm_test_file "${_TMP_SKIPWRITE_FILE}"
}
function test_not_boundary_writes {
describe "Test non-boundary write ..."
# [MEMO]
# Files used in this test, multipart related sizes, etc.
#
# Test file size: 25MB(25 * 1024 * 1024)
# Multipart size: 10MB
# Multipart minimum upload size: 5MB
#
# The multipart upload part that should be executed here is as follows:
# Part number 1: 0 - 10,485,759 (size = 10MB)
# Part number 2: 10,485,760 - 20,971,519 (size = 10MB)
# Part number 3: 20,971,520 - 26,214,399 (size = 5MB)
#
local BOUNDARY_TEST_FILE_SIZE; BOUNDARY_TEST_FILE_SIZE=$((BIG_FILE_BLOCK_SIZE * BIG_FILE_COUNT))
../../junk_data "${BOUNDARY_TEST_FILE_SIZE}" > "${TEST_TEXT_FILE}"
#
# Write in First boundary
#
# Write 0 - 3,145,727(3MB) : less than the multipart minimum size from the beginning
dd if=/dev/zero of="${TEST_TEXT_FILE}" seek=0 count=3072 bs=1024 conv=notrunc
# Write 0 - 7,340,031(7MB) : multipart exceeding the minimum size from the beginning
dd if=/dev/zero of="${TEST_TEXT_FILE}" seek=0 count=7168 bs=1024 conv=notrunc
# Write 0 - 12,582,911(12MB) : beyond the multipart size boundary from the beginning
dd if=/dev/zero of="${TEST_TEXT_FILE}" seek=0 count=12288 bs=1024 conv=notrunc
#
# Write in First and second boundary
#
# Write 3,145,728 - 4,194,303(1MB) : less than the minimum multipart size from the middle of the first multipart area
dd if=/dev/zero of="${TEST_TEXT_FILE}" seek=3072 count=1024 bs=1024 conv=notrunc
# Write 3,145,728 - 9,437,183(6MB) : exceeding the minimum multipart size from the middle of the first multipart area
dd if=/dev/zero of="${TEST_TEXT_FILE}" seek=3072 count=6144 bs=1024 conv=notrunc
# Write 3,145,728 - 12,582,911(9MB) : beyond the multipart boundary from the middle of the first multipart area
dd if=/dev/zero of="${TEST_TEXT_FILE}" seek=3072 count=9216 bs=1024 conv=notrunc
#
# Write in Second boundary
#
# Write 12,582,912 - 14,680,063(2MB) : below the minimum multipart size from the middle of the multipart area
dd if=/dev/zero of="${TEST_TEXT_FILE}" seek=12288 count=2048 bs=1024 conv=notrunc
# Write 12,582,912 - 18,874,367(6MB) : data exceeding the minimum multipart size from the middle of the multipart area
dd if=/dev/zero of="${TEST_TEXT_FILE}" seek=12288 count=6144 bs=1024 conv=notrunc
# Write 12,582,912 - 23,068,671(10MB) : beyond the multipart boundary from the middle of the multipart area
dd if=/dev/zero of="${TEST_TEXT_FILE}" seek=12288 count=10240 bs=1024 conv=notrunc
# Write 12,582,912 - 26,214,399(13MB) : beyond the multipart boundary(last) from the middle of the multipart area
dd if=/dev/zero of="${TEST_TEXT_FILE}" seek=12288 count=13312 bs=1024 conv=notrunc
#
# Write in Last boundary
#
# Write 23,068,672 - 24,117,247(1MB) : below the minimum multipart size from the middle of the final multipart area
dd if=/dev/zero of="${TEST_TEXT_FILE}" seek=22528 count=1024 bs=1024 conv=notrunc
# Write 23,068,672 - 26,214,399(3MB) : beyond the multipart boundary(last) from the middle of the final multipart area
dd if=/dev/zero of="${TEST_TEXT_FILE}" seek=22528 count=3072 bs=1024 conv=notrunc
rm_test_file
}
function test_chmod_mountpoint {
describe "Testing chmod to mount point..."
@ -2571,11 +2630,7 @@ function test_chown_mountpoint {
local MOUNTPOINT_DIR; MOUNTPOINT_DIR=$(cd ..; pwd)
local ORIGINAL_PERMISSIONS
if [ "$(uname)" = "Darwin" ]; then
ORIGINAL_PERMISSIONS=$(stat -f "%u:%g" "${MOUNTPOINT_DIR}")
else
ORIGINAL_PERMISSIONS=$(stat --format=%u:%g "${MOUNTPOINT_DIR}")
fi
ORIGINAL_PERMISSIONS=$(get_user_and_group "${MOUNTPOINT_DIR}")
# [NOTE]
# Prevents test interruptions due to permission errors, etc.
@ -2587,11 +2642,7 @@ function test_chown_mountpoint {
# if they're the same, we have a problem.
local CHANGED_PERMISSIONS
if [ "$(uname)" = "Darwin" ]; then
CHANGED_PERMISSIONS=$(stat -f "%u:%g" "${MOUNTPOINT_DIR}")
else
CHANGED_PERMISSIONS=$(stat --format=%u:%g "${MOUNTPOINT_DIR}")
fi
CHANGED_PERMISSIONS=$(get_user_and_group "${MOUNTPOINT_DIR}")
if [ "${CHANGED_PERMISSIONS}" = "${ORIGINAL_PERMISSIONS}" ]
then
if [ "${ORIGINAL_PERMISSIONS}" = "1000:1000" ]
@@ -2625,14 +2676,57 @@ function test_time_mountpoint {
fi
}
function test_file_names_longer_than_posix() {
local DIR_NAME; DIR_NAME=$(basename "${PWD}")
a256="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
#a256="aaaa"
if ! touch "${a256}"; then
echo "could not create long file name"
return 1
fi
rm -f "${a256}"
echo data | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${DIR_NAME}/${a256}"
files=(*)
if [ "${#files[@]}" = 0 ]; then
echo "failed to list long file name"
return 1
fi
rm -f "${a256}"
}
function test_statvfs() {
describe "Testing the free/available size on mount point(statvfs)..."
# [NOTE]
# The df command output format differs between Linux and macOS,
# but the order of the Total/Used/Available sizes is the same.
#
local MOUNTPOINT_DIR; MOUNTPOINT_DIR=$(cd ..; pwd)
local DF_RESULT; DF_RESULT=$(df "${MOUNTPOINT_DIR}" 2>/dev/null | tail -n +2)
local TOTAL_SIZE; TOTAL_SIZE=$(echo "${DF_RESULT}" | awk '{print $2}')
local USED_SIZE; USED_SIZE=$(echo "${DF_RESULT}" | awk '{print $3}')
local AVAIL_SIZE; AVAIL_SIZE=$(echo "${DF_RESULT}" | awk '{print $4}')
# [NOTE]
# In the disk information (statvfs) provided by s3fs, the Total and Available
# sizes are always equal and non-zero, and the Used size is always 0.
#
if [ -z "${TOTAL_SIZE}" ] || [ -z "${AVAIL_SIZE}" ] || [ -z "${USED_SIZE}" ] || [ "${TOTAL_SIZE}" = "0" ] || [ "${AVAIL_SIZE}" = "0" ] || [ "${TOTAL_SIZE}" != "${AVAIL_SIZE}" ] || [ "${USED_SIZE}" != "0" ]; then
echo "The result of df <mount point> command is wrong: Total=${TOTAL_SIZE}, Used=${USED_SIZE}, Available=${AVAIL_SIZE}"
return 1
fi
}
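# [NOTE]
# For reference, df output for an s3fs mount point is therefore expected to
# show Total == Available and Used == 0, roughly like the following
# (sizes are illustrative, not captured output):
#   $ df "${MOUNTPOINT_DIR}" | tail -n +2
#   s3fs  274877906944  0  274877906944  0%  /path/to/mountpoint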
function add_all_tests {
# shellcheck disable=SC2009
if ps u -p "${S3FS_PID}" | grep -q use_cache; then
if s3fs_args | grep -q use_cache; then
add_tests test_cache_file_stat
add_tests test_zero_cache_file_stat
else
add_tests test_file_names_longer_than_posix
fi
# shellcheck disable=SC2009
if ! ps u -p "${S3FS_PID}" | grep -q ensure_diskfree && ! uname | grep -q Darwin; then
if ! s3fs_args | grep -q ensure_diskfree && ! uname | grep -q Darwin; then
add_tests test_clean_up_cache
fi
add_tests test_create_empty_file
@@ -2693,12 +2787,10 @@ function add_all_tests {
fi
add_tests test_update_directory_time_subdir
add_tests test_update_chmod_opened_file
# shellcheck disable=SC2009
if ps u -p "${S3FS_PID}" | grep -q update_parent_dir_stat; then
if s3fs_args | grep -q update_parent_dir_stat; then
add_tests test_update_parent_directory_time
fi
# shellcheck disable=SC2009
if ! ps u -p "${S3FS_PID}" | grep -q use_xattr; then
if ! s3fs_args | grep -q use_xattr; then
add_tests test_posix_acl
fi
@@ -2716,14 +2808,17 @@ function add_all_tests {
add_tests test_truncate_cache
add_tests test_upload_sparsefile
add_tests test_mix_upload_entities
add_tests test_not_existed_dir_obj
# TODO: investigate why only Alpine cannot see the implicit directory objects.
if ! test -f /etc/os-release || ! grep -q -i -e 'ID=alpine' -e 'ID="alpine"' /etc/os-release; then
add_tests test_not_existed_dir_obj
fi
add_tests test_ut_ossfs
add_tests test_cr_filename
# shellcheck disable=SC2009
if ! ps u -p "${S3FS_PID}" | grep -q ensure_diskfree && ! uname | grep -q Darwin; then
if ! s3fs_args | grep -q ensure_diskfree && ! uname | grep -q Darwin; then
add_tests test_ensurespace_move_file
fi
add_tests test_write_data_with_skip
add_tests test_not_boundary_writes
# [NOTE]
# The test on CI will fail depending on the permissions, so skip these(chmod/chown).
@@ -2731,6 +2826,7 @@
# add_tests test_chmod_mountpoint
# add_tests test_chown_mountpoint
add_tests test_time_mountpoint
add_tests test_statvfs
}
init_suite


@@ -20,21 +20,20 @@
// Generate junk data at high speed. An alternative to dd if=/dev/urandom.
#include <stdio.h>
#include <stdlib.h>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
int main(int argc, char *argv[])
int main(int argc, const char *argv[])
{
if (argc != 2) {
return 1;
}
long long count = strtoull(argv[1], NULL, 10);
uint64_t count = strtoull(argv[1], nullptr, 10);
char buf[128 * 1024];
long long i;
for (i = 0; i < count; i += sizeof(buf)) {
long long j;
for (j = 0; j < sizeof(buf) / sizeof(i); ++j) {
*((long long *)buf + j) = i / sizeof(i) + j;
for (uint64_t i = 0; i < count; i += sizeof(buf)) {
for (uint64_t j = 0; j < sizeof(buf) / sizeof(i); ++j) {
*(reinterpret_cast<uint64_t *>(buf) + j) = i / sizeof(i) + j;
}
fwrite(buf, 1, sizeof(buf) > count - i ? count - i : sizeof(buf), stdout);
}
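For reference, the shell tests above drive this generator to build the 25MB boundary-test file; a typical invocation (the size is assumed from the memo earlier in this diff) looks like:

../../junk_data $((25 * 1024 * 1024)) > "${TEST_TEXT_FILE}"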


@@ -18,15 +18,14 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#ifndef __APPLE__
#include <sys/sysmacros.h>
#endif
@@ -34,27 +33,27 @@
//---------------------------------------------------------
// Const
//---------------------------------------------------------
const char usage_string[] = "Usage : \"mknod_test <base file path>\"";
static constexpr char usage_string[] = "Usage : \"mknod_test <base file path>\"";
const char str_mode_reg[] = "REGULAR";
const char str_mode_chr[] = "CHARACTER";
const char str_mode_blk[] = "BLOCK";
const char str_mode_fifo[] = "FIFO";
const char str_mode_sock[] = "SOCK";
static constexpr char str_mode_reg[] = "REGULAR";
static constexpr char str_mode_chr[] = "CHARACTER";
static constexpr char str_mode_blk[] = "BLOCK";
static constexpr char str_mode_fifo[] = "FIFO";
static constexpr char str_mode_sock[] = "SOCK";
const char str_ext_reg[] = "reg";
const char str_ext_chr[] = "chr";
const char str_ext_blk[] = "blk";
const char str_ext_fifo[] = "fifo";
const char str_ext_sock[] = "sock";
static constexpr char str_ext_reg[] = "reg";
static constexpr char str_ext_chr[] = "chr";
static constexpr char str_ext_blk[] = "blk";
static constexpr char str_ext_fifo[] = "fifo";
static constexpr char str_ext_sock[] = "sock";
// [NOTE]
// It would be nice if PATH_MAX could be used as is, but since there are
// issues using it on Linux and we must also support macOS, this simple
// test program defines a fixed value for simplicity.
//
#define S3FS_TEST_PATH_MAX 255
int max_base_path_length = S3FS_TEST_PATH_MAX - 5;
static constexpr size_t S3FS_TEST_PATH_MAX = 255;
static constexpr size_t MAX_BASE_PATH_LENGTH = S3FS_TEST_PATH_MAX - 5;
//---------------------------------------------------------
// Test function
@@ -73,27 +72,31 @@ bool TestMknod(const char* basepath, mode_t mode)
case S_IFREG:
str_mode = str_mode_reg;
dev = 0;
sprintf(filepath, "%s.%s", basepath, str_ext_reg);
snprintf(filepath, sizeof(filepath), "%s.%s", basepath, str_ext_reg);
filepath[S3FS_TEST_PATH_MAX - 1] = '\0'; // for safety
break;
case S_IFCHR:
str_mode = str_mode_chr;
dev = makedev(0, 0);
sprintf(filepath, "%s.%s", basepath, str_ext_chr);
snprintf(filepath, sizeof(filepath), "%s.%s", basepath, str_ext_chr);
filepath[S3FS_TEST_PATH_MAX - 1] = '\0'; // for safety
break;
case S_IFBLK:
str_mode = str_mode_blk;
dev = makedev((unsigned int)(259), 0); // temporary value
sprintf(filepath, "%s.%s", basepath, str_ext_blk);
dev = makedev((unsigned int)(259), 0); // temporary value
snprintf(filepath, sizeof(filepath), "%s.%s", basepath, str_ext_blk);
filepath[S3FS_TEST_PATH_MAX - 1] = '\0'; // for safety
break;
case S_IFIFO:
str_mode = str_mode_fifo;
dev = 0;
sprintf(filepath, "%s.%s", basepath, str_ext_fifo);
snprintf(filepath, sizeof(filepath), "%s.%s", basepath, str_ext_fifo);
filepath[S3FS_TEST_PATH_MAX - 1] = '\0'; // for safety
break;
case S_IFSOCK:
str_mode = str_mode_sock;
dev = 0;
snprintf(filepath, S3FS_TEST_PATH_MAX, "%s.%s", basepath, str_ext_sock);
snprintf(filepath, sizeof(filepath), "%s.%s", basepath, str_ext_sock);
filepath[S3FS_TEST_PATH_MAX - 1] = '\0'; // for safety
break;
default:
@@ -134,7 +137,7 @@ bool TestMknod(const char* basepath, mode_t mode)
//---------------------------------------------------------
// Main
//---------------------------------------------------------
int main(int argc, char *argv[])
int main(int argc, const char *argv[])
{
// Parse parameters
if(2 != argc){
@@ -146,8 +149,8 @@ int main(int argc, char *argv[])
fprintf(stdout, "%s\n", usage_string);
exit(EXIT_SUCCESS);
}
if(max_base_path_length < strlen(argv[1])){
fprintf(stderr, "[ERROR] Base file path is too long, it must be less than %d\n", max_base_path_length);
if(MAX_BASE_PATH_LENGTH < strlen(argv[1])){
fprintf(stderr, "[ERROR] Base file path is too long, it must be less than %zu\n", MAX_BASE_PATH_LENGTH);
exit(EXIT_FAILURE);
}


@@ -29,39 +29,39 @@ COMMON_FLAGS="-g -O0 -Wno-cpp"
# run tests with libstdc++ debug mode, https://gcc.gnu.org/onlinedocs/libstdc++/manual/debug_mode.html
make clean
./configure CXXFLAGS="$COMMON_FLAGS -D_GLIBCXX_DEBUG"
make
make check -C test/
make --jobs="$(nproc)"
ALL_TESTS=1 make check -C test/
# run tests under AddressSanitizer, https://clang.llvm.org/docs/AddressSanitizer.html
make clean
./configure CXX=clang++ CXXFLAGS="$COMMON_FLAGS -fsanitize=address -fsanitize-address-use-after-scope"
make
ASAN_OPTIONS='detect_leaks=1,detect_stack_use_after_return=1' make check -C test/
make --jobs="$(nproc)"
ALL_TESTS=1 ASAN_OPTIONS='detect_leaks=1,detect_stack_use_after_return=1' make check -C test/
# run tests under MemorySanitizer, https://clang.llvm.org/docs/MemorySanitizer.html
# TODO: this requires a custom libc++
#make clean
#./configure CXX=clang++ CXXFLAGS="$COMMON_FLAGS -fsanitize=memory"
#make
#make check -C test/
#make --jobs="$(nproc)"
#ALL_TESTS=1 make check -C test/
# run tests under ThreadSanitizer, https://clang.llvm.org/docs/ThreadSanitizer.html
make clean
./configure CXX=clang++ CXXFLAGS="$COMMON_FLAGS -fsanitize=thread"
make
TSAN_OPTIONS='halt_on_error=1' make check -C test/
make --jobs="$(nproc)"
ALL_TESTS=1 TSAN_OPTIONS='halt_on_error=1' make check -C test/
# run tests under UndefinedBehaviorSanitizer, https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html
make clean
./configure CXX=clang++ CXXFLAGS="$COMMON_FLAGS -fsanitize=undefined,implicit-conversion,local-bounds,unsigned-integer-overflow"
make
make check -C test/
make --jobs="$(nproc)"
ALL_TESTS=1 make check -C test/
# run tests with Valgrind
make clean
./configure CXXFLAGS="$COMMON_FLAGS"
make
RETRIES=100 VALGRIND="--leak-check=full" make check -C test/
make --jobs="$(nproc)"
ALL_TESTS=1 RETRIES=100 VALGRIND="--leak-check=full --error-exitcode=1" S3_URL=http://127.0.0.1:8081 make check -C test/
#
# Local variables:


@@ -1,3 +1,4 @@
s3proxy.endpoint=http://127.0.0.1:8081
s3proxy.secure-endpoint=https://127.0.0.1:8080
s3proxy.authorization=aws-v2-or-v4
s3proxy.identity=local-identity


@@ -38,6 +38,12 @@ source test-utils.sh
FAKE_FREE_DISK_SIZE=200
ENSURE_DISKFREE_SIZE=10
# set up SSE-C (customer-provided server-side encryption) keys
head -c 32 < /dev/urandom > /tmp/ssekey.bin
base64 < /tmp/ssekey.bin > /tmp/ssekey
openssl md5 -binary < /tmp/ssekey.bin | base64 > /tmp/ssekeymd5
chmod 600 /tmp/ssekey /tmp/ssekey.bin /tmp/ssekeymd5
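# [NOTE]
# Sanity check of the generated key material (illustrative, not part of the
# test run): SSE-C with AES256 requires a raw key of exactly 32 bytes.
#   test "$(wc -c < /tmp/ssekey.bin)" -eq 32 && echo "SSE-C key length OK"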
export CACHE_DIR
export ENSURE_DISKFREE_SIZE
if [ -n "${ALL_TESTS}" ]; then
@@ -52,6 +58,7 @@ if [ -n "${ALL_TESTS}" ]; then
sigv4
"singlepart_copy_limit=10" # limit size to exercise multipart code paths
#use_sse # TODO: S3Proxy does not support SSE
#use_sse=custom:/tmp/ssekey # TODO: S3Proxy does not support SSE
"use_cache=${CACHE_DIR} -o ensure_diskfree=${ENSURE_DISKFREE_SIZE} -o fake_diskfree=${FAKE_FREE_DISK_SIZE} -o streamupload"
)
else


@@ -71,6 +71,16 @@ else
fi
export SED_BUFFER_FLAG="--unbuffered"
# [NOTE]
# Specify the cache-disabling option depending on the stat(coreutils) version
# TODO: investigate why this is necessary #2327
#
if stat --cached=never / >/dev/null 2>&1; then
STAT_BIN=(stat --cached=never)
else
STAT_BIN=(stat)
fi
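# Example (illustrative): "${STAT_BIN[@]}" expands to either "stat --cached=never"
# or plain "stat", so the helpers below can call it uniformly:
#   "${STAT_BIN[@]}" --format "%s" "${TEST_TEXT_FILE}"   # file size on Linux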
function get_xattr() {
if [ "$(uname)" = "Darwin" ]; then
xattr -p "$1" "$2"
@@ -95,11 +105,19 @@ function del_xattr() {
fi
}
function get_inode() {
if [ "$(uname)" = "Darwin" ]; then
"${STAT_BIN[@]}" -f "%i" "$1"
else
"${STAT_BIN[@]}" --format "%i" "$1"
fi
}
function get_size() {
if [ "$(uname)" = "Darwin" ]; then
stat -f "%z" "$1"
"${STAT_BIN[@]}" -f "%z" "$1"
else
stat -c %s "$1"
"${STAT_BIN[@]}" --format "%s" "$1"
fi
}
@@ -137,22 +155,6 @@ function mk_test_file {
echo "Could not create file ${TEST_TEXT_FILE}, it does not exist"
exit 1
fi
# wait & check
local BASE_TEXT_LENGTH; BASE_TEXT_LENGTH=$(echo "${TEXT}" | wc -c | awk '{print $1}')
local TRY_COUNT=10
while true; do
local MK_TEXT_LENGTH
MK_TEXT_LENGTH=$(wc -c "${TEST_TEXT_FILE}" | awk '{print $1}')
if [ "${BASE_TEXT_LENGTH}" -eq "${MK_TEXT_LENGTH}" ]; then
break
fi
local TRY_COUNT=$((TRY_COUNT - 1))
if [ "${TRY_COUNT}" -le 0 ]; then
echo "Could not create file ${TEST_TEXT_FILE}, that file size is something wrong"
fi
sleep 1
done
}
function rm_test_file {
@@ -295,50 +297,55 @@ function run_suite {
}
function get_ctime() {
# ex: "1657504903.019784214"
if [ "$(uname)" = "Darwin" ]; then
# ex: "1657504903.019784214"
stat -f "%Fc" "$1"
"${STAT_BIN[@]}" -f "%Fc" "$1"
else
# ex: "2022-07-24 12:45:18.621046168 +0000"
stat -c "%z" "$1"
"${STAT_BIN[@]}" --format "%.9Z" "$1"
fi
}
function get_mtime() {
# ex: "1657504903.019784214"
if [ "$(uname)" = "Darwin" ]; then
# ex: "1657504903.019784214"
stat -f "%Fm" "$1"
"${STAT_BIN[@]}" -f "%Fm" "$1"
else
# ex: "2022-07-24 12:45:18.621046168 +0000"
stat -c "%y" "$1"
"${STAT_BIN[@]}" --format "%.9Y" "$1"
fi
}
function get_atime() {
# ex: "1657504903.019784214"
if [ "$(uname)" = "Darwin" ]; then
# ex: "1657504903.019784214"
stat -f "%Fa" "$1"
"${STAT_BIN[@]}" -f "%Fa" "$1"
else
# ex: "2022-07-24 12:45:18.621046168 +0000"
stat -c "%x" "$1"
"${STAT_BIN[@]}" --format "%.9X" "$1"
fi
}
function get_permissions() {
if [ "$(uname)" = "Darwin" ]; then
stat -f "%p" "$1"
"${STAT_BIN[@]}" -f "%p" "$1"
else
stat -c "%a" "$1"
"${STAT_BIN[@]}" --format "%a" "$1"
fi
}
function get_user_and_group() {
if [ "$(uname)" = "Darwin" ]; then
stat -f "%u:%g" "$1"
else
"${STAT_BIN[@]}" --format "%u:%g" "$1"
fi
}
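# [NOTE]
# With the formats above, both platforms return epoch seconds with sub-second
# precision, so timestamps can be compared as plain strings, e.g.
# (illustrative values):
#   get_mtime "${TEST_TEXT_FILE}"   # -> 1657504903.019784214
#   get_ctime "${TEST_TEXT_FILE}"   # -> 1657504903.019784214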
function check_content_type() {
local INFO_STR
INFO_STR=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "$1")
if [[ "${INFO_STR}" != *"$2"* ]]
INFO_STR=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "$1" | jq -r .ContentType)
if [ "${INFO_STR}" != "$2" ]
then
echo "moved file content-type is not as expected expected:$2 got:${INFO_STR}"
exit 1
echo "Expected Content-Type: $2 but got: ${INFO_STR}"
return 1
fi
}
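# A typical call (illustrative; the object key and expected type depend on
# the individual test):
#   check_content_type "${DIR_NAME}/${TEST_TEXT_FILE}" "text/plain"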
@@ -353,6 +360,17 @@ function aws_cli() {
if [ -n "${S3FS_PROFILE}" ]; then
FLAGS="--profile ${S3FS_PROFILE}"
fi
if [ "$1" = "s3" ] && [ "$2" != "ls" ] && [ "$2" != "mb" ]; then
if s3fs_args | grep -q use_sse=custom; then
FLAGS="${FLAGS} --sse-c AES256 --sse-c-key fileb:///tmp/ssekey.bin"
fi
elif [ "$1" = "s3api" ] && [ "$2" != "head-bucket" ]; then
if s3fs_args | grep -q use_sse=custom; then
FLAGS="${FLAGS} --sse-customer-algorithm AES256 --sse-customer-key $(cat /tmp/ssekey) --sse-customer-key-md5 $(cat /tmp/ssekeymd5)"
fi
fi
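# [NOTE]
# With "use_sse=custom:/tmp/ssekey" in the mount options, the flags above
# effectively turn a copy into something like (illustrative):
#   aws s3 cp ./local-file "s3://${TEST_BUCKET_1}/key" \
#       --sse-c AES256 --sse-c-key fileb:///tmp/ssekey.bin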
# [NOTE]
# AWS_EC2_METADATA_DISABLED prevents requests to the EC2 metadata service (169.254.169.254).
# shellcheck disable=SC2086,SC2068
@@ -378,12 +396,43 @@ function make_random_string() {
else
local END_POS=8
fi
"${BASE64_BIN}" --wrap=0 < /dev/urandom | tr -d /+ | head -c "${END_POS}"
if [ "$(uname)" = "Darwin" ]; then
local BASE64_OPT="--break=0"
else
local BASE64_OPT="--wrap=0"
fi
"${BASE64_BIN}" "${BASE64_OPT}" < /dev/urandom 2>/dev/null | tr -d /+ | head -c "${END_POS}"
return 0
}
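# Example (illustrative, assuming the optional first argument is the desired
# length, as suggested by the END_POS default above):
#   RANDOM_SUFFIX=$(make_random_string 16)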
function s3fs_args() {
if [ "$(uname)" = "Darwin" ]; then
ps -o args -p "${S3FS_PID}" | tail -n +2
else
ps -o args -p "${S3FS_PID}" --no-headers
fi
}
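# Example usage (as in add_all_tests above): detect a mount option by
# grepping the s3fs command line instead of raw ps output:
#   if s3fs_args | grep -q use_cache; then add_tests test_cache_file_stat; fi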
#
# $1: sleep seconds
# $2: OS type (e.g. 'Darwin'; unset means all OS types)
#
# [NOTE] macOS fuse-t
# macOS fuse-t mounts over NFS, and the mtime/ctime/atime attribute values
# have only one-second resolution (no milli/micro/nanoseconds).
# Therefore, unlike tests on other OSs, we have to wait at least 1 second.
# This function is called primarily for this purpose.
#
function wait_ostype() {
if [ -z "$2" ] || uname | grep -q "$2"; then
if [ -n "$1" ] && ! (echo "$1" | grep -q '[^0-9]'); then
sleep "$1"
fi
fi
}
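# Typical call site (illustrative): wait one second, but only on macOS,
# where fuse-t truncates timestamps to whole seconds:
#   wait_ostype 1 "Darwin"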
#
# Local variables:
# tab-width: 4


@@ -18,8 +18,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cstdio>
#include <cstdlib>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
@@ -40,7 +40,7 @@ int main(int argc, char *argv[])
}
const char* filepath = argv[1];
off_t size = (off_t)strtoull(argv[2], NULL, 10);
off_t size = static_cast<off_t>(strtoull(argv[2], nullptr, 10));
int fd;
// open file
@@ -58,7 +58,8 @@ int main(int argc, char *argv[])
// run sub-process for reading file(cat)
char szCommand[1024];
sprintf(szCommand, "cat %s >/dev/null 2>&1", filepath);
snprintf(szCommand, sizeof(szCommand), "cat %s >/dev/null 2>&1", filepath);
szCommand[sizeof(szCommand) - 1] = '\0'; // for safety
if(0 != system(szCommand)){
fprintf(stderr, "[ERROR] Failed to run sub-process(cat).\n");
close(fd);


@@ -18,19 +18,21 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <climits>
#include <string>
#include <list>
#include <memory>
#include <string>
#include <vector>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
//---------------------------------------------------------
// Structures and Typedefs
@@ -41,38 +43,32 @@ struct write_block_part
off_t size;
};
typedef std::list<write_block_part> wbpart_list_t;
typedef std::list<std::string> strlist_t;
typedef std::vector<write_block_part> wbpart_list_t;
typedef std::list<std::string> strlist_t;
//---------------------------------------------------------
// Const
//---------------------------------------------------------
const char usage_string[] = "Usage : \"write_multiblock -f <file path> -p <start offset:size>\" (allows -f and -p multiple times.)";
static constexpr char usage_string[] = "Usage : \"write_multiblock -f <file path> -p <start offset:size>\" (allows -f and -p multiple times.)";
//---------------------------------------------------------
// Utility functions
//---------------------------------------------------------
static unsigned char* create_random_data(off_t size)
static std::unique_ptr<unsigned char[]> create_random_data(off_t size)
{
int fd;
if(-1 == (fd = open("/dev/urandom", O_RDONLY))){
std::cerr << "[ERROR] Could not open /dev/urandom" << std::endl;
return NULL;
return nullptr;
}
unsigned char* pbuff;
if(NULL == (pbuff = reinterpret_cast<unsigned char*>(malloc(size)))){
std::cerr << "[ERROR] Could not allocate memory." << std::endl;
close(fd);
return NULL;
}
std::unique_ptr<unsigned char[]> pbuff(new unsigned char[size]);
for(ssize_t readpos = 0, readcnt = 0; readpos < size; readpos += readcnt){
if(-1 == (readcnt = read(fd, &(pbuff[readpos]), static_cast<size_t>(size - readpos)))){
if(EAGAIN != errno && EWOULDBLOCK != errno && EINTR != errno){
std::cerr << "[ERROR] Failed reading from /dev/urandom with errno: " << errno << std::endl;
free(pbuff);
close(fd);
return NULL;
return nullptr;
}
readcnt = 0;
}
@@ -87,7 +83,7 @@ static off_t cvt_string_to_number(const char* pstr)
}
errno = 0;
char* ptemp = NULL;
char* ptemp = nullptr;
long long result = strtoll(pstr, &ptemp, 10);
if(!ptemp || ptemp == pstr || *ptemp != '\0'){
@@ -170,7 +166,7 @@ static bool parse_arguments(int argc, char** argv, strlist_t& files, wbpart_list
while(-1 != (opt = getopt(argc, argv, "f:p:"))){
switch(opt){
case 'f':
files.push_back(std::string(optarg));
files.emplace_back(optarg);
break;
case 'p':
if(!parse_write_blocks(optarg, wbparts, max_size)){
@@ -205,10 +201,7 @@ int main(int argc, char** argv)
}
// make data and buffer
unsigned char* pData;
if(NULL == (pData = create_random_data(max_size))){
exit(EXIT_FAILURE);
}
std::unique_ptr<unsigned char[]> pData = create_random_data(max_size);
for(strlist_t::const_iterator fiter = files.begin(); fiter != files.end(); ++fiter){
// open/create file
@@ -216,19 +209,16 @@ int main(int argc, char** argv)
struct stat st;
if(0 == stat(fiter->c_str(), &st)){
if(!S_ISREG(st.st_mode)){
std::cerr << "[ERROR] File " << fiter->c_str() << " is existed, but it is not regular file." << std::endl;
free(pData);
std::cerr << "[ERROR] File " << *fiter << " is existed, but it is not regular file." << std::endl;
exit(EXIT_FAILURE);
}
if(-1 == (fd = open(fiter->c_str(), O_WRONLY))){
std::cerr << "[ERROR] Could not open " << fiter->c_str() << std::endl;
free(pData);
std::cerr << "[ERROR] Could not open " << *fiter << std::endl;
exit(EXIT_FAILURE);
}
}else{
if(-1 == (fd = open(fiter->c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0644))){
std::cerr << "[ERROR] Could not create " << fiter->c_str() << std::endl;
free(pData);
std::cerr << "[ERROR] Could not create " << *fiter << std::endl;
exit(EXIT_FAILURE);
}
}
@@ -239,9 +229,8 @@ int main(int argc, char** argv)
for(ssize_t writepos = 0, writecnt = 0; writepos < piter->size; writepos += writecnt){
if(-1 == (writecnt = pwrite(fd, &(pData[writepos]), static_cast<size_t>(piter->size - writepos), (piter->start + writepos)))){
if(EAGAIN != errno && EWOULDBLOCK != errno && EINTR != errno){
std::cerr << "[ERROR] Failed writing to " << fiter->c_str() << " by errno : " << errno << std::endl;
std::cerr << "[ERROR] Failed writing to " << *fiter << " by errno : " << errno << std::endl;
close(fd);
free(pData);
exit(EXIT_FAILURE);
}
writecnt = 0;
@@ -251,7 +240,6 @@ int main(int argc, char** argv)
// close file
close(fd);
}
free(pData);
exit(EXIT_SUCCESS);
}