Merge pull request #1151 from liuyongqing/master

fix deadlock in cache cleanup
Takeshi Nakatani 2020-01-30 22:17:50 +09:00 committed by GitHub
commit ee1d3a9057
5 changed files with 49 additions and 29 deletions


@@ -2223,22 +2223,6 @@ ssize_t FdEntity::Write(const char* bytes, off_t start, size_t size)
return wsize;
}
void FdEntity::CleanupCache()
{
AutoLock auto_lock(&fdent_lock, AutoLock::NO_WAIT);
if (!auto_lock.isLockAcquired()) {
return;
}
if(pagelist.IsModified()){
// cache is not committed to s3, cannot cleanup
return;
}
FdManager::DeleteCacheFile(path.c_str());
}
//------------------------------------------------
// FdManager symbol
//------------------------------------------------
@@ -2794,19 +2778,16 @@ void FdManager::CleanupCacheDirInternal(const std::string &path)
if(S_ISDIR(st.st_mode)){
CleanupCacheDirInternal(next_path);
}else{
FdEntity* ent;
if(NULL == (ent = FdManager::get()->Open(next_path.c_str(), NULL, -1, -1, false, true, true))){
S3FS_PRN_DBG("skipping locked file: %s", next_path.c_str());
AutoLock auto_lock(&FdManager::fd_manager_lock, AutoLock::NO_WAIT);
if (!auto_lock.isLockAcquired()) {
S3FS_PRN_ERR("could not get fd_manager_lock when clean up file(%s)", next_path.c_str());
continue;
}
if(ent->IsMultiOpened()){
S3FS_PRN_DBG("skipping opened file: %s", next_path.c_str());
}else{
ent->CleanupCache();
fdent_map_t::iterator iter = fent.find(next_path);
if(fent.end() == iter) {
S3FS_PRN_DBG("cleaned up: %s", next_path.c_str());
FdManager::DeleteCacheFile(next_path.c_str());
}
Close(ent);
}
}
closedir(dp);
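
The removed FdEntity::CleanupCache() above took the per-file fdent_lock while FdManager was already walking the cache directory, and the old FdManager::CleanupCacheDirInternal() additionally re-entered FdManager::get()->Open() for every file it visited. The replacement never touches the FdEntity locks at all: it makes a single non-blocking attempt on FdManager::fd_manager_lock and deletes a cache file only when its path has no entry in the fent map, i.e. the file is not currently open. Below is a minimal sketch of the non-blocking scoped-lock idiom that AutoLock(..., AutoLock::NO_WAIT) / isLockAcquired() expresses, assuming a plain pthread mutex; ScopedTryLock is an illustrative stand-in, not the actual s3fs AutoLock class.

#include <pthread.h>

// Illustrative stand-in for the AutoLock(..., NO_WAIT) pattern: try to take
// the mutex without blocking, report whether it was acquired, and release it
// automatically when the scope ends.
class ScopedTryLock
{
  public:
    explicit ScopedTryLock(pthread_mutex_t* mutex) :
        pmutex(mutex), locked(0 == pthread_mutex_trylock(mutex)) {}

    ~ScopedTryLock()
    {
        if(locked){
            pthread_mutex_unlock(pmutex);
        }
    }

    bool isLockAcquired() const { return locked; }

  private:
    pthread_mutex_t* pmutex;
    bool             locked;
};

// Usage mirrors the call sites above:
//   ScopedTryLock lock(&some_mutex);
//   if(!lock.isLockAcquired()){
//       return;   // another thread holds the lock; skip the work instead of blocking
//   }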


@@ -166,7 +166,6 @@ class FdEntity
void Close(void);
bool IsOpen(void) const { return (-1 != fd); }
bool IsMultiOpened(void) const { return refcnt > 1; }
int Open(headers_t* pmeta = NULL, off_t size = -1, time_t time = -1, bool no_fd_lock_wait = false);
bool OpenAndLoadAll(headers_t* pmeta = NULL, off_t* size = NULL, bool force_load = false);
int Dup(bool lock_already_held = false);
@@ -200,7 +199,6 @@ class FdEntity
ssize_t Write(const char* bytes, off_t start, size_t size);
bool ReserveDiskSpace(off_t size);
void CleanupCache();
};
typedef std::map<std::string, class FdEntity*> fdent_map_t; // key=path, value=FdEntity*
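
With IsMultiOpened() and CleanupCache() removed from FdEntity, the cleanup path only needs to know whether a path still has an entry in the fdent_map_t above: an entry means the file is open and its cache file must be left alone. A minimal sketch of that check follows, assuming the map is protected by the manager lock; CleanupIfNotOpen and the use of std::remove() in place of FdManager::DeleteCacheFile() are hypothetical stand-ins for illustration.

#include <cstdio>
#include <map>
#include <string>

class FdEntity;                                         // opaque for this sketch
typedef std::map<std::string, FdEntity*> fdent_map_t;   // key=path, value=FdEntity*

// Delete a local cache file only when no FdEntity is registered for the path.
// The caller is assumed to hold the manager lock so the map cannot change
// underneath this check.
static bool CleanupIfNotOpen(const fdent_map_t& fent, const std::string& cache_path)
{
    if(fent.end() != fent.find(cache_path)){
        return false;                                   // still open; keep the cache file
    }
    return 0 == std::remove(cache_path.c_str());        // stand-in for FdManager::DeleteCacheFile()
}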


@@ -672,7 +672,36 @@ function test_write_multiple_offsets {
rm_test_file ${TEST_TEXT_FILE}
}
function test_clean_up_cache() {
describe "Test clean up cache"
dir="many_files"
count=256
mkdir -p $dir
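# write $count 1MiB files through s3fs; with use_cache and ensure_diskfree in effect,
# s3fs must evict already-uploaded cache files to keep the free-space guarantee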
for x in $(seq $count); do
dd if=/dev/urandom of=$dir/file-$x bs=1048576 count=1
done
file_cnt=$(ls $dir | wc -l)
if [ $file_cnt != $count ]; then
echo "Expected $count files but got $file_cnt"
rm -rf $dir
return 1
fi
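# after ~256MB of writes the cache filesystem must still have at least ENSURE_DISKFREE_SIZE MB free;
# if cache cleanup had deadlocked or never run, available space would have dropped below the limit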
CACHE_DISK_AVAIL_SIZE=`get_disk_avail_size $CACHE_DIR`
if [ "$CACHE_DISK_AVAIL_SIZE" -lt "$ENSURE_DISKFREE_SIZE" ];then
echo "Cache disk avail size:$CACHE_DISK_AVAIL_SIZE less than ensure_diskfree size:$ENSURE_DISKFREE_SIZE"
rm -rf $dir
return 1
fi
rm -rf $dir
}
function add_all_tests {
# run the cache clean-up test only when s3fs was mounted with the ensure_diskfree option
if ps -ef | grep -v grep | grep s3fs | grep -q ensure_diskfree; then
add_tests test_clean_up_cache
fi
add_tests test_append_file
add_tests test_truncate_file
add_tests test_truncate_empty_file


@@ -17,7 +17,15 @@ CACHE_DIR="/tmp/s3fs-cache"
rm -rf "${CACHE_DIR}"
mkdir "${CACHE_DIR}"
# allow the data cache to use at most 200MB: ensure_diskfree is set to (current available space - 200MB), in MB
source test-utils.sh
CACHE_DISK_AVAIL_SIZE=`get_disk_avail_size $CACHE_DIR`
ENSURE_DISKFREE_SIZE=$((CACHE_DISK_AVAIL_SIZE - 200))
export CACHE_DIR
export ENSURE_DISKFREE_SIZE
FLAGS=(
"use_cache=${CACHE_DIR} -o ensure_diskfree=${ENSURE_DISKFREE_SIZE}"
enable_content_md5
enable_noobj_cache
nocopyapi
@@ -25,13 +33,12 @@ FLAGS=(
notsup_compat_dir
sigv2
singlepart_copy_limit=$((10 * 1024)) # limit size to exercise multipart code paths
use_cache="${CACHE_DIR}"
#use_sse # TODO: S3Proxy does not support SSE
)
start_s3proxy
for flag in ${FLAGS[*]}; do
for flag in "${FLAGS[@]}"; do
echo "testing s3fs flag: $flag"
start_s3fs -o $flag


@@ -258,6 +258,11 @@ function check_content_type() {
fi
}
function get_disk_avail_size() {
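# print the available space of the filesystem containing $1, in MiB (BLOCKSIZE makes df report 1MiB blocks)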
DISK_AVAIL_SIZE=`BLOCKSIZE=$((1024 * 1024)) df $1 | awk '{print $4}' | tail -n 1`
echo ${DISK_AVAIL_SIZE}
}
function aws_cli() {
AWS_ACCESS_KEY_ID=local-identity AWS_SECRET_ACCESS_KEY=local-credential aws $* --endpoint-url "${S3_URL}" --no-verify-ssl
}