Fixed wrong cache file stat after creating a new file

And added a test for the cache file stat after creating a new file
This commit is contained in:
Takeshi Nakatani 2021-10-16 15:51:36 +00:00 committed by Andrew Gaul
parent 2f412804e2
commit 023aaf7dff
4 changed files with 68 additions and 22 deletions

View File

@ -2173,6 +2173,16 @@ bool FdEntity::PunchHole(off_t start, size_t size)
return true;
}
// [NOTE]
// Indicate that a newly created file is dirty.
// This ensures that both metadata and data are synced during flush.
//
void FdEntity::MarkDirtyNewFile()
{
    // Re-initialize the page list with a single zero-length page marked
    // (loaded=false, modified=true), so the new empty file is still treated
    // as having dirty data that must be uploaded on flush.
    pagelist.Init(0, false, true);
    // Flag pending metadata so the flush also syncs the object's metadata
    // (presumably consumed by the flush path — not visible in this view).
    is_meta_pending = true;
}
/*
* Local variables:
* tab-width: 4

View File

@ -126,11 +126,7 @@ class FdEntity
bool ReserveDiskSpace(off_t size);
bool PunchHole(off_t start = 0, size_t size = 0);
// Indicate that a new file is dirty. This ensures that both metadata and data are synced during flush.
void MarkDirtyNewFile() {
pagelist.SetPageLoadedStatus(0, 1, PageList::PAGE_LOAD_MODIFIED);
is_meta_pending = true;
}
void MarkDirtyNewFile();
};
typedef std::map<std::string, class FdEntity*> fdent_map_t; // key=path, value=FdEntity*

View File

@ -62,6 +62,8 @@ inline void raw_add_compress_fdpage_list(fdpage_list_t& pagelist, fdpage& page,
// default_modify: modified flag value in the list after compression when default_modify=true
//
// NOTE: ignore_modify and ignore_load cannot both be true.
// Zero size pages will be deleted. However, if a page is the only entry in
// the list, it is kept. This exception is required to create a new empty file.
//
static fdpage_list_t raw_compress_fdpage_list(const fdpage_list_t& pages, bool ignore_load, bool ignore_modify, bool default_load, bool default_modify)
{
@ -70,28 +72,33 @@ static fdpage_list_t raw_compress_fdpage_list(const fdpage_list_t& pages, bool i
bool is_first = true;
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
if(!is_first){
if( (!ignore_load && (tmppage.loaded != iter->loaded )) ||
(!ignore_modify && (tmppage.modified != iter->modified)) )
{
// Different from the previous area, add it to list
raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify);
// keep current area
tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? default_modify : iter->modified));
}else{
// Same as the previous area
if(tmppage.next() != iter->offset){
// These are not contiguous areas, add it to list
if(0 < tmppage.bytes){
if( (!ignore_load && (tmppage.loaded != iter->loaded )) ||
(!ignore_modify && (tmppage.modified != iter->modified)) )
{
// Different from the previous area, add it to list
raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify);
// keep current area
tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? default_modify : iter->modified));
}else{
// These are contiguous areas
// Same as the previous area
if(tmppage.next() != iter->offset){
// These are not contiguous areas, add it to list
raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify);
// add current area
tmppage.bytes += iter->bytes;
// keep current area
tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? default_modify : iter->modified));
}else{
// These are contiguous areas
// add current area
tmppage.bytes += iter->bytes;
}
}
}else{
// if found empty page, skip it
tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? default_modify : iter->modified));
}
}else{
// first area
@ -103,7 +110,13 @@ static fdpage_list_t raw_compress_fdpage_list(const fdpage_list_t& pages, bool i
}
// add last area
if(!is_first){
raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify);
// [NOTE]
// Zero size pages are not allowed. However, if it is the only one, allow it.
// This is a special process that exists only to create empty files.
//
if(compressed_pages.empty() || 0 != tmppage.bytes){
raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify);
}
}
return compressed_pages;
}
@ -367,7 +380,7 @@ void PageList::Clear()
bool PageList::Init(off_t size, bool is_loaded, bool is_modified)
{
Clear();
if(0 < size){
if(0 <= size){
fdpage page(0, size, is_loaded, is_modified);
pages.push_back(page);
}

View File

@ -1321,6 +1321,32 @@ function test_cache_file_stat() {
rm_test_file "${BIG_FILE}"
}
# Verify that the cache stat file records a size of zero right after an
# empty file is created (regression test for wrong stat of a new file).
function test_zero_cache_file_stat() {
    describe "Test zero byte cache file stat ..."

    rm_test_file "${TEST_TEXT_FILE}"

    #
    # create empty file
    #
    touch "${TEST_TEXT_FILE}"

    #
    # get "testrun-xxx" directory name
    #
    # NOTE: quote expansions to survive paths with spaces; ls errors are
    # suppressed because the cache directory may not exist yet.
    CACHE_TESTRUN_DIR=$(ls -1 "${CACHE_DIR}/${TEST_BUCKET_1}/" 2>/dev/null | grep testrun)

    # [NOTE]
    # The stat file is a one-line text file whose line is expected to end with
    # ":0" (ex. "4543937:0"), i.e. the cached object size is zero.
    #
    if ! head -1 "${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${CACHE_TESTRUN_DIR}/${TEST_TEXT_FILE}" 2>/dev/null | grep -q ':0$'; then
        echo "The cache file stat after creating an empty file is incorrect : ${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${CACHE_TESTRUN_DIR}/${TEST_TEXT_FILE}"
        return 1;
    fi

    rm_test_file "${TEST_TEXT_FILE}"
}
function test_upload_sparsefile {
describe "Testing upload sparse file ..."
@ -1462,6 +1488,7 @@ function test_ut_ossfs {
function add_all_tests {
if ps u $S3FS_PID | grep -q use_cache; then
add_tests test_cache_file_stat
add_tests test_zero_cache_file_stat
fi
if ! ps u $S3FS_PID | grep -q ensure_diskfree && ! uname | grep -q Darwin; then
add_tests test_clean_up_cache