s3fs-fuse/test/small-integration-test.sh


#!/bin/bash
#
# Test s3fs-fuse file system operations with S3Proxy.
#
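# Typical invocation, assuming s3fs has been built and S3Proxy is available
# where integration-test-common.sh expects it:
#
#   cd test && ./small-integration-test.sh
#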
set -o errexit
set -o pipefail

# Require root (currently disabled; uncomment the source line to enforce).
REQUIRE_ROOT=require-root.sh
#source $REQUIRE_ROOT

source integration-test-common.sh
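
# integration-test-common.sh is expected to supply the start_s3proxy/stop_s3proxy
# and start_s3fs/stop_s3fs helpers used below.

# Start every run with a fresh, empty local cache directory.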
CACHE_DIR="/tmp/s3fs-cache"
rm -rf "${CACHE_DIR}"
mkdir "${CACHE_DIR}"

source test-utils.sh

# Reserve 200 MB for the data cache: tell s3fs to keep the rest of the
# currently available disk space free via ensure_diskfree.
CACHE_DISK_AVAIL_SIZE=$(get_disk_avail_size "${CACHE_DIR}")
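
# For reference, a minimal sketch of what get_disk_avail_size might look like;
# the real helper comes from test-utils.sh and its implementation may differ:
#
#   get_disk_avail_size() {
#       df -m "$1" | awk 'NR==2 { print $4 }'   # available space in MB
#   }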

if [ "$(uname)" = "Darwin" ]; then
    # [FIXME]
    # On macOS only, other processes (most likely Time Machine)
    # sometimes consume disk space alongside the s3fs cache. There
    # is no known workaround, so give the cache an extra ~1 GB of
    # headroom to avoid spurious failures.
    #
    ENSURE_DISKFREE_SIZE=$((CACHE_DISK_AVAIL_SIZE - 1200))  # MB: 200 cache + ~1000 headroom
else
    ENSURE_DISKFREE_SIZE=$((CACHE_DISK_AVAIL_SIZE - 200))   # MB: 200 cache
fi
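
# export so the settings are visible to child processes such as
# ./integration-test-main.sh (sourced scripts would see them regardless).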
export CACHE_DIR
export ENSURE_DISKFREE_SIZE

FLAGS=(
    "use_cache=${CACHE_DIR} -o ensure_diskfree=${ENSURE_DISKFREE_SIZE}"
    enable_content_md5
    enable_noobj_cache
    max_stat_cache_size=100
    nocopyapi
    nomultipart
    notsup_compat_dir
    sigv2
    singlepart_copy_limit=$((10 * 1024))  # limit size to exercise multipart code paths
    #use_sse  # TODO: S3Proxy does not support SSE
)

start_s3proxy
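
# Run the full integration suite once per flag. ${flag} is deliberately left
# unquoted in start_s3fs so that entries embedding "-o" (like the use_cache
# entry above) expand into multiple mount options, e.g.:
#   start_s3fs -o use_cache=/tmp/s3fs-cache -o ensure_diskfree=<size>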
for flag in "${FLAGS[@]}"; do
    echo "testing s3fs flag: $flag"

    start_s3fs -o $flag
    ./integration-test-main.sh
    stop_s3fs
done

stop_s3proxy
echo "$0: tests complete."