Individually test multiple s3fs flags

Remove unneeded comments; the singlepart copy limit ensures that the tests
exercise multipart code paths even with smaller files.
References #971.
Andrew Gaul 2019-06-12 16:35:26 -07:00
parent 21321a9d96
commit 4f23f38583
4 changed files with 29 additions and 13 deletions
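A note on the limit reused below: at this point in the project's history singlepart_copy_limit appears to be interpreted in KB, so $((10 * 1024)) caps single-part server-side copies at roughly 10 MB and pushes larger copies onto the multipart path. A minimal sketch of the effect, assuming that KB interpretation (bucket, mountpoint, and file sizes are illustrative):

    # Mount with the lowered copy limit, then rename a file larger than it;
    # rename is implemented as a server-side copy, which now goes multipart.
    s3fs testbucket /mnt/testbucket -o singlepart_copy_limit=$((10 * 1024))
    dd if=/dev/zero of=/mnt/testbucket/big bs=1M count=20
    mv /mnt/testbucket/big /mnt/testbucket/big.renamed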

src/curl.cpp

@@ -983,6 +983,9 @@ static int do_create_bucket()
             // retry to check
             s3fscurl.DestroyCurlHandle();
             res = s3fscurl.PutRequest("/", meta, tmpfd);
+        }else if(responseCode == 409){
+            // bucket already exists
+            res = 0;
         }
     }
     if(ptmpfp != NULL){
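The new branch treats HTTP 409 Conflict, which S3 returns when the bucket already exists, as success, so repeated test runs no longer fail at bucket creation. A hedged shell rendering of the same idea, assuming the local S3Proxy endpoint from s3proxy.conf and an illustrative bucket name:

    # Treat 409 Conflict (bucket already exists) the same as a successful PUT.
    # -k accepts S3Proxy's self-signed certificate.
    status=$(curl -sk -o /dev/null -w '%{http_code}' -X PUT 'https://127.0.0.1:8080/testbucket')
    case "$status" in
        200|409) echo "bucket ready" ;;
        *)       echo "create failed: $status" >&2; exit 1 ;;
    esac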

test/integration-test-common.sh

@@ -135,7 +135,6 @@ function stop_s3proxy {
 # Mount the bucket, function arguments passed to s3fs in addition to
 # a set of common arguments.
 function start_s3fs {
-    # Public bucket if PUBLIC is set
     if [ -n "${PUBLIC}" ]; then
         AUTH_OPT="-o public_bucket=1"
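The surviving conditional is what the removed comment described: setting PUBLIC switches the common mount helper to an unauthenticated public-bucket mount. A usage sketch (the runner script name is taken from the last file in this commit):

    # start_s3fs adds -o public_bucket=1 to every mount when PUBLIC is set.
    PUBLIC=1 ./small-integration-test.sh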

test/s3proxy.conf

@@ -1,5 +1,5 @@
 s3proxy.secure-endpoint=https://127.0.0.1:8080
-s3proxy.authorization=aws-v4
+s3proxy.authorization=aws-v2-or-v4
 s3proxy.identity=local-identity
 s3proxy.credential=local-credential
 s3proxy.keystore-path=keystore.jks
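The broadened authorization matters because the FLAGS list in the next file mounts once with -o sigv2 while the remaining runs sign with the default V4, and S3Proxy must accept both. A hedged sketch of the two mounts this config now has to serve (bucket and mountpoint are illustrative):

    # With aws-v2-or-v4, both signing styles authenticate against S3Proxy.
    s3fs testbucket /mnt/testbucket -o url=https://127.0.0.1:8080 -o sigv2  # Signature V2
    s3fs testbucket /mnt/testbucket -o url=https://127.0.0.1:8080           # default Signature V4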

test/small-integration-test.sh

@@ -12,19 +12,33 @@ REQUIRE_ROOT=require-root.sh
 source integration-test-common.sh
 
+CACHE_DIR="/tmp/s3fs-cache"
+rm -rf "${CACHE_DIR}"
+mkdir "${CACHE_DIR}"
+
+FLAGS=(
+    enable_content_md5
+    nocopyapi
+    nomultipart
+    notsup_compat_dir
+    sigv2
+    singlepart_copy_limit=$((10 * 1024))  # limit size to exercise multipart code paths
+    use_cache="${CACHE_DIR}"
+    #use_sse  # TODO: S3Proxy does not support SSE
+)
+
 start_s3proxy
 
-#
-# enable_content_md5
-#     Causes s3fs to validate file contents. This isn't included in the
-#     common options used by start_s3fs because tests may be performance tests.
-# singlepart_copy_limit
-#     Appeared in upstream s3fs-fuse tests, possibly a limitation of S3Proxy.
-#     TODO: github archaeology to see why it was added.
-#
-start_s3fs -o enable_content_md5 \
-           -o singlepart_copy_limit=$((10 * 1024))
+for flag in ${FLAGS[*]}; do
+    echo "testing s3fs flag: $flag"
+
+    start_s3fs -o $flag
+
+    ./integration-test-main.sh
+
+    stop_s3fs
+done
 
 stop_s3proxy
 
 echo "$0: tests complete."
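A design note on the new loop: ${FLAGS[*]} expands unquoted, so each FLAGS entry must be a single shell word, and a leading # inside the array turns an entry into a comment (as with use_sse). Extending coverage to another option is then a one-line change, e.g. this hypothetical addition:

    # Each FLAGS entry gets its own mount / test / unmount cycle.
    FLAGS+=(use_path_request_style)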