use correct content-type when overwriting and copying files

This commit is contained in:
liuyongqing 2019-08-29 13:25:09 +08:00 committed by Andrew Gaul
parent 0536dc1112
commit ae4bcd405c
3 changed files with 37 additions and 15 deletions

View File

@ -2961,13 +2961,14 @@ int S3fsCurl::PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy)
responseHeaders.clear();
bodydata.Clear();
string contype = S3fsCurl::LookupMimeType(string(tpath));
requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str());
// Make request headers
for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
string key = lower(iter->first);
string value = iter->second;
if(key == "content-type"){
requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str());
}else if(key.substr(0, 9) == "x-amz-acl"){
if(key.substr(0, 9) == "x-amz-acl"){
// not set value, but after set it.
}else if(key.substr(0, 10) == "x-amz-meta"){
requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str());
@ -3106,12 +3107,13 @@ int S3fsCurl::PutRequest(const char* tpath, headers_t& meta, int fd)
requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-MD5", strMD5.c_str());
}
string contype = S3fsCurl::LookupMimeType(string(tpath));
requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str());
for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
string key = lower(iter->first);
string value = iter->second;
if(key == "content-type"){
requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str());
}else if(key.substr(0, 9) == "x-amz-acl"){
if(key.substr(0, 9) == "x-amz-acl"){
// not set value, but after set it.
}else if(key.substr(0, 10) == "x-amz-meta"){
requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str());
@ -3714,13 +3716,14 @@ int S3fsCurl::CopyMultipartPostSetup(const char* from, const char* to, int part_
bodydata.Clear();
headdata.Clear();
string contype = S3fsCurl::LookupMimeType(string(to));
requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str());
// Make request headers
for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
string key = lower(iter->first);
string value = iter->second;
if(key == "content-type"){
requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str());
}else if(key == "x-amz-copy-source"){
if(key == "x-amz-copy-source"){
requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str());
}else if(key == "x-amz-copy-source-range"){
requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str());

View File

@ -74,6 +74,12 @@ function test_mv_file {
echo "Could not move file"
return 1
fi
#check the renamed file content-type
if [ -f "/etc/mime.types" ]
then
check_content_type "$1/$ALT_TEST_TEXT_FILE" "text/plain"
fi
# Check the contents of the alt file
ALT_FILE_LENGTH=`wc -c $ALT_TEST_TEXT_FILE | awk '{print $1}'`
@ -282,7 +288,7 @@ function test_external_modification {
echo "old" > ${TEST_TEXT_FILE}
OBJECT_NAME="$(basename $PWD)/${TEST_TEXT_FILE}"
sleep 2
echo "new new" | aws_cli cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "new new" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
cmp ${TEST_TEXT_FILE} <(echo "new new")
rm -f ${TEST_TEXT_FILE}
}
@ -291,7 +297,7 @@ function test_read_external_object() {
describe "create objects via aws CLI and read via s3fs"
OBJECT_NAME="$(basename $PWD)/${TEST_TEXT_FILE}"
sleep 3
echo "test" | aws_cli cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "test" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
cmp ${TEST_TEXT_FILE} <(echo "test")
rm -f ${TEST_TEXT_FILE}
}
@ -343,6 +349,9 @@ function test_multipart_copy {
return 1
fi
#check the renamed file content-type
check_content_type "$1/${BIG_FILE}-copy" "application/octet-stream"
rm -f "/tmp/${BIG_FILE}"
rm_test_file "${BIG_FILE}-copy"
}

View File

@ -142,7 +142,8 @@ function cd_run_dir {
echo "TEST_BUCKET_MOUNT_POINT variable not set"
exit 1
fi
RUN_DIR=$(mktemp -d ${TEST_BUCKET_MOUNT_POINT_1}/testrun-XXXXXX)
RUN_DIR=${TEST_BUCKET_MOUNT_POINT_1}/${1}
mkdir -p ${RUN_DIR}
cd ${RUN_DIR}
}
@ -191,7 +192,8 @@ function describe {
# made after the test run.
function run_suite {
orig_dir=$PWD
cd_run_dir
key_prefix="testrun-$RANDOM"
cd_run_dir $key_prefix
for t in "${TEST_LIST[@]}"; do
# The following sequence runs tests in a subshell to allow continuation
# on test failure, but still allowing errexit to be in effect during
@ -202,7 +204,7 @@ function run_suite {
# Other ways of trying to capture the return value will also disable
# errexit in the function due to bash... compliance with POSIX?
set +o errexit
(set -o errexit; $t)
(set -o errexit; $t $key_prefix)
if [[ $? == 0 ]]; then
report_pass $t
else
@ -247,7 +249,15 @@ function get_mtime() {
stat -c %Y "$1"
fi
}
function check_content_type() {
    # Verify that the S3 object with key $1 has content-type $2.
    # $1 - object key (relative to the test bucket)
    # $2 - expected content-type substring (e.g. "text/plain")
    # Exits the test run with status 1 on mismatch.
    #
    # NOTE: this greps the whole head-object output for $2 rather than
    # extracting the ContentType field, so any field containing the
    # string would also match — acceptable for these integration tests.
    INFO_STR=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "$1")
    if [[ "${INFO_STR}" != *"$2"* ]]
    then
        echo "content-type is not as expected. expected: $2 got: ${INFO_STR}"
        exit 1
    fi
}
function aws_cli() {
    # Wrapper around the AWS CLI pointed at the local test endpoint with
    # fixed test credentials. Arguments are passed through verbatim, so
    # callers can use any subcommand (s3, s3api, ...), e.g.:
    #   aws_cli s3 cp - "s3://bucket/key"
    #   aws_cli s3api head-object --bucket b --key k
    # "$@" (not $*) preserves arguments that contain whitespace.
    AWS_ACCESS_KEY_ID=local-identity AWS_SECRET_ACCESS_KEY=local-credential aws "$@" --endpoint-url "${S3_URL}" --no-verify-ssl
}