/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <unistd.h>
#include <stdint.h>
#include <fcntl.h>
#include <dirent.h>
#include <errno.h>
#include <syslog.h>
#include <pthread.h>
#include <assert.h>
#include <curl/curl.h>
#include <libxml/xpath.h>
#include <libxml/xpathInternals.h>
#include <libxml/tree.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <map>
#include <algorithm>
#include <list>
#include <vector>

#include "common.h"
#include "curl.h"
#include "string_util.h"
#include "s3fs.h"
#include "s3fs_util.h"
#include "s3fs_auth.h"
#include "addhead.h"

using namespace std;

static const std::string empty_payload_hash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";

//-------------------------------------------------------------------
// Utilities
//-------------------------------------------------------------------
// [TODO]
// This function uses a temporary file, but it should not need one.
// To avoid it, implement the function in each auth file (openssl, nss, gnutls).
//
static bool make_md5_from_string(const char* pstr, string& md5)
{
  if(!pstr || '\0' == pstr[0]){
    S3FS_PRN_ERR("Parameter is wrong.");
    return false;
  }
  FILE* fp;
  if(NULL == (fp = tmpfile())){
    S3FS_PRN_ERR("Could not make tmpfile.");
    return false;
  }
  size_t length = strlen(pstr);
  if(length != fwrite(pstr, sizeof(char), length, fp)){
    S3FS_PRN_ERR("Failed to write tmpfile.");
    fclose(fp);
    return false;
  }
  int fd;
  if(0 != fflush(fp) || 0 != fseek(fp, 0L, SEEK_SET) || -1 == (fd = fileno(fp))){
    S3FS_PRN_ERR("Failed to make MD5.");
    fclose(fp);
    return false;
  }
  // base64 md5
  md5 = s3fs_get_content_md5(fd);
  if(0 == md5.length()){
    S3FS_PRN_ERR("Failed to make MD5.");
    fclose(fp);
    return false;
  }
  fclose(fp);
  return true;
}

static string url_to_host(const std::string &url)
{
  S3FS_PRN_INFO3("url is %s", url.c_str());

  static const string http = "http://";
  static const string https = "https://";
  std::string host;

  if (url.compare(0, http.size(), http) == 0) {
    host = url.substr(http.size());
  } else if (url.compare(0, https.size(), https) == 0) {
    host = url.substr(https.size());
  } else {
    assert(!"url does not begin with http:// or https://");
  }

  size_t idx;
  if ((idx = host.find('/')) != string::npos) {
    return host.substr(0, idx);
  } else {
    return host;
  }
}

static string get_bucket_host()
{
  if(!pathrequeststyle){
    return bucket + "." + url_to_host(host);
  }
  return url_to_host(host);
}
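
#if 0
// Illustrative sketch (not part of the original source; the example function name
// is hypothetical): url_to_host() strips the scheme and any path after the
// authority, so get_bucket_host() yields a virtual-hosted style "bucket.host"
// unless the pathrequeststyle option is set.
static void url_to_host_example(void)
{
  string h1 = url_to_host("https://s3.amazonaws.com");         // "s3.amazonaws.com"
  string h2 = url_to_host("http://127.0.0.1:8080/some/path");  // "127.0.0.1:8080"
  (void)h1; (void)h2;
}
#endif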

// compare ETag ignoring quotes
static bool etag_equals(std::string s1, std::string s2) {
  if(s1.length() > 1 && s1[0] == '\"' && s1[s1.length() - 1] == '\"'){
    s1 = s1.substr(1, s1.size() - 2);
  }
  if(s2.length() > 1 && s2[0] == '\"' && s2[s2.length() - 1] == '\"'){
    s2 = s2.substr(1, s2.size() - 2);
  }
  return s1 == s2;
}
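
#if 0
// Illustrative sketch (not part of the original source; the example function name
// is hypothetical): S3 returns ETag values wrapped in double quotes, so a quoted
// and an unquoted MD5 string compare equal here.
static void etag_equals_example(void)
{
  bool same = etag_equals("\"d41d8cd98f00b204e9800998ecf8427e\"",
                          "d41d8cd98f00b204e9800998ecf8427e");   // true
  bool diff = etag_equals("\"aaa\"", "\"bbb\"");                 // false
  (void)same; (void)diff;
}
#endif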

#if 0 // noused
static string tolower_header_name(const char* head)
{
  string::size_type pos;
  string name = head;
  string value("");
  if(string::npos != (pos = name.find(':'))){
    value= name.substr(pos);
    name = name.substr(0, pos);
  }
  name = lower(name);
  name += value;
  return name;
}
#endif

//-------------------------------------------------------------------
// Class BodyData
//-------------------------------------------------------------------
static const int BODYDATA_RESIZE_APPEND_MIN = 1024;
static const int BODYDATA_RESIZE_APPEND_MID = 1024 * 1024;
static const int BODYDATA_RESIZE_APPEND_MAX = 10 * 1024 * 1024;

static size_t adjust_block(size_t bytes, size_t block) { return ((bytes / block) + ((bytes % block) ? 1 : 0)) * block; }
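
#if 0
// Illustrative sketch (not part of the original source; the example function name
// is hypothetical): adjust_block() rounds a byte count up to the next multiple of
// the block size, which BodyData::Resize() uses to grow the buffer in
// sizeof(off_t) sized steps.
static void adjust_block_example(void)
{
  assert(8  == adjust_block(1, 8));   // rounds up to one block
  assert(8  == adjust_block(8, 8));   // already aligned
  assert(16 == adjust_block(9, 8));   // spills into a second block
}
#endif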

bool BodyData::Resize(size_t addbytes)
{
  if(IsSafeSize(addbytes)){
    return true;
  }

  // New size
  size_t need_size = adjust_block((lastpos + addbytes + 1) - bufsize, sizeof(off_t));

  if(BODYDATA_RESIZE_APPEND_MAX < bufsize){
    need_size = (BODYDATA_RESIZE_APPEND_MAX < need_size ? need_size : BODYDATA_RESIZE_APPEND_MAX);
  }else if(BODYDATA_RESIZE_APPEND_MID < bufsize){
    need_size = (BODYDATA_RESIZE_APPEND_MID < need_size ? need_size : BODYDATA_RESIZE_APPEND_MID);
  }else if(BODYDATA_RESIZE_APPEND_MIN < bufsize){
    need_size = ((bufsize * 2) < need_size ? need_size : (bufsize * 2));
  }else{
    need_size = (BODYDATA_RESIZE_APPEND_MIN < need_size ? need_size : BODYDATA_RESIZE_APPEND_MIN);
  }

  // realloc
  char* newtext;
  if(NULL == (newtext = (char*)realloc(text, (bufsize + need_size)))){
    S3FS_PRN_CRIT("not enough memory (realloc returned NULL)");
    free(text);
    text = NULL;
    return false;
  }
  text = newtext;
  bufsize += need_size;

  return true;
}

void BodyData::Clear(void)
{
  if(text){
    free(text);
    text = NULL;
  }
  lastpos = 0;
  bufsize = 0;
}

bool BodyData::Append(void* ptr, size_t bytes)
{
  if(!ptr){
    return false;
  }
  if(0 == bytes){
    return true;
  }
  if(!Resize(bytes)){
    return false;
  }
  memcpy(&text[lastpos], ptr, bytes);
  lastpos += bytes;
  text[lastpos] = '\0';

  return true;
}

const char* BodyData::str(void) const
{
  if(!text){
    static const char* strnull = "";
    return strnull;
  }
  return text;
}

//-------------------------------------------------------------------
// Class CurlHandlerPool
//-------------------------------------------------------------------

bool CurlHandlerPool::Init()
{
  if (0 != pthread_mutex_init(&mLock, NULL)) {
    S3FS_PRN_ERR("Init curl handlers lock failed");
    return false;
  }

  mHandlers = new CURL*[mMaxHandlers](); // this will init the array to 0
  for (int i = 0; i < mMaxHandlers; ++i, ++mIndex) {
    mHandlers[i] = curl_easy_init();
    if (!mHandlers[i]) {
      S3FS_PRN_ERR("Init curl handlers pool failed");
      Destroy();
      return false;
    }
  }

  return true;
}

bool CurlHandlerPool::Destroy()
{
  assert(mIndex >= -1 && mIndex < mMaxHandlers);

  for (int i = 0; i <= mIndex; ++i) {
    curl_easy_cleanup(mHandlers[i]);
  }
  delete[] mHandlers;

  if (0 != pthread_mutex_destroy(&mLock)) {
    S3FS_PRN_ERR("Destroy curl handlers lock failed");
    return false;
  }

  return true;
}

CURL* CurlHandlerPool::GetHandler()
{
  CURL* h = NULL;

  assert(mIndex >= -1 && mIndex < mMaxHandlers);

  pthread_mutex_lock(&mLock);
  if (mIndex >= 0) {
    S3FS_PRN_DBG("Get handler from pool: %d", mIndex);
    h = mHandlers[mIndex--];
  }
  pthread_mutex_unlock(&mLock);

  if (!h) {
    S3FS_PRN_INFO("Pool empty: create new handler");
    h = curl_easy_init();
  }

  return h;
}

void CurlHandlerPool::ReturnHandler(CURL* h)
{
  bool needCleanup = true;

  assert(mIndex >= -1 && mIndex < mMaxHandlers);

  pthread_mutex_lock(&mLock);
  if (mIndex < mMaxHandlers - 1) {
    mHandlers[++mIndex] = h;
    curl_easy_reset(h);
    needCleanup = false;
    S3FS_PRN_DBG("Return handler to pool: %d", mIndex);
  }
  pthread_mutex_unlock(&mLock);

  if (needCleanup) {
    S3FS_PRN_INFO("Pool full: destroy the handler");
    curl_easy_cleanup(h);
  }
}
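
#if 0
// Illustrative usage sketch (not part of the original source; the example function
// name is hypothetical): a handle taken from the pool must be given back with
// ReturnHandler(), which resets it with curl_easy_reset() before pooling it again,
// or destroys it if the pool is already full.
static void curl_handler_pool_example(CurlHandlerPool& pool)
{
  CURL* hCurl = pool.GetHandler();
  if(hCurl){
    // ... set options with curl_easy_setopt() and run curl_easy_perform() ...
    pool.ReturnHandler(hCurl);
  }
}
#endif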

//-------------------------------------------------------------------
// Class S3fsCurl
//-------------------------------------------------------------------
static const int MULTIPART_SIZE = 10 * 1024 * 1024;
static const int MAX_MULTI_COPY_SOURCE_SIZE = 500 * 1024 * 1024;

static const int IAM_EXPIRE_MERGIN = 20 * 60; // update timing
static const std::string ECS_IAM_ENV_VAR = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI";
static const std::string IAMCRED_ACCESSKEYID = "AccessKeyId";
static const std::string IAMCRED_SECRETACCESSKEY = "SecretAccessKey";
static const std::string IAMCRED_ROLEARN = "RoleArn";

// [NOTICE]
// This symbol is for libcurl under 7.23.0
#ifndef CURLSHE_NOT_BUILT_IN
#define CURLSHE_NOT_BUILT_IN 5
#endif

pthread_mutex_t S3fsCurl::curl_handles_lock;
pthread_mutex_t S3fsCurl::curl_share_lock[SHARE_MUTEX_MAX];
bool S3fsCurl::is_initglobal_done = false;
CurlHandlerPool* S3fsCurl::sCurlPool = NULL;
int S3fsCurl::sCurlPoolSize = 32;
CURLSH* S3fsCurl::hCurlShare = NULL;
bool S3fsCurl::is_cert_check = true; // default
bool S3fsCurl::is_dns_cache = true; // default
bool S3fsCurl::is_ssl_session_cache= true; // default
long S3fsCurl::connect_timeout = 300; // default
time_t S3fsCurl::readwrite_timeout = 60; // default
int S3fsCurl::retries = 3; // default
bool S3fsCurl::is_public_bucket = false;
string S3fsCurl::default_acl = "private";
storage_class_t S3fsCurl::storage_class = STANDARD;
sseckeylist_t S3fsCurl::sseckeys;
std::string S3fsCurl::ssekmsid = "";
sse_type_t S3fsCurl::ssetype = SSE_DISABLE;
bool S3fsCurl::is_content_md5 = false;
bool S3fsCurl::is_verbose = false;
string S3fsCurl::AWSAccessKeyId;
string S3fsCurl::AWSSecretAccessKey;
string S3fsCurl::AWSAccessToken;
time_t S3fsCurl::AWSAccessTokenExpire= 0;
bool S3fsCurl::is_ecs = false;
bool S3fsCurl::is_ibm_iam_auth = false;
string S3fsCurl::IAM_cred_url = "http://169.254.169.254/latest/meta-data/iam/security-credentials/";
size_t S3fsCurl::IAM_field_count = 4;
string S3fsCurl::IAM_token_field = "Token";
string S3fsCurl::IAM_expiry_field = "Expiration";
string S3fsCurl::IAM_role;
long S3fsCurl::ssl_verify_hostname = 1; // default(original code...)
curltime_t S3fsCurl::curl_times;
curlprogress_t S3fsCurl::curl_progress;
string S3fsCurl::curl_ca_bundle;
mimes_t S3fsCurl::mimeTypes;
string S3fsCurl::userAgent;
int S3fsCurl::max_parallel_cnt = 5; // default
off_t S3fsCurl::multipart_size = MULTIPART_SIZE; // default
bool S3fsCurl::is_sigv4 = true; // default
bool S3fsCurl::is_ua = true; // default

//-------------------------------------------------------------------
// Class methods for S3fsCurl
//-------------------------------------------------------------------
bool S3fsCurl::InitS3fsCurl(const char* MimeFile)
{
  if(0 != pthread_mutex_init(&S3fsCurl::curl_handles_lock, NULL)){
    return false;
  }
  if(0 != pthread_mutex_init(&S3fsCurl::curl_share_lock[SHARE_MUTEX_DNS], NULL)){
    return false;
  }
  if(0 != pthread_mutex_init(&S3fsCurl::curl_share_lock[SHARE_MUTEX_SSL_SESSION], NULL)){
    return false;
  }
  if(!S3fsCurl::InitMimeType(MimeFile)){
    return false;
  }
  if(!S3fsCurl::InitGlobalCurl()){
    return false;
  }
  if(!S3fsCurl::InitShareCurl()){
    return false;
  }
  if(!S3fsCurl::InitCryptMutex()){
    return false;
  }
  sCurlPool = new CurlHandlerPool(sCurlPoolSize);
  if (!sCurlPool->Init()) {
    return false;
  }
  return true;
}

bool S3fsCurl::DestroyS3fsCurl(void)
{
  int result = true;

  if(!S3fsCurl::DestroyCryptMutex()){
    result = false;
  }
  if(!sCurlPool->Destroy()){
    result = false;
  }
  delete sCurlPool;
  sCurlPool = NULL;
  if(!S3fsCurl::DestroyShareCurl()){
    result = false;
  }
  if(!S3fsCurl::DestroyGlobalCurl()){
    result = false;
  }
  if(0 != pthread_mutex_destroy(&S3fsCurl::curl_share_lock[SHARE_MUTEX_DNS])){
    result = false;
  }
  if(0 != pthread_mutex_destroy(&S3fsCurl::curl_share_lock[SHARE_MUTEX_SSL_SESSION])){
    result = false;
  }
  if(0 != pthread_mutex_destroy(&S3fsCurl::curl_handles_lock)){
    result = false;
  }
  return result;
}
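
#if 0
// Illustrative lifetime sketch (not part of the original source; the example
// function name and the mime file path argument are hypothetical): the class-wide
// curl state is initialized once before any request is issued and torn down once
// on shutdown.
static bool s3fs_curl_lifecycle_example(void)
{
  if(!S3fsCurl::InitS3fsCurl("/etc/mime.types")){
    return false;
  }
  // ... issue requests through S3fsCurl instances ...
  return S3fsCurl::DestroyS3fsCurl();
}
#endif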

bool S3fsCurl::InitGlobalCurl(void)
{
  if(S3fsCurl::is_initglobal_done){
    return false;
  }
  if(CURLE_OK != curl_global_init(CURL_GLOBAL_ALL)){
    S3FS_PRN_ERR("init_curl_global_all returns error.");
    return false;
  }
  S3fsCurl::is_initglobal_done = true;
  return true;
}

bool S3fsCurl::DestroyGlobalCurl(void)
{
  if(!S3fsCurl::is_initglobal_done){
    return false;
  }
  curl_global_cleanup();
  S3fsCurl::is_initglobal_done = false;
  return true;
}

bool S3fsCurl::InitShareCurl(void)
{
  CURLSHcode nSHCode;

  if(!S3fsCurl::is_dns_cache && !S3fsCurl::is_ssl_session_cache){
    S3FS_PRN_INFO("Curl does not share DNS data.");
    return true;
  }
  if(S3fsCurl::hCurlShare){
    S3FS_PRN_WARN("already initiated.");
    return false;
  }
  if(NULL == (S3fsCurl::hCurlShare = curl_share_init())){
    S3FS_PRN_ERR("curl_share_init failed");
    return false;
  }
  if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_LOCKFUNC, S3fsCurl::LockCurlShare))){
    S3FS_PRN_ERR("curl_share_setopt(LOCKFUNC) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
    return false;
  }
  if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_UNLOCKFUNC, S3fsCurl::UnlockCurlShare))){
    S3FS_PRN_ERR("curl_share_setopt(UNLOCKFUNC) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
    return false;
  }
  if(S3fsCurl::is_dns_cache){
    nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS);
    if(CURLSHE_OK != nSHCode && CURLSHE_BAD_OPTION != nSHCode && CURLSHE_NOT_BUILT_IN != nSHCode){
      S3FS_PRN_ERR("curl_share_setopt(DNS) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
      return false;
    }else if(CURLSHE_BAD_OPTION == nSHCode || CURLSHE_NOT_BUILT_IN == nSHCode){
      S3FS_PRN_WARN("curl_share_setopt(DNS) returns %d(%s), but continue without shared dns data.", nSHCode, curl_share_strerror(nSHCode));
    }
  }
  if(S3fsCurl::is_ssl_session_cache){
    nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_SHARE, CURL_LOCK_DATA_SSL_SESSION);
    if(CURLSHE_OK != nSHCode && CURLSHE_BAD_OPTION != nSHCode && CURLSHE_NOT_BUILT_IN != nSHCode){
      S3FS_PRN_ERR("curl_share_setopt(SSL SESSION) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
      return false;
    }else if(CURLSHE_BAD_OPTION == nSHCode || CURLSHE_NOT_BUILT_IN == nSHCode){
      S3FS_PRN_WARN("curl_share_setopt(SSL SESSION) returns %d(%s), but continue without shared ssl session data.", nSHCode, curl_share_strerror(nSHCode));
    }
  }
  if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_USERDATA, (void*)&S3fsCurl::curl_share_lock[0]))){
    S3FS_PRN_ERR("curl_share_setopt(USERDATA) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
    return false;
  }
  return true;
}

bool S3fsCurl::DestroyShareCurl(void)
{
  if(!S3fsCurl::hCurlShare){
    if(!S3fsCurl::is_dns_cache && !S3fsCurl::is_ssl_session_cache){
      return true;
    }
    S3FS_PRN_WARN("already destroy share curl.");
    return false;
  }
  if(CURLSHE_OK != curl_share_cleanup(S3fsCurl::hCurlShare)){
    return false;
  }
  S3fsCurl::hCurlShare = NULL;
  return true;
}

void S3fsCurl::LockCurlShare(CURL* handle, curl_lock_data nLockData, curl_lock_access laccess, void* useptr)
{
  if(!hCurlShare){
    return;
  }
  pthread_mutex_t* lockmutex = static_cast<pthread_mutex_t*>(useptr);
  if(CURL_LOCK_DATA_DNS == nLockData){
    pthread_mutex_lock(&lockmutex[SHARE_MUTEX_DNS]);
  }else if(CURL_LOCK_DATA_SSL_SESSION == nLockData){
    pthread_mutex_lock(&lockmutex[SHARE_MUTEX_SSL_SESSION]);
  }
}

void S3fsCurl::UnlockCurlShare(CURL* handle, curl_lock_data nLockData, void* useptr)
{
  if(!hCurlShare){
    return;
  }
  pthread_mutex_t* lockmutex = static_cast<pthread_mutex_t*>(useptr);
  if(CURL_LOCK_DATA_DNS == nLockData){
    pthread_mutex_unlock(&lockmutex[SHARE_MUTEX_DNS]);
  }else if(CURL_LOCK_DATA_SSL_SESSION == nLockData){
    pthread_mutex_unlock(&lockmutex[SHARE_MUTEX_SSL_SESSION]);
  }
}
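
#if 0
// Illustrative sketch (not part of the original source; the example function name
// is hypothetical): the share handle built by InitShareCurl() is attached to each
// easy handle with CURLOPT_SHARE so DNS and SSL session data are reused across
// requests, while LockCurlShare()/UnlockCurlShare() serialize access through the
// mutexes passed via CURLSHOPT_USERDATA.
static void attach_share_handle_example(CURL* hCurl, CURLSH* hShare)
{
  if(hShare){
    curl_easy_setopt(hCurl, CURLOPT_SHARE, hShare);
  }
}
#endif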

bool S3fsCurl::InitCryptMutex(void)
{
  return s3fs_init_crypt_mutex();
}

bool S3fsCurl::DestroyCryptMutex(void)
{
  return s3fs_destroy_crypt_mutex();
}

// homegrown timeout mechanism
int S3fsCurl::CurlProgress(void *clientp, double dltotal, double dlnow, double ultotal, double ulnow)
{
  CURL* curl = static_cast<CURL*>(clientp);
  time_t now = time(0);
  progress_t p(dlnow, ulnow);

  pthread_mutex_lock(&S3fsCurl::curl_handles_lock);

  // any progress?
  if(p != S3fsCurl::curl_progress[curl]){
    // yes!
    S3fsCurl::curl_times[curl] = now;
    S3fsCurl::curl_progress[curl] = p;
  }else{
    // timeout?
    if(now - S3fsCurl::curl_times[curl] > readwrite_timeout){
      pthread_mutex_unlock(&S3fsCurl::curl_handles_lock);
      S3FS_PRN_ERR("timeout now: %jd, curl_times[curl]: %jd, readwrite_timeout: %jd",
        (intmax_t)now, (intmax_t)(S3fsCurl::curl_times[curl]), (intmax_t)readwrite_timeout);
      return CURLE_ABORTED_BY_CALLBACK;
    }
  }

  pthread_mutex_unlock(&S3fsCurl::curl_handles_lock);
  return 0;
}
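
#if 0
// Illustrative sketch (not part of the original source; the example function name
// is hypothetical): CurlProgress() is meant to be registered as a libcurl progress
// callback, with the easy handle itself passed back as the callback argument, so
// a transfer that makes no progress for longer than readwrite_timeout is aborted.
static void register_progress_callback_example(CURL* hCurl)
{
  curl_easy_setopt(hCurl, CURLOPT_NOPROGRESS, 0L);
  curl_easy_setopt(hCurl, CURLOPT_PROGRESSFUNCTION, S3fsCurl::CurlProgress);
  curl_easy_setopt(hCurl, CURLOPT_PROGRESSDATA, hCurl);
}
#endif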

bool S3fsCurl::InitMimeType(const char* MimeFile)
{
  if(!MimeFile){
    MimeFile = "/etc/mime.types"; // default
  }

  string line;
  ifstream MT(MimeFile);
  if(MT.good()){
    while(getline(MT, line)){
      if(line.size() == 0){
        continue;
      }
      if(line[0] == '#'){
        continue;
      }

      stringstream tmp(line);
      string mimeType;
      tmp >> mimeType;
      while(tmp){
        string ext;
        tmp >> ext;
        if(ext.size() == 0){
          continue;
        }
        S3fsCurl::mimeTypes[ext] = mimeType;
      }
    }
  }
  return true;
}

void S3fsCurl::InitUserAgent(void)
{
  if(S3fsCurl::userAgent.empty()){
    S3fsCurl::userAgent = "s3fs/";
    S3fsCurl::userAgent += VERSION;
    S3fsCurl::userAgent += " (commit hash ";
    S3fsCurl::userAgent += COMMIT_HASH_VAL;
    S3fsCurl::userAgent += "; ";
    S3fsCurl::userAgent += s3fs_crypt_lib_name();
    S3fsCurl::userAgent += ")";
    S3fsCurl::userAgent += instance_name;
  }
}

//
// @param s e.g., "index.html"
// @return e.g., "text/html"
//
string S3fsCurl::LookupMimeType(const string& name)
{
  string result("application/octet-stream");
  string::size_type last_pos = name.find_last_of('.');
  string::size_type first_pos = name.find_first_of('.');
  string prefix, ext, ext2;

  // No dots in name, just return
  if(last_pos == string::npos){
    return result;
  }
  // extract the last extension
  if(last_pos != string::npos){
    ext = name.substr(1+last_pos, string::npos);
  }
  if (last_pos != string::npos) {
    // one dot was found, now look for another
    if (first_pos != string::npos && first_pos < last_pos) {
      prefix = name.substr(0, last_pos);
      // Now get the second to last file extension
      string::size_type next_pos = prefix.find_last_of('.');
      if (next_pos != string::npos) {
        ext2 = prefix.substr(1+next_pos, string::npos);
      }
    }
  }

  // if we get here, then we have an extension (ext)
  mimes_t::const_iterator iter = S3fsCurl::mimeTypes.find(ext);
  // if the last extension matches a mimeType, then return
  // that mime type
  if (iter != S3fsCurl::mimeTypes.end()) {
    result = (*iter).second;
    return result;
  }

  // return with the default result if there isn't a second extension
  if(first_pos == last_pos){
    return result;
  }

  // Didn't find a mime-type for the first extension
  // Look for second extension in mimeTypes, return if found
  iter = S3fsCurl::mimeTypes.find(ext2);
  if (iter != S3fsCurl::mimeTypes.end()) {
    result = (*iter).second;
    return result;
  }

  // neither the last extension nor the second-to-last extension
  // matched a mimeType, return the default mime type
  return result;
}
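
#if 0
// Illustrative sketch (not part of the original source; the example function name
// is hypothetical): with a typical /etc/mime.types loaded, the last extension
// wins, and the second-to-last extension is tried only when the last one is
// unknown.
static void lookup_mime_type_example(void)
{
  string a = S3fsCurl::LookupMimeType("index.html");         // "text/html"
  string b = S3fsCurl::LookupMimeType("backup.tar.custom");  // falls back to the ".tar" entry
  string c = S3fsCurl::LookupMimeType("README");             // "application/octet-stream"
  (void)a; (void)b; (void)c;
}
#endif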

bool S3fsCurl::LocateBundle(void)
{
  // See if environment variable CURL_CA_BUNDLE is set
  // if so, check it, if it is a good path, then set the
  // curl_ca_bundle variable to it
  if(0 == S3fsCurl::curl_ca_bundle.size()){
    char* CURL_CA_BUNDLE = getenv("CURL_CA_BUNDLE");
    if(CURL_CA_BUNDLE != NULL) {
      // check for existence and readability of the file
      ifstream BF(CURL_CA_BUNDLE);
      if(!BF.good()){
        S3FS_PRN_ERR("%s: file specified by CURL_CA_BUNDLE environment variable is not readable", program_name.c_str());
        return false;
      }
      BF.close();
      S3fsCurl::curl_ca_bundle.assign(CURL_CA_BUNDLE);
      return true;
    }
  }else{
    // Already set ca bundle variable
    return true;
  }

  // not set via environment variable, look in likely locations

  ///////////////////////////////////////////
  // following comment from curl's (7.21.2) acinclude.m4 file
  ///////////////////////////////////////////
  // dnl CURL_CHECK_CA_BUNDLE
  // dnl -------------------------------------------------
  // dnl Check if a default ca-bundle should be used
  // dnl
  // dnl regarding the paths this will scan:
  // dnl /etc/ssl/certs/ca-certificates.crt Debian systems
  // dnl /etc/pki/tls/certs/ca-bundle.crt Redhat and Mandriva
  // dnl /usr/share/ssl/certs/ca-bundle.crt old(er) Redhat
  // dnl /usr/local/share/certs/ca-root.crt FreeBSD
  // dnl /etc/ssl/cert.pem OpenBSD
  // dnl /etc/ssl/certs/ (ca path) SUSE
  ///////////////////////////////////////////
  // Within curl the above paths should already have been checked
  // according to the OS, so we only examine a few of these files here.
  //
  ifstream BF("/etc/pki/tls/certs/ca-bundle.crt");
  if(BF.good()){
    BF.close();
    S3fsCurl::curl_ca_bundle.assign("/etc/pki/tls/certs/ca-bundle.crt");
  }else{
    BF.open("/etc/ssl/certs/ca-certificates.crt");
    if(BF.good()){
      BF.close();
      S3fsCurl::curl_ca_bundle.assign("/etc/ssl/certs/ca-certificates.crt");
    }else{
      BF.open("/usr/share/ssl/certs/ca-bundle.crt");
      if(BF.good()){
        BF.close();
        S3fsCurl::curl_ca_bundle.assign("/usr/share/ssl/certs/ca-bundle.crt");
      }else{
        BF.open("/usr/local/share/certs/ca-root.crt");
        if(BF.good()){
          BF.close();
          S3fsCurl::curl_ca_bundle.assign("/usr/local/share/certs/ca-root.crt");
        }else{
          S3FS_PRN_ERR("%s: /.../ca-bundle.crt is not readable", program_name.c_str());
          return false;
        }
      }
    }
  }
  return true;
}

size_t S3fsCurl::WriteMemoryCallback(void* ptr, size_t blockSize, size_t numBlocks, void* data)
{
  BodyData* body = static_cast<BodyData*>(data);

  if(!body->Append(ptr, blockSize, numBlocks)){
    S3FS_PRN_CRIT("BodyData.Append() returned false.");
    S3FS_FUSE_EXIT();
    return -1;
  }
  return (blockSize * numBlocks);
}

size_t S3fsCurl::ReadCallback(void* ptr, size_t size, size_t nmemb, void* userp)
{
  S3fsCurl* pCurl = reinterpret_cast<S3fsCurl*>(userp);

  if(1 > (size * nmemb)){
    return 0;
  }
  if(0 >= pCurl->postdata_remaining){
    return 0;
  }
  int copysize = std::min((int)(size * nmemb), pCurl->postdata_remaining);
  memcpy(ptr, pCurl->postdata, copysize);

  pCurl->postdata_remaining = (pCurl->postdata_remaining > copysize ? (pCurl->postdata_remaining - copysize) : 0);
  pCurl->postdata += static_cast<size_t>(copysize);

  return copysize;
}
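
#if 0
// Illustrative sketch (not part of the original source; the example function name
// is hypothetical): these static callbacks are wired to a curl easy handle, with a
// BodyData buffer collecting the response body and the owning S3fsCurl instance
// feeding the request body.
static void register_data_callbacks_example(CURL* hCurl, BodyData* bodydata, S3fsCurl* s3fscurl)
{
  curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, S3fsCurl::WriteMemoryCallback);
  curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
  curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback);
  curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)s3fscurl);
}
#endif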
|
|
|
|
|
Changes codes for performance(part 3)
* Summary
This revision includes a big change to the handling of the temporary file and
the local cache file. With this change, s3fs performs well when it opens/
closes/syncs/reads an object.
* Detail
1) About the temporary file(local file)
s3fs uses a temporary file on the local file system when it downloads/
uploads/opens/seeks an object on S3.
After this revision, s3fs calls the ftruncate() function when it creates the
temporary file, so it can set the exact file length without downloading.
(Notice - ftruncate() is specified for XSI-compliant systems, so you may have
a problem on non-XSI-compliant systems.)
With this change, s3fs can download a part of an object by sending a request
with a "Range" HTTP header; in effect it downloads block by block.
(See the block-handling sketch after this commit message.)
The default block(part) size is 50MB, which is the default parallel request
count(5) multiplied by the default multipart upload size(10MB).
If you need to change this block size, use the new "fd_page_size" option.
It accepts any value of 1MB(1024 * 1024) or more.
Note that fdcache.cpp(and fdcache.h) were changed a lot for this.
2) About the local cache
Local cache files in the directory specified by the "use_cache" option do not
always hold all of the object data.
This is because s3fs uses ftruncate() and reads(writes) the temporary file in
block units.
s3fs tracks the status of each block unit("downloaded area" or not).
For this status, s3fs creates a new status file in the cache directory
specified by the "use_cache" option; the status files are placed in a
directory named "<use_cache directory>/.<bucket_name>/".
When s3fs opens a status file, it locks the file for exclusive control by
calling flock(). Be aware that the status files therefore cannot be placed
on a network drive(like NFS).
This revision also changes the file open mode: s3fs always opens a local
cache file and its status file in writable mode.
Finally, this revision adds the new "del_cache" option, which makes s3fs
delete all local cache files when it starts and exits.
3) Uploading
When s3fs writes data to a file descriptor through a FUSE request, the old
revision downloaded the whole object. The new revision downloads only the
small partial area(some block units) that contains the written data.
When s3fs closes or flushes the file descriptor, it downloads the remaining
areas from the server and then uploads all of the data.
Revision r456 already added parallel uploading, so this revision together
with r456 and r457 is a very big change for performance.
4) Downloading
With the changed temporary file and local cache file handling, s3fs downloads
only the required range(some block units) of an object.
s3fs downloads the units with parallel GET requests, the same as for
uploading. (The maximum parallel request count and each download size are
controlled by the same parameters as for uploading.)
In the new revision, s3fs returns a file descriptor immediately when it opens
a file, because it only opens(creates) the descriptor without downloading any
data. When s3fs reads data, it downloads only the block units covering the
requested area.
This is good for performance.
5) Option rename
The "parallel_upload" option added in r456 is renamed to "parallel_count",
because the value is used not only for uploading objects but also for
downloading them. (For a while, the old name "parallel_upload" is still
accepted for compatibility.)
git-svn-id: http://s3fs.googlecode.com/svn/trunk@458 df820570-a93a-0410-bd06-b72b767a4274
2013-07-23 16:01:48 +00:00
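A minimal sketch of the block handling described in points 1) and 2) above, for
illustration only: it is not part of curl.cpp, and the paths, sizes and the
write_block() helper name are hypothetical.

// Sketch: pre-size the temporary file, lock the status file, and store one
// downloaded block unit at its offset.
#include <cerrno>
#include <fcntl.h>
#include <sys/file.h>
#include <sys/types.h>
#include <unistd.h>

static int write_block(const char* tmppath, const char* statpath,
                       off_t object_size, off_t block_start,
                       const char* buf, size_t len)
{
  int fd = open(tmppath, O_RDWR | O_CREAT, 0600);
  if(-1 == fd){
    return -errno;
  }
  // Reserve the full object length without downloading (XSI ftruncate()).
  if(-1 == ftruncate(fd, object_size)){
    int err = errno; close(fd); return -err;
  }
  // Lock the block-status file exclusively with flock(); this is why the
  // status files cannot live on a network drive such as NFS.
  int stfd = open(statpath, O_RDWR | O_CREAT, 0600);
  if(-1 == stfd){
    int err = errno; close(fd); return -err;
  }
  if(-1 == flock(stfd, LOCK_EX)){
    int err = errno; close(stfd); close(fd); return -err;
  }
  // Write one downloaded block unit (e.g. the body of a "Range" GET)
  // at its offset inside the pre-sized file.
  ssize_t written = pwrite(fd, buf, len, block_start);
  int result = (written == static_cast<ssize_t>(len)) ? 0 : -EIO;
  flock(stfd, LOCK_UN);
  close(stfd);
  close(fd);
  return result;
}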
|
|
|
size_t S3fsCurl::HeaderCallback(void* data, size_t blockSize, size_t numBlocks, void* userPtr)
|
2013-03-30 13:37:14 +00:00
|
|
|
{
|
2011-09-01 19:24:12 +00:00
|
|
|
headers_t* headers = reinterpret_cast<headers_t*>(userPtr);
|
|
|
|
string header(reinterpret_cast<char*>(data), blockSize * numBlocks);
|
|
|
|
string key;
|
|
|
|
stringstream ss(header);
|
2013-03-30 13:37:14 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
if(getline(ss, key, ':')){
|
Summary of Changes(1.63 -> 1.64)
* This new version was made to fix a big issue with directory objects.
Please be careful and review the new s3fs.
==========================
List of Changes
==========================
1) Fixed bugs
Fixed some memory leaks and an un-freed curl handle.
Fixed code that contained a bug which had not been found yet.
Fixed a bug where s3fs could not update an object's mtime while it held an open file descriptor for the object.
Please let us know when you find a new bug or memory leak.
2) Changed codes
Changed the code of s3fs_readdir() and list_bucket() etc.
Changed the get_realpath() function so that it returns std::string.
Changed the handling of the exit() function. Because exit() was called directly from many FUSE callback functions, those functions now call fuse_exit() and return with an error instead.
Changed the code so that the case of the characters in the "x-amz-meta" response headers is ignored.
3) Added an option
Added the norenameapi option for storage that is compatible with S3 but has no copy API.
This option is a subset of the nocopyapi option.
Please read the man page or call s3fs with the --help option.
4) Object for directory
This is a very big and important change.
The directory object name is changed to "dir/" instead of "dir" to be compatible with other S3 client applications.
This version still understands directory objects made by the old version.
If the new s3fs changes the attributes, owner/group or mtime of a directory object, it automatically renames the object from the old name("dir") to the new one("dir/").
If you need to change an old object name("dir") to the new one("dir/") manually, you can use the shell script(mergedir.sh) in the test directory.
* About the directory object name
AWS S3 allows both "dir" and "dir/" as object names.
The s3fs before this version understood only "dir" as a directory object name; the old version did not understand the "dir/" object name.
The new version understands both the "dir" and "dir/" object names.
The s3fs user needs to be careful about the special situations mentioned later.
The new version deletes the old "dir" object and makes a new "dir/" object when the user changes the permission, owner/group or mtime of a directory object.
This operation runs automatically in the background.
If you need to merge manually, you can use the mergedir.sh shell script in the test directory.
This script runs chmod/chown/touch commands after finding a directory.
Other S3 client applications make directory objects("dir/") without the meta information that s3fs needs; this script can add that meta information to a directory object.
If this script is insufficient for you, you can read and modify the code yourself.
Please use the shell script carefully because it changes the objects.
If you find a bug in this script, please let me know.
* Details
** The directory object made by the old version
The directory object made by the old version is not understood by other S3 client applications.
The new s3fs version was updated to keep compatibility with other clients.
You can use mergedir.sh in the test directory to merge an old directory object("dir") into a new one("dir/").
The directory object name is changed from "dir" to "dir/" after mergedir.sh is run, and the resulting "dir/" object is understood by other S3 clients.
The script runs chmod/chown/chgrp/touch/etc commands against the old directory object("dir"), and the new s3fs then merges that directory automatically.
If you need to change a directory object from old to new manually, you can do so by running any of these commands that change the directory attributes(mode/owner/group/mtime).
** The directory object made by the new version
The directory object name made by the new version is "dir/".
Because the name includes "/", other S3 client applications understand it as a directory.
I tested the new directory objects with s3cmd/tntDrive/DragonDisk/Gladinet as other S3 clients, and the compatibility was good.
You need to know that there are small compatibility problems caused by differences in the specifications between clients.
Also be careful that the old s3fs cannot understand a directory object made by the new s3fs.
You should upgrade every s3fs that accesses the same bucket.
** The directory object made by another S3 client application
To determine that an object is a directory, s3fs makes and uses special meta information, "x-amz-meta-***" and "Content-Type", as HTTP headers.
The s3fs sets and uses the following HTTP headers for a directory object (a minimal sketch of building them follows this commit message).
Content-Type: application/x-directory
x-amz-meta-mode: <mode>
x-amz-meta-uid: <UID>
x-amz-meta-gid: <GID>
x-amz-meta-mtime: <unix time of modified file>
Other S3 client applications build directory objects without the attributes that s3fs needs.
When the "ls" command is run on an s3fs-fuse file system that has directories/files made by other S3 clients, the result looks like this.
d--------- 1 root root 0 Feb 27 11:21 dir
---------- 1 root root 1024 Mar 14 02:15 file
Because the objects do not have the "x-amz-meta-mode" meta information, the mode is 0000.
In this case the directory object shows only "d", because s3fs determines that an object is a directory when its name ends with "/" or it has the "Content-Type: application/x-directory" header.
(s3fs sets "Content-Type: application/x-directory" on directory objects, but other S3 clients set "binary/octet-stream".)
As a result, nobody except root is allowed to operate on the object.
The owner and group are "root"(UID=0) because the object does not have "x-amz-meta-uid/gid".
If the object does not have "x-amz-meta-mtime", s3fs uses the "Last-Modified" HTTP header, so the object's mtime is the "Last-Modified" value. (This logic is the same as in the old version.)
As already explained, if you need to change the object attributes, you can do it manually or with mergedir.sh.
* Examples of compatibility with s3cmd etc
** Case A) Only a "dir/file" object
In this case there is only a "dir/file" object and no "dir/" object; such an object is made by s3cmd or similar tools.
The response of the REST API(list bucket) with the "delimiter=/" parameter then has "CommonPrefixes", and "dir/" is listed in "CommonPrefixes/Prefix", but the "dir/" object is not a real object.
The s3fs needs to treat this as a directory, although there is no real directory object("dir" or "dir/").
But both the new s3fs and the old one do NOT understand this "dir/" in "CommonPrefixes", because s3fs fails to get meta information from "dir" or "dir/".
In this case, the result of the "ls" command looks like this.
??????????? ? ? ? ? ? dir
This "dir" cannot be operated on by anyone or any process, because s3fs does not understand the object's permissions.
The "dir/file" object cannot be shown or operated on either.
Some other S3 clients(tntDrive/Gladinet/etc) cannot understand this object either, the same as s3fs.
If you need to operate on the "dir/file" object, you need to make the "dir/" object a real directory.
To make the "dir/" directory object, do the following.
Because the "dir" entry already exists without being a real object, you cannot make a "dir/" directory directly.
(s3cmd does not make a "dir/" object because the object name ends with "/".)
You should make a directory with another name(ex: "dir2/"), move the "dir/file" objects into the new directory,
and finally rename the directory from "dir2/" to "dir/".
** Case B) Both "dir" and "dir/file" objects
In this case there are "dir" and "dir/file" objects which were made by s3cmd/etc.
s3cmd and s3fs understand the "dir" object as a normal(file) object because it has no meta information and no trailing "/".
But the result of the REST API(list bucket) has the "dir/" name in "CommonPrefixes/Prefix".
The s3fs checks "dir/" and "dir" as a directory, but the "dir" object is not a directory object.
(Because the new s3fs needs to stay compatible with the old version, it checks for a directory object in the order "dir/", "dir".)
In this case, the result of the "ls" command looks like this.
---------- 1 root root 0 Feb 27 02:48 dir
As a result, "dir/file" cannot be shown or operated on because the "dir" object is a file.
If you want the "dir" object to be treated as a directory, you need to add meta information to it with s3cmd.
** Case C) Both "dir" and "dir/" objects
The last case is that there are "dir" and "dir/" objects which were made by other S3 clients.
(Example: first you upload an object "dir/" as a directory with the new s3fs, then you upload an object "dir" with s3cmd.)
The new s3fs determines that "dir/" is the directory, because it searches in the order "dir/", "dir".
As a result, the "dir" object cannot be shown or operated on.
** Compatibility between S3 clients
Neither the new nor the old s3fs understands both "dir" and "dir/" at the same time; tntDrive and Gladinet behave the same as s3fs.
If there are "dir/" and "dir" objects, s3fs gives priority to "dir/".
But s3cmd and DragonDisk understand both objects.
git-svn-id: http://s3fs.googlecode.com/svn/trunk@392 df820570-a93a-0410-bd06-b72b767a4274
2013-03-23 14:04:07 +00:00
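A minimal sketch of building the directory-object meta headers listed in the
commit message above, for illustration only: the example_headers_t typedef and
the build_dir_meta() helper are local to this example and are not taken from
the s3fs sources (s3fs keeps such headers in its own headers_t type, as seen
in HeaderCallback).

#include <map>
#include <sstream>
#include <string>
#include <sys/types.h>

typedef std::map<std::string, std::string> example_headers_t;

// Build the HTTP headers that let s3fs recognize an object such as "dir/"
// as a directory.
static example_headers_t build_dir_meta(mode_t mode, uid_t uid, gid_t gid, time_t mtime)
{
  example_headers_t meta;
  meta["Content-Type"] = "application/x-directory";
  std::ostringstream m, u, g, t;
  m << mode;   meta["x-amz-meta-mode"]  = m.str();
  u << uid;    meta["x-amz-meta-uid"]   = u.str();
  g << gid;    meta["x-amz-meta-gid"]   = g.str();
  t << mtime;  meta["x-amz-meta-mtime"] = t.str();
  return meta;
}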
|
|
|
// Force to lower, only "x-amz"
|
|
|
|
string lkey = key;
|
|
|
|
transform(lkey.begin(), lkey.end(), lkey.begin(), static_cast<int (*)(int)>(std::tolower));
|
2014-05-13 10:57:16 +00:00
|
|
|
if(lkey.compare(0, 5, "x-amz") == 0){
|
2013-03-23 14:04:07 +00:00
|
|
|
key = lkey;
|
|
|
|
}
|
2011-09-01 19:24:12 +00:00
|
|
|
string value;
|
|
|
|
getline(ss, value);
|
|
|
|
(*headers)[key] = trim(value);
|
|
|
|
}
|
|
|
|
return blockSize * numBlocks;
|
|
|
|
}
|
|
|
|
|
2013-07-23 16:01:48 +00:00
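For context, ReadCallback and HeaderCallback above have the libcurl callback
signature; a hypothetical, minimal registration on an easy handle would look
roughly like the sketch below. This is a generic illustration, not the actual
s3fs request-setup code.

#include <curl/curl.h>

static void example_attach_callbacks(CURL* hCurl, S3fsCurl* ctx, headers_t* respheaders)
{
  // libcurl calls the read callback to obtain the request body
  // (ReadCallback copies from the S3fsCurl postdata buffer)...
  curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback);
  curl_easy_setopt(hCurl, CURLOPT_READDATA, reinterpret_cast<void*>(ctx));
  // ...and the header callback once per response header line
  // (HeaderCallback fills the headers_t map).
  curl_easy_setopt(hCurl, CURLOPT_HEADERFUNCTION, S3fsCurl::HeaderCallback);
  curl_easy_setopt(hCurl, CURLOPT_HEADERDATA, reinterpret_cast<void*>(respheaders));
}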
|
|
|
size_t S3fsCurl::UploadReadCallback(void* ptr, size_t size, size_t nmemb, void* userp)
|
2013-07-12 00:33:36 +00:00
|
|
|
{
|
|
|
|
S3fsCurl* pCurl = reinterpret_cast<S3fsCurl*>(userp);
|
|
|
|
|
|
|
|
if(1 > (size * nmemb)){
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if(-1 == pCurl->partdata.fd || 0 >= pCurl->partdata.size){
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
// read size
|
|
|
|
ssize_t copysize = (size * nmemb) < (size_t)pCurl->partdata.size ? (size * nmemb) : (size_t)pCurl->partdata.size;
|
|
|
|
ssize_t readbytes;
|
|
|
|
ssize_t totalread;
|
|
|
|
// read and set
|
|
|
|
for(totalread = 0, readbytes = 0; totalread < copysize; totalread += readbytes){
|
|
|
|
readbytes = pread(pCurl->partdata.fd, &((char*)ptr)[totalread], (copysize - totalread), pCurl->partdata.startpos + totalread);
|
|
|
|
if(0 == readbytes){
|
|
|
|
// eof
|
|
|
|
break;
|
|
|
|
}else if(-1 == readbytes){
|
|
|
|
// error
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("read file error(%d).", errno);
|
2013-07-12 00:33:36 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
pCurl->partdata.startpos += totalread;
|
|
|
|
pCurl->partdata.size -= totalread;
|
|
|
|
|
|
|
|
return totalread;
|
|
|
|
}
|
|
|
|
|
2013-07-23 16:01:48 +00:00
|
|
|
size_t S3fsCurl::DownloadWriteCallback(void* ptr, size_t size, size_t nmemb, void* userp)
|
|
|
|
{
|
|
|
|
S3fsCurl* pCurl = reinterpret_cast<S3fsCurl*>(userp);
|
|
|
|
|
|
|
|
if(1 > (size * nmemb)){
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if(-1 == pCurl->partdata.fd || 0 >= pCurl->partdata.size){
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// write size
|
|
|
|
ssize_t copysize = (size * nmemb) < (size_t)pCurl->partdata.size ? (size * nmemb) : (size_t)pCurl->partdata.size;
|
|
|
|
ssize_t writebytes;
|
|
|
|
ssize_t totalwrite;
|
|
|
|
|
|
|
|
// write
|
|
|
|
for(totalwrite = 0, writebytes = 0; totalwrite < copysize; totalwrite += writebytes){
|
|
|
|
writebytes = pwrite(pCurl->partdata.fd, &((char*)ptr)[totalwrite], (copysize - totalwrite), pCurl->partdata.startpos + totalwrite);
|
|
|
|
if(0 == writebytes){
|
|
|
|
// eof?
|
|
|
|
break;
|
|
|
|
}else if(-1 == writebytes){
|
|
|
|
// error
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("write file error(%d).", errno);
|
2013-07-23 16:01:48 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
pCurl->partdata.startpos += totalwrite;
|
|
|
|
pCurl->partdata.size -= totalwrite;
|
|
|
|
|
|
|
|
return totalwrite;
|
|
|
|
}
|
|
|
|
|
2015-05-20 15:32:36 +00:00
|
|
|
bool S3fsCurl::SetCheckCertificate(bool isCertCheck) {
|
|
|
|
bool old = S3fsCurl::is_cert_check;
|
|
|
|
S3fsCurl::is_cert_check = isCertCheck;
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
bool S3fsCurl::SetDnsCache(bool isCache)
|
2013-03-30 13:37:14 +00:00
|
|
|
{
|
2013-07-05 02:28:31 +00:00
|
|
|
bool old = S3fsCurl::is_dns_cache;
|
|
|
|
S3fsCurl::is_dns_cache = isCache;
|
|
|
|
return old;
|
|
|
|
}
|
2011-03-01 19:35:55 +00:00
|
|
|
|
2013-09-14 21:50:39 +00:00
|
|
|
bool S3fsCurl::SetSslSessionCache(bool isCache)
|
|
|
|
{
|
|
|
|
bool old = S3fsCurl::is_ssl_session_cache;
|
|
|
|
S3fsCurl::is_ssl_session_cache = isCache;
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
long S3fsCurl::SetConnectTimeout(long timeout)
|
|
|
|
{
|
|
|
|
long old = S3fsCurl::connect_timeout;
|
|
|
|
S3fsCurl::connect_timeout = timeout;
|
|
|
|
return old;
|
|
|
|
}
|
2011-03-01 19:35:55 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
time_t S3fsCurl::SetReadwriteTimeout(time_t timeout)
|
|
|
|
{
|
|
|
|
time_t old = S3fsCurl::readwrite_timeout;
|
|
|
|
S3fsCurl::readwrite_timeout = timeout;
|
|
|
|
return old;
|
2011-03-01 19:35:55 +00:00
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
int S3fsCurl::SetRetries(int count)
|
2013-03-30 13:37:14 +00:00
|
|
|
{
|
2013-07-05 02:28:31 +00:00
|
|
|
int old = S3fsCurl::retries;
|
|
|
|
S3fsCurl::retries = count;
|
|
|
|
return old;
|
|
|
|
}
|
2011-03-01 19:35:55 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
bool S3fsCurl::SetPublicBucket(bool flag)
|
|
|
|
{
|
|
|
|
bool old = S3fsCurl::is_public_bucket;
|
|
|
|
S3fsCurl::is_public_bucket = flag;
|
|
|
|
return old;
|
2011-03-01 19:35:55 +00:00
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
string S3fsCurl::SetDefaultAcl(const char* acl)
|
2013-03-30 13:37:14 +00:00
|
|
|
{
|
2013-07-05 02:28:31 +00:00
|
|
|
string old = S3fsCurl::default_acl;
|
|
|
|
S3fsCurl::default_acl = acl ? acl : "";
|
|
|
|
return old;
|
|
|
|
}
|
2011-08-31 22:20:20 +00:00
|
|
|
|
2017-11-23 08:46:24 +00:00
|
|
|
string S3fsCurl::GetDefaultAcl()
|
|
|
|
{
|
|
|
|
return S3fsCurl::default_acl;
|
|
|
|
}
|
|
|
|
|
2015-09-17 20:10:45 +00:00
|
|
|
storage_class_t S3fsCurl::SetStorageClass(storage_class_t storage_class)
|
2013-07-05 02:28:31 +00:00
|
|
|
{
|
2015-09-17 20:10:45 +00:00
|
|
|
storage_class_t old = S3fsCurl::storage_class;
|
|
|
|
S3fsCurl::storage_class = storage_class;
|
2013-07-05 02:28:31 +00:00
|
|
|
return old;
|
2011-08-31 22:20:20 +00:00
|
|
|
}
|
|
|
|
|
2014-08-26 17:11:10 +00:00
|
|
|
bool S3fsCurl::PushbackSseKeys(string& onekey)
|
|
|
|
{
|
|
|
|
onekey = trim(onekey);
|
|
|
|
if(0 == onekey.size()){
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if('#' == onekey[0]){
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
// make base64
|
|
|
|
char* pbase64_key;
|
|
|
|
if(NULL == (pbase64_key = s3fs_base64((unsigned char*)onekey.c_str(), onekey.length()))){
|
2015-10-06 14:46:14 +00:00
|
|
|
S3FS_PRN_ERR("Failed to convert base64 from SSE-C key %s", onekey.c_str());
|
2014-08-26 17:11:10 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
string base64_key = pbase64_key;
|
|
|
|
free(pbase64_key);
|
|
|
|
|
|
|
|
// make MD5
|
|
|
|
string strMd5;
|
|
|
|
if(!make_md5_from_string(onekey.c_str(), strMd5)){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("Could not make MD5 from SSE-C keys(%s).", onekey.c_str());
|
2014-08-26 17:11:10 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
// mapped MD5 = SSE Key
|
|
|
|
sseckeymap_t md5map;
|
|
|
|
md5map.clear();
|
|
|
|
md5map[strMd5] = base64_key;
|
|
|
|
S3fsCurl::sseckeys.push_back(md5map);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2015-10-06 14:46:14 +00:00
|
|
|
sse_type_t S3fsCurl::SetSseType(sse_type_t type)
|
|
|
|
{
|
|
|
|
sse_type_t old = S3fsCurl::ssetype;
|
|
|
|
S3fsCurl::ssetype = type;
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool S3fsCurl::SetSseCKeys(const char* filepath)
|
2014-07-19 19:02:55 +00:00
|
|
|
{
|
|
|
|
if(!filepath){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("SSE-C keys filepath is empty.");
|
2014-07-19 19:02:55 +00:00
|
|
|
return false;
|
|
|
|
}
|
2015-10-06 14:46:14 +00:00
|
|
|
struct stat st;
|
|
|
|
if(0 != stat(filepath, &st)){
|
|
|
|
S3FS_PRN_ERR("could not open use_sse keys file(%s).", filepath);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if(st.st_mode & (S_IXUSR | S_IRWXG | S_IRWXO)){
|
|
|
|
S3FS_PRN_ERR("use_sse keys file %s should be 0600 permissions.", filepath);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2014-07-19 19:02:55 +00:00
|
|
|
S3fsCurl::sseckeys.clear();
|
|
|
|
|
|
|
|
ifstream ssefs(filepath);
|
|
|
|
if(!ssefs.good()){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("Could not open SSE-C keys file(%s).", filepath);
|
2014-07-19 19:02:55 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
string line;
|
|
|
|
while(getline(ssefs, line)){
|
2014-08-26 17:11:10 +00:00
|
|
|
S3fsCurl::PushbackSseKeys(line);
|
2014-07-19 19:02:55 +00:00
|
|
|
}
|
|
|
|
if(0 == S3fsCurl::sseckeys.size()){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("There is no SSE Key in file(%s).", filepath);
|
2014-07-19 19:02:55 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
2015-10-06 14:46:14 +00:00
|
|
|
|
|
|
|
bool S3fsCurl::SetSseKmsid(const char* kmsid)
|
|
|
|
{
|
|
|
|
if(!kmsid || '\0' == kmsid[0]){
|
|
|
|
S3FS_PRN_ERR("SSE-KMS kms id is empty.");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
S3fsCurl::ssekmsid = kmsid;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// [NOTE]
|
|
|
|
// Because SSE is set by some options and environment,
|
|
|
|
// this function finally checks the integrity of the SSE settings.
|
|
|
|
bool S3fsCurl::FinalCheckSse(void)
|
|
|
|
{
|
|
|
|
if(SSE_DISABLE == S3fsCurl::ssetype){
|
|
|
|
S3fsCurl::ssekmsid.erase();
|
|
|
|
}else if(SSE_S3 == S3fsCurl::ssetype){
|
|
|
|
S3fsCurl::ssekmsid.erase();
|
|
|
|
}else if(SSE_C == S3fsCurl::ssetype){
|
|
|
|
if(0 == S3fsCurl::sseckeys.size()){
|
|
|
|
S3FS_PRN_ERR("sse type is SSE-C, but there is no custom key.");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
S3fsCurl::ssekmsid.erase();
|
|
|
|
}else if(SSE_KMS == S3fsCurl::ssetype){
|
|
|
|
if(S3fsCurl::ssekmsid.empty()){
|
|
|
|
S3FS_PRN_ERR("sse type is SSE-KMS, but there is no specified kms id.");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if(!S3fsCurl::IsSignatureV4()){
|
|
|
|
S3FS_PRN_ERR("sse type is SSE-KMS, but signature type is not v4. SSE-KMS require signature v4.");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}else{
|
|
|
|
S3FS_PRN_ERR("sse type is unknown(%d).", S3fsCurl::ssetype);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
2014-08-26 17:11:10 +00:00
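For context, a minimal usage sketch of the SSE setters above, for illustration
only: the key file path is hypothetical and this is not code from s3fs itself.

// Enable SSE-C from a 0600-permission key file, then validate the settings.
static bool example_enable_sse_c()
{
  S3fsCurl::SetSseType(SSE_C);
  if(!S3fsCurl::SetSseCKeys("/path/to/ssec.keys")){
    return false;
  }
  // FinalCheckSse() verifies that the chosen sse type has what it needs
  // (custom keys for SSE-C; a kms id plus signature v4 for SSE-KMS).
  // LoadEnvSseCKeys()/LoadEnvSseKmsid() below read the AWSSSECKEYS and
  // AWSSSEKMSID environment variables instead of a file.
  return S3fsCurl::FinalCheckSse();
}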
|
|
|
|
2015-10-06 14:46:14 +00:00
|
|
|
bool S3fsCurl::LoadEnvSseCKeys(void)
|
2014-08-26 17:11:10 +00:00
|
|
|
{
|
|
|
|
char* envkeys = getenv("AWSSSECKEYS");
|
|
|
|
if(NULL == envkeys){
|
2015-10-06 14:46:14 +00:00
|
|
|
// nothing to do
|
|
|
|
return true;
|
2014-08-26 17:11:10 +00:00
|
|
|
}
|
|
|
|
S3fsCurl::sseckeys.clear();
|
|
|
|
|
|
|
|
istringstream fullkeys(envkeys);
|
|
|
|
string onekey;
|
|
|
|
while(getline(fullkeys, onekey, ':')){
|
|
|
|
S3fsCurl::PushbackSseKeys(onekey);
|
|
|
|
}
|
|
|
|
if(0 == S3fsCurl::sseckeys.size()){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("There is no SSE Key in environment(AWSSSECKEYS=%s).", envkeys);
|
2014-08-26 17:11:10 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
2014-07-19 19:02:55 +00:00
|
|
|
|
2015-10-06 14:46:14 +00:00
|
|
|
bool S3fsCurl::LoadEnvSseKmsid(void)
|
|
|
|
{
|
|
|
|
char* envkmsid = getenv("AWSSSEKMSID");
|
|
|
|
if(NULL == envkmsid){
|
|
|
|
// nothing to do
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return S3fsCurl::SetSseKmsid(envkmsid);
|
|
|
|
}
|
|
|
|
|
2014-07-19 19:02:55 +00:00
|
|
|
//
|
|
|
|
// If md5 is empty, returns the first(current) SSE key.
|
|
|
|
//
|
|
|
|
bool S3fsCurl::GetSseKey(string& md5, string& ssekey)
|
|
|
|
{
|
2015-08-12 15:04:16 +00:00
|
|
|
for(sseckeylist_t::const_iterator iter = S3fsCurl::sseckeys.begin(); iter != S3fsCurl::sseckeys.end(); ++iter){
|
2014-07-19 19:02:55 +00:00
|
|
|
if(0 == md5.length() || md5 == (*iter).begin()->first){
|
|
|
|
md5 = iter->begin()->first;
|
|
|
|
ssekey = iter->begin()->second;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool S3fsCurl::GetSseKeyMd5(int pos, string& md5)
|
|
|
|
{
|
|
|
|
if(pos < 0){
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if(S3fsCurl::sseckeys.size() <= static_cast<size_t>(pos)){
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
int cnt = 0;
|
2015-08-12 15:04:16 +00:00
|
|
|
for(sseckeylist_t::const_iterator iter = S3fsCurl::sseckeys.begin(); iter != S3fsCurl::sseckeys.end(); ++iter, ++cnt){
|
2014-07-19 19:02:55 +00:00
|
|
|
if(pos == cnt){
|
|
|
|
md5 = iter->begin()->first;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
int S3fsCurl::GetSseKeyCount(void)
|
|
|
|
{
|
|
|
|
return S3fsCurl::sseckeys.size();
|
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
bool S3fsCurl::SetContentMd5(bool flag)
|
|
|
|
{
|
|
|
|
bool old = S3fsCurl::is_content_md5;
|
|
|
|
S3fsCurl::is_content_md5 = flag;
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
2013-08-23 16:28:50 +00:00
|
|
|
bool S3fsCurl::SetVerbose(bool flag)
|
|
|
|
{
|
|
|
|
bool old = S3fsCurl::is_verbose;
|
|
|
|
S3fsCurl::is_verbose = flag;
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
bool S3fsCurl::SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey)
|
|
|
|
{
|
2017-11-23 08:46:24 +00:00
|
|
|
if((!S3fsCurl::is_ibm_iam_auth && (!AccessKeyId || '\0' == AccessKeyId[0])) || !SecretAccessKey || '\0' == SecretAccessKey[0]){
|
2013-07-05 02:28:31 +00:00
|
|
|
return false;
|
2013-03-30 13:37:14 +00:00
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
AWSAccessKeyId = AccessKeyId;
|
|
|
|
AWSSecretAccessKey = SecretAccessKey;
|
|
|
|
return true;
|
|
|
|
}
|
2011-09-01 19:24:12 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
long S3fsCurl::SetSslVerifyHostname(long value)
|
|
|
|
{
|
|
|
|
if(0 != value && 1 != value){
|
|
|
|
return -1;
|
2011-09-01 19:24:12 +00:00
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
long old = S3fsCurl::ssl_verify_hostname;
|
|
|
|
S3fsCurl::ssl_verify_hostname = value;
|
|
|
|
return old;
|
|
|
|
}
|
2011-09-01 19:24:12 +00:00
|
|
|
|
2017-11-23 08:46:24 +00:00
|
|
|
bool S3fsCurl::SetIsIBMIAMAuth(bool flag)
|
|
|
|
{
|
|
|
|
bool old = S3fsCurl::is_ibm_iam_auth;
|
|
|
|
S3fsCurl::is_ibm_iam_auth = flag;
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
2017-11-05 19:24:02 +00:00
|
|
|
bool S3fsCurl::SetIsECS(bool flag)
|
|
|
|
{
|
|
|
|
bool old = S3fsCurl::is_ecs;
|
|
|
|
S3fsCurl::is_ecs = flag;
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
2013-10-06 13:45:32 +00:00
|
|
|
string S3fsCurl::SetIAMRole(const char* role)
|
|
|
|
{
|
|
|
|
string old = S3fsCurl::IAM_role;
|
|
|
|
S3fsCurl::IAM_role = role ? role : "";
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
2017-11-23 08:46:24 +00:00
|
|
|
size_t S3fsCurl::SetIAMFieldCount(size_t field_count)
|
|
|
|
{
|
|
|
|
size_t old = S3fsCurl::IAM_field_count;
|
|
|
|
S3fsCurl::IAM_field_count = field_count;
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
|
|
|
string S3fsCurl::SetIAMCredentialsURL(const char* url)
|
|
|
|
{
|
|
|
|
string old = S3fsCurl::IAM_cred_url;
|
|
|
|
S3fsCurl::IAM_cred_url = url ? url : "";
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
|
|
|
string S3fsCurl::SetIAMTokenField(const char* token_field)
|
|
|
|
{
|
|
|
|
string old = S3fsCurl::IAM_token_field;
|
|
|
|
S3fsCurl::IAM_token_field = token_field ? token_field : "";
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
|
|
|
string S3fsCurl::SetIAMExpiryField(const char* expiry_field)
|
|
|
|
{
|
|
|
|
string old = S3fsCurl::IAM_expiry_field;
|
|
|
|
S3fsCurl::IAM_expiry_field = expiry_field ? expiry_field : "";
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
2014-03-30 07:53:41 +00:00
|
|
|
bool S3fsCurl::SetMultipartSize(off_t size)
|
|
|
|
{
|
|
|
|
size = size * 1024 * 1024;
|
2015-10-20 15:19:04 +00:00
|
|
|
if(size < MIN_MULTIPART_SIZE){
|
2014-03-30 07:53:41 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
S3fsCurl::multipart_size = size;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2013-07-23 16:01:48 +00:00
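For context, SetMultipartSize() above interprets its argument in MB, so a
hypothetical call sketch looks like the line below (assuming the resulting
byte count is not below MIN_MULTIPART_SIZE, in which case it is rejected).

S3fsCurl::SetMultipartSize(10);   // stores 10 * 1024 * 1024 bytes as the part size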
|
|
|
int S3fsCurl::SetMaxParallelCount(int value)
|
2013-07-10 06:24:06 +00:00
|
|
|
{
|
2013-07-23 16:01:48 +00:00
|
|
|
int old = S3fsCurl::max_parallel_cnt;
|
|
|
|
S3fsCurl::max_parallel_cnt = value;
|
2013-07-10 06:24:06 +00:00
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool S3fsCurl::UploadMultipartPostCallback(S3fsCurl* s3fscurl)
|
|
|
|
{
|
|
|
|
if(!s3fscurl){
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-04-02 07:27:43 +00:00
|
|
|
return s3fscurl->UploadMultipartPostComplete();
|
2013-07-10 06:24:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
S3fsCurl* S3fsCurl::UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl)
|
|
|
|
{
|
|
|
|
if(!s3fscurl){
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
// parse and get part_num, upload_id.
|
|
|
|
string upload_id;
|
|
|
|
string part_num_str;
|
|
|
|
int part_num;
|
|
|
|
if(!get_keyword_value(s3fscurl->url, "uploadId", upload_id)){
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
if(!get_keyword_value(s3fscurl->url, "partNumber", part_num_str)){
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
part_num = atoi(part_num_str.c_str());
|
|
|
|
|
2013-11-11 13:45:35 +00:00
|
|
|
if(s3fscurl->retry_count >= S3fsCurl::retries){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("Over retry count(%d) limit(%s:%d).", s3fscurl->retry_count, s3fscurl->path.c_str(), part_num);
|
2013-11-11 13:45:35 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2013-07-10 06:24:06 +00:00
|
|
|
// duplicate request
|
2013-10-09 01:44:56 +00:00
|
|
|
S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe());
|
|
|
|
newcurl->partdata.etaglist = s3fscurl->partdata.etaglist;
|
|
|
|
newcurl->partdata.etagpos = s3fscurl->partdata.etagpos;
|
|
|
|
newcurl->partdata.fd = s3fscurl->partdata.fd;
|
|
|
|
newcurl->partdata.startpos = s3fscurl->b_partdata_startpos;
|
|
|
|
newcurl->partdata.size = s3fscurl->b_partdata_size;
|
|
|
|
newcurl->b_partdata_startpos = s3fscurl->b_partdata_startpos;
|
|
|
|
newcurl->b_partdata_size = s3fscurl->b_partdata_size;
|
2013-11-11 13:45:35 +00:00
|
|
|
newcurl->retry_count = s3fscurl->retry_count + 1;
|
2013-07-10 06:24:06 +00:00
|
|
|
|
|
|
|
// setup new curl object
|
2013-10-08 08:19:10 +00:00
|
|
|
if(0 != newcurl->UploadMultipartPostSetup(s3fscurl->path.c_str(), part_num, upload_id)){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("Could not duplicate curl object(%s:%d).", s3fscurl->path.c_str(), part_num);
|
2013-07-10 06:24:06 +00:00
|
|
|
delete newcurl;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
return newcurl;
|
|
|
|
}
|
|
|
|
|
2014-08-26 17:11:10 +00:00
|
|
|
int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, int fd)
|
2013-07-10 06:24:06 +00:00
|
|
|
{
|
|
|
|
int result;
|
|
|
|
string upload_id;
|
|
|
|
struct stat st;
|
|
|
|
int fd2;
|
|
|
|
etaglist_t list;
|
|
|
|
off_t remaining_bytes;
|
2013-08-16 19:24:01 +00:00
|
|
|
S3fsCurl s3fscurl(true);
|
2013-07-10 06:24:06 +00:00
|
|
|
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd);
|
2013-07-10 06:24:06 +00:00
|
|
|
|
|
|
|
// duplicate fd
|
2013-09-14 21:50:39 +00:00
|
|
|
if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno);
|
2013-07-10 06:24:06 +00:00
|
|
|
if(-1 != fd2){
|
|
|
|
close(fd2);
|
|
|
|
}
|
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
if(-1 == fstat(fd2, &st)){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("Invalid file descriptor(errno=%d)", errno);
|
2013-09-14 21:50:39 +00:00
|
|
|
close(fd2);
|
2013-07-10 06:24:06 +00:00
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
|
2014-08-26 17:11:10 +00:00
|
|
|
if(0 != (result = s3fscurl.PreMultipartPostRequest(tpath, meta, upload_id, false))){
|
2013-09-14 21:50:39 +00:00
|
|
|
close(fd2);
|
2013-07-10 06:24:06 +00:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
s3fscurl.DestroyCurlHandle();
|
|
|
|
|
|
|
|
// cycle through open fd, pulling off 10MB chunks at a time
|
|
|
|
for(remaining_bytes = st.st_size; 0 < remaining_bytes; ){
|
|
|
|
S3fsMultiCurl curlmulti;
|
|
|
|
int para_cnt;
|
|
|
|
off_t chunk;
|
|
|
|
|
|
|
|
// Initialize S3fsMultiCurl
|
|
|
|
curlmulti.SetSuccessCallback(S3fsCurl::UploadMultipartPostCallback);
|
|
|
|
curlmulti.SetRetryCallback(S3fsCurl::UploadMultipartPostRetryCallback);
|
|
|
|
|
|
|
|
// Loop for setting up parallel upload (multipart) requests.
|
2013-07-23 16:01:48 +00:00
|
|
|
for(para_cnt = 0; para_cnt < S3fsCurl::max_parallel_cnt && 0 < remaining_bytes; para_cnt++, remaining_bytes -= chunk){
|
2013-07-10 06:24:06 +00:00
|
|
|
// chunk size
|
2014-03-30 07:53:41 +00:00
|
|
|
chunk = remaining_bytes > S3fsCurl::multipart_size ? S3fsCurl::multipart_size : remaining_bytes;
|
2013-07-10 06:24:06 +00:00
|
|
|
|
|
|
|
// s3fscurl sub object
|
2013-10-09 01:44:56 +00:00
|
|
|
S3fsCurl* s3fscurl_para = new S3fsCurl(true);
|
|
|
|
s3fscurl_para->partdata.fd = fd2;
|
|
|
|
s3fscurl_para->partdata.startpos = st.st_size - remaining_bytes;
|
|
|
|
s3fscurl_para->partdata.size = chunk;
|
|
|
|
s3fscurl_para->b_partdata_startpos = s3fscurl_para->partdata.startpos;
|
|
|
|
s3fscurl_para->b_partdata_size = s3fscurl_para->partdata.size;
|
2013-07-10 06:24:06 +00:00
|
|
|
s3fscurl_para->partdata.add_etag_list(&list);
|
|
|
|
|
|
|
|
// initiate upload part for parallel
|
|
|
|
if(0 != (result = s3fscurl_para->UploadMultipartPostSetup(tpath, list.size(), upload_id))){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("failed uploading part setup(%d)", result);
|
2013-09-14 21:50:39 +00:00
|
|
|
close(fd2);
|
2013-07-10 06:24:06 +00:00
|
|
|
delete s3fscurl_para;
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
// set into parallel object
|
|
|
|
if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath);
|
2013-09-14 21:50:39 +00:00
|
|
|
close(fd2);
|
2013-07-10 06:24:06 +00:00
|
|
|
delete s3fscurl_para;
|
2013-07-23 16:01:48 +00:00
|
|
|
return -1;
|
2013-07-10 06:24:06 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Multi request
|
|
|
|
if(0 != (result = curlmulti.Request())){
|
2014-04-05 05:11:55 +00:00
|
|
|
S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result);
|
2013-07-10 06:24:06 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// reinit for loop.
|
|
|
|
curlmulti.Clear();
|
|
|
|
}
|
2013-09-14 21:50:39 +00:00
|
|
|
close(fd2);
|
2013-07-10 06:24:06 +00:00
|
|
|
|
|
|
|
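  // Complete the multipart upload with the collected part ETags.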
if(0 != (result = s3fscurl.CompleteMultipartPostRequest(tpath, upload_id, list))){
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-07-23 16:01:48 +00:00
|
|
|
S3fsCurl* S3fsCurl::ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl)
|
|
|
|
{
|
|
|
|
int result;
|
|
|
|
|
|
|
|
if(!s3fscurl){
|
|
|
|
return NULL;
|
|
|
|
}
|
2013-11-11 13:45:35 +00:00
|
|
|
if(s3fscurl->retry_count >= S3fsCurl::retries){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("Over retry count(%d) limit(%s).", s3fscurl->retry_count, s3fscurl->path.c_str());
|
2013-11-11 13:45:35 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2013-07-23 16:01:48 +00:00
|
|
|
// duplicate request(setup new curl object)
|
2013-08-16 19:24:01 +00:00
|
|
|
S3fsCurl* newcurl = new S3fsCurl(s3fscurl->IsUseAhbe());
|
2015-10-06 14:46:14 +00:00
|
|
|
if(0 != (result = newcurl->PreGetObjectRequest(s3fscurl->path.c_str(), s3fscurl->partdata.fd,
|
|
|
|
s3fscurl->partdata.startpos, s3fscurl->partdata.size, s3fscurl->b_ssetype, s3fscurl->b_ssevalue)))
|
|
|
|
{
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("failed downloading part setup(%d)", result);
|
2013-07-23 16:01:48 +00:00
|
|
|
delete newcurl;
|
|
|
|
return NULL;
|
|
|
|
}
|
2013-11-11 13:45:35 +00:00
|
|
|
newcurl->retry_count = s3fscurl->retry_count + 1;
|
|
|
|
|
2013-07-23 16:01:48 +00:00
|
|
|
return newcurl;
|
|
|
|
}
|
|
|
|
|
|
|
|
int S3fsCurl::ParallelGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size)
|
|
|
|
{
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd);
|
2013-07-23 16:01:48 +00:00
|
|
|
|
2015-10-06 14:46:14 +00:00
|
|
|
sse_type_t ssetype;
|
|
|
|
string ssevalue;
|
|
|
|
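  // Look up the SSE type and value set on the object; they are passed to each ranged GET request.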
if(!get_object_sse_type(tpath, ssetype, ssevalue)){
|
|
|
|
S3FS_PRN_WARN("Failed to get SSE type for file(%s).", SAFESTRPTR(tpath));
|
2014-07-19 19:02:55 +00:00
|
|
|
}
|
2015-10-06 14:46:14 +00:00
|
|
|
int result = 0;
|
|
|
|
ssize_t remaining_bytes;
|
2013-07-23 16:01:48 +00:00
|
|
|
|
|
|
|
// cycle through open fd, pulling off 10MB chunks at a time
|
|
|
|
for(remaining_bytes = size; 0 < remaining_bytes; ){
|
|
|
|
S3fsMultiCurl curlmulti;
|
|
|
|
int para_cnt;
|
|
|
|
off_t chunk;
|
|
|
|
|
|
|
|
// Initialize S3fsMultiCurl
|
|
|
|
//curlmulti.SetSuccessCallback(NULL);   // no need to set a success callback
|
|
|
|
curlmulti.SetRetryCallback(S3fsCurl::ParallelGetObjectRetryCallback);
|
|
|
|
|
|
|
|
// Loop for setting up parallel download (GET) requests.
|
|
|
|
for(para_cnt = 0; para_cnt < S3fsCurl::max_parallel_cnt && 0 < remaining_bytes; para_cnt++, remaining_bytes -= chunk){
|
|
|
|
// chunk size
|
2014-03-30 07:53:41 +00:00
|
|
|
chunk = remaining_bytes > S3fsCurl::multipart_size ? S3fsCurl::multipart_size : remaining_bytes;
|
2013-07-23 16:01:48 +00:00
|
|
|
|
|
|
|
// s3fscurl sub object
|
|
|
|
S3fsCurl* s3fscurl_para = new S3fsCurl();
|
2015-10-06 14:46:14 +00:00
|
|
|
if(0 != (result = s3fscurl_para->PreGetObjectRequest(tpath, fd, (start + size - remaining_bytes), chunk, ssetype, ssevalue))){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("failed downloading part setup(%d)", result);
|
2013-07-23 16:01:48 +00:00
|
|
|
delete s3fscurl_para;
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
// set into parallel object
|
|
|
|
if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath);
|
2013-07-23 16:01:48 +00:00
|
|
|
delete s3fscurl_para;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Multi request
|
|
|
|
if(0 != (result = curlmulti.Request())){
|
2014-04-05 05:11:55 +00:00
|
|
|
S3FS_PRN_ERR("error occurred in multi request(errno=%d).", result);
|
2013-07-23 16:01:48 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// reinit for loop.
|
|
|
|
curlmulti.Clear();
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2013-10-06 13:45:32 +00:00
|
|
|
bool S3fsCurl::ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval)
|
|
|
|
{
|
|
|
|
if(!response){
|
|
|
|
return false;
|
|
|
|
}
|
2017-11-22 11:21:57 +00:00
|
|
|
istringstream sscred(response);
|
|
|
|
string oneline;
|
|
|
|
keyval.clear();
|
2017-11-23 08:46:24 +00:00
|
|
|
while(getline(sscred, oneline, ',')){
|
2017-11-22 11:21:57 +00:00
|
|
|
string::size_type pos;
|
|
|
|
string key;
|
|
|
|
string val;
|
|
|
|
if(string::npos != (pos = oneline.find(IAMCRED_ACCESSKEYID))){
|
|
|
|
key = IAMCRED_ACCESSKEYID;
|
|
|
|
}else if(string::npos != (pos = oneline.find(IAMCRED_SECRETACCESSKEY))){
|
|
|
|
key = IAMCRED_SECRETACCESSKEY;
|
2017-11-23 08:46:24 +00:00
|
|
|
}else if(string::npos != (pos = oneline.find(S3fsCurl::IAM_token_field))){
|
|
|
|
key = S3fsCurl::IAM_token_field;
|
|
|
|
}else if(string::npos != (pos = oneline.find(S3fsCurl::IAM_expiry_field))){
|
|
|
|
key = S3fsCurl::IAM_expiry_field;
|
2017-11-22 11:21:57 +00:00
|
|
|
}else if(string::npos != (pos = oneline.find(IAMCRED_ROLEARN))){
|
|
|
|
key = IAMCRED_ROLEARN;
|
|
|
|
}else{
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if(string::npos == (pos = oneline.find(':', pos + key.length()))){
|
|
|
|
continue;
|
|
|
|
}
|
2017-11-23 08:46:24 +00:00
|
|
|
|
|
|
|
if(S3fsCurl::is_ibm_iam_auth && key == S3fsCurl::IAM_expiry_field){
|
|
|
|
// parse integer value
|
|
|
|
if(string::npos == (pos = oneline.find_first_of("0123456789", pos))){
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
oneline = oneline.substr(pos);
|
|
|
|
if(string::npos == (pos = oneline.find_last_of("0123456789"))){
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
val = oneline.substr(0, pos+1);
|
|
|
|
}else{
|
|
|
|
// parse string value (starts and ends with quotes)
|
|
|
|
if(string::npos == (pos = oneline.find('\"', pos))){
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
oneline = oneline.substr(pos + sizeof(char));
|
|
|
|
if(string::npos == (pos = oneline.find('\"'))){
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
val = oneline.substr(0, pos);
|
2017-11-22 11:21:57 +00:00
|
|
|
}
|
|
|
|
keyval[key] = val;
|
2017-11-07 21:20:02 +00:00
|
|
|
}
|
2013-10-06 13:45:32 +00:00
|
|
|
return true;
|
|
|
|
}
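For reference, ParseIAMCredentialResponse() above splits the credential document on commas and pulls each quoted value out of its line. A hypothetical usage sketch follows; the field names mirror the usual shape of an EC2 instance-metadata credential response (an assumption, not something defined in this file), it presumes the s3fs headers that declare iamcredmap_t are included, and in the real code the method may be private, so treat it purely as an illustration of the input/output shape.
-----------
// Hypothetical input document; field names are assumed.
static const char* sample_response =
    "{\n"
    "  \"Code\" : \"Success\",\n"
    "  \"AccessKeyId\" : \"AKIAEXAMPLE\",\n"
    "  \"SecretAccessKey\" : \"secretEXAMPLE\",\n"
    "  \"Token\" : \"tokenEXAMPLE\",\n"
    "  \"Expiration\" : \"2020-01-01T00:00:00Z\"\n"
    "}";

static void demo_parse_iam_response()
{
  iamcredmap_t keyval;
  if(S3fsCurl::ParseIAMCredentialResponse(sample_response, keyval)){
    // keyval now maps, e.g., "AccessKeyId" -> "AKIAEXAMPLE" and
    // "SecretAccessKey" -> "secretEXAMPLE"; SetIAMCredentials() performs
    // exactly this parse before copying the values into the class members.
  }
}
-----------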
|
|
|
|
|
|
|
|
bool S3fsCurl::SetIAMCredentials(const char* response)
|
|
|
|
{
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("IAM credential response = \"%s\"", response);
|
2013-10-06 13:45:32 +00:00
|
|
|
|
|
|
|
iamcredmap_t keyval;
|
|
|
|
|
|
|
|
if(!ParseIAMCredentialResponse(response, keyval)){
|
|
|
|
return false;
|
|
|
|
}
|
2017-11-06 21:45:58 +00:00
|
|
|
|
2017-11-23 08:46:24 +00:00
|
|
|
if(S3fsCurl::IAM_field_count != keyval.size()){
|
2013-10-06 13:45:32 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-11-23 08:46:24 +00:00
|
|
|
S3fsCurl::AWSAccessToken = keyval[string(S3fsCurl::IAM_token_field)];
|
|
|
|
|
|
|
|
if(S3fsCurl::is_ibm_iam_auth){
|
|
|
|
S3fsCurl::AWSAccessTokenExpire = strtol(keyval[string(S3fsCurl::IAM_expiry_field)].c_str(), NULL, 10);
|
|
|
|
}else{
|
|
|
|
S3fsCurl::AWSAccessKeyId = keyval[string(IAMCRED_ACCESSKEYID)];
|
|
|
|
S3fsCurl::AWSSecretAccessKey = keyval[string(IAMCRED_SECRETACCESSKEY)];
|
|
|
|
S3fsCurl::AWSAccessTokenExpire = cvtIAMExpireStringToTime(keyval[S3fsCurl::IAM_expiry_field].c_str());
|
|
|
|
}
|
2013-10-06 13:45:32 +00:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool S3fsCurl::CheckIAMCredentialUpdate(void)
|
|
|
|
{
|
2017-11-23 08:46:24 +00:00
|
|
|
if(0 == S3fsCurl::IAM_role.size() && !S3fsCurl::is_ecs && !S3fsCurl::is_ibm_iam_auth){
|
2013-10-06 13:45:32 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
if(time(NULL) + IAM_EXPIRE_MERGIN <= S3fsCurl::AWSAccessTokenExpire){
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
// update
|
|
|
|
S3fsCurl s3fscurl;
|
|
|
|
if(0 != s3fscurl.GetIAMCredentials()){
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2016-05-06 04:37:32 +00:00
|
|
|
bool S3fsCurl::ParseIAMRoleFromMetaDataResponse(const char* response, string& rolename)
|
|
|
|
{
|
|
|
|
if(!response){
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
// [NOTE]
|
|
|
|
// The expected response is a single line containing the role name, e.g.:
|
|
|
|
//
|
2016-05-24 01:09:16 +00:00
|
|
|
// myrolename
|
2016-05-06 04:37:32 +00:00
|
|
|
//
|
|
|
|
istringstream ssrole(response);
|
|
|
|
string oneline;
|
2016-05-24 01:09:16 +00:00
|
|
|
if (getline(ssrole, oneline, '\n')){
|
|
|
|
rolename = oneline;
|
|
|
|
return !rolename.empty();
|
2016-05-06 04:37:32 +00:00
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool S3fsCurl::SetIAMRoleFromMetaData(const char* response)
|
|
|
|
{
|
|
|
|
S3FS_PRN_INFO3("IAM role name response = \"%s\"", response);
|
|
|
|
|
|
|
|
string rolename;
|
|
|
|
|
|
|
|
if(!S3fsCurl::ParseIAMRoleFromMetaDataResponse(response, rolename)){
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
SetIAMRole(rolename.c_str());
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2016-04-17 07:44:03 +00:00
|
|
|
bool S3fsCurl::AddUserAgent(CURL* hCurl)
|
|
|
|
{
|
|
|
|
if(!hCurl){
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if(S3fsCurl::IsUserAgentFlag()){
|
2017-09-17 09:16:05 +00:00
|
|
|
curl_easy_setopt(hCurl, CURLOPT_USERAGENT, S3fsCurl::userAgent.c_str());
|
2016-04-17 07:44:03 +00:00
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2015-09-30 19:41:27 +00:00
|
|
|
int S3fsCurl::CurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr)
|
|
|
|
{
|
|
|
|
if(!hcurl){
|
|
|
|
// something wrong...
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
switch(type){
|
|
|
|
case CURLINFO_TEXT:
|
2016-10-22 12:02:37 +00:00
|
|
|
// Swap tab indentation with spaces so it stays pretty in syslog
|
|
|
|
int indent;
|
|
|
|
indent = 0;
|
|
|
|
while (*data == '\t' && size > 0) {
|
|
|
|
indent += 4;
|
|
|
|
size--;
|
|
|
|
data++;
|
|
|
|
}
|
|
|
|
S3FS_PRN_CURL("* %*s%.*s", indent, "", (int)size, data);
|
|
|
|
break;
|
2015-09-30 19:41:27 +00:00
|
|
|
case CURLINFO_HEADER_IN:
|
|
|
|
case CURLINFO_HEADER_OUT:
|
2018-02-28 12:29:58 +00:00
|
|
|
size_t remaining;
|
|
|
|
char* p;
|
|
|
|
|
2016-10-22 12:02:37 +00:00
|
|
|
// Print each line individually for tidy output
|
2018-02-28 12:29:58 +00:00
|
|
|
remaining = size;
|
|
|
|
p = data;
|
2016-10-22 12:02:37 +00:00
|
|
|
do {
|
2018-02-28 12:29:58 +00:00
|
|
|
char* eol = (char*)memchr(p, '\n', remaining);
|
|
|
|
int newline = 0;
|
2016-10-22 12:02:37 +00:00
|
|
|
if (eol == NULL) {
|
|
|
|
eol = (char*)memchr(p, '\r', remaining);
|
2018-06-24 02:38:59 +00:00
|
|
|
} else {
|
|
|
|
if (eol > p && *(eol - 1) == '\r') {
|
|
|
|
newline++;
|
|
|
|
}
|
2016-10-22 12:02:37 +00:00
|
|
|
newline++;
|
|
|
|
eol++;
|
|
|
|
}
|
2018-02-28 12:06:06 +00:00
|
|
|
size_t length = eol - p;
|
2016-10-22 12:02:37 +00:00
|
|
|
S3FS_PRN_CURL("%c %.*s", CURLINFO_HEADER_IN == type ? '<' : '>', (int)length - newline, p);
|
|
|
|
remaining -= length;
|
|
|
|
p = eol;
|
|
|
|
} while (p != NULL && remaining > 0);
|
2015-09-30 19:41:27 +00:00
|
|
|
break;
|
|
|
|
case CURLINFO_DATA_IN:
|
|
|
|
case CURLINFO_DATA_OUT:
|
|
|
|
case CURLINFO_SSL_DATA_IN:
|
|
|
|
case CURLINFO_SSL_DATA_OUT:
|
|
|
|
// not put
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
// unexpected curl_infotype; nothing to log
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
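CurlDebugFunc() above only runs once libcurl is told to use it; ResetHandle() further down wires it up when S3fsCurl::is_verbose is set and s3fs is not running in the foreground. The standard libcurl wiring looks like this (a minimal sketch, not s3fs code, and it assumes the callback is accessible from the calling context):
-----------
CURL* hcurl = curl_easy_init();
if(hcurl){
  // The debug callback is only invoked when verbose mode is enabled.
  curl_easy_setopt(hcurl, CURLOPT_VERBOSE, 1L);
  curl_easy_setopt(hcurl, CURLOPT_DEBUGFUNCTION, S3fsCurl::CurlDebugFunc);
  // ... set CURLOPT_URL etc. and call curl_easy_perform() ...
  curl_easy_cleanup(hcurl);
}
-----------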
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
//-------------------------------------------------------------------
|
|
|
|
// Methods for S3fsCurl
|
|
|
|
//-------------------------------------------------------------------
|
2013-08-16 19:24:01 +00:00
|
|
|
S3fsCurl::S3fsCurl(bool ahbe) :
|
2016-05-05 04:53:26 +00:00
|
|
|
hCurl(NULL), type(REQTYPE_UNSET), path(""), base_path(""), saved_path(""), url(""), requestHeaders(NULL),
|
2013-08-21 07:43:32 +00:00
|
|
|
bodydata(NULL), headdata(NULL), LastResponseCode(-1), postdata(NULL), postdata_remaining(0), is_use_ahbe(ahbe),
|
2014-07-19 19:02:55 +00:00
|
|
|
retry_count(0), b_infile(NULL), b_postdata(NULL), b_postdata_remaining(0), b_partdata_startpos(0), b_partdata_size(0),
|
2017-10-26 14:21:48 +00:00
|
|
|
b_ssekey_pos(-1), b_ssevalue(""), b_ssetype(SSE_DISABLE), op(""), query_string("")
|
2013-07-05 02:28:31 +00:00
|
|
|
{
|
2011-09-01 19:24:12 +00:00
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
S3fsCurl::~S3fsCurl()
|
2013-03-30 13:37:14 +00:00
|
|
|
{
|
2013-07-05 02:28:31 +00:00
|
|
|
DestroyCurlHandle();
|
|
|
|
}
|
2013-03-30 13:37:14 +00:00
|
|
|
|
2013-08-21 07:43:32 +00:00
|
|
|
bool S3fsCurl::ResetHandle(void)
|
2013-07-05 02:28:31 +00:00
|
|
|
{
|
|
|
|
curl_easy_reset(hCurl);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_NOSIGNAL, 1);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_FOLLOWLOCATION, true);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_CONNECTTIMEOUT, S3fsCurl::connect_timeout);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_NOPROGRESS, 0);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_PROGRESSFUNCTION, S3fsCurl::CurlProgress);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_PROGRESSDATA, hCurl);
|
|
|
|
// curl_easy_setopt(hCurl, CURLOPT_FORBID_REUSE, 1);
|
2013-09-14 21:50:39 +00:00
|
|
|
|
2016-05-06 04:37:32 +00:00
|
|
|
if(type != REQTYPE_IAMCRED && type != REQTYPE_IAMROLE){
|
|
|
|
// REQTYPE_IAMCRED and REQTYPE_IAMROLE are always HTTP
|
2013-10-06 13:45:32 +00:00
|
|
|
if(0 == S3fsCurl::ssl_verify_hostname){
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_SSL_VERIFYHOST, 0);
|
|
|
|
}
|
|
|
|
if(S3fsCurl::curl_ca_bundle.size() != 0){
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_CAINFO, S3fsCurl::curl_ca_bundle.c_str());
|
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
}
|
2013-09-14 21:50:39 +00:00
|
|
|
if((S3fsCurl::is_dns_cache || S3fsCurl::is_ssl_session_cache) && S3fsCurl::hCurlShare){
|
2013-07-05 02:28:31 +00:00
|
|
|
curl_easy_setopt(hCurl, CURLOPT_SHARE, S3fsCurl::hCurlShare);
|
|
|
|
}
|
2015-05-20 15:32:36 +00:00
|
|
|
if(!S3fsCurl::is_cert_check) {
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_DBG("'no_check_certificate' option in effect.")
|
|
|
|
S3FS_PRN_DBG("The server certificate won't be checked against the available certificate authorities.")
|
2015-05-20 15:32:36 +00:00
|
|
|
curl_easy_setopt(hCurl, CURLOPT_SSL_VERIFYPEER, false);
|
|
|
|
}
|
2013-08-23 16:28:50 +00:00
|
|
|
if(S3fsCurl::is_verbose){
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_VERBOSE, true);
|
2015-09-30 19:41:27 +00:00
|
|
|
if(!foreground){
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_DEBUGFUNCTION, S3fsCurl::CurlDebugFunc);
|
|
|
|
}
|
2013-08-23 16:28:50 +00:00
|
|
|
}
|
2017-04-04 12:32:53 +00:00
|
|
|
if(!cipher_suites.empty()) {
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_SSL_CIPHER_LIST, cipher_suites.c_str());
|
|
|
|
}
|
2013-03-30 13:37:14 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
S3fsCurl::curl_times[hCurl] = time(0);
|
|
|
|
S3fsCurl::curl_progress[hCurl] = progress_t(-1, -1);
|
|
|
|
|
2013-08-21 07:43:32 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool S3fsCurl::CreateCurlHandle(bool force)
|
|
|
|
{
|
|
|
|
pthread_mutex_lock(&S3fsCurl::curl_handles_lock);
|
|
|
|
|
|
|
|
if(hCurl){
|
|
|
|
if(!force){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_WARN("already create handle.");
|
2013-08-21 07:43:32 +00:00
|
|
|
return false;
|
|
|
|
}
|
2018-05-27 10:48:03 +00:00
|
|
|
if(!DestroyCurlHandle(true)){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("could not destroy handle.");
|
2013-08-21 07:43:32 +00:00
|
|
|
return false;
|
|
|
|
}
|
2016-11-11 23:27:17 +00:00
|
|
|
S3FS_PRN_INFO3("already has handle, so destroyed it.");
|
2013-08-21 07:43:32 +00:00
|
|
|
}
|
|
|
|
|
2016-04-22 06:57:31 +00:00
|
|
|
if(NULL == (hCurl = sCurlPool->GetHandler())){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("Failed to create handle.");
|
2013-08-21 07:43:32 +00:00
|
|
|
return false;
|
|
|
|
}
|
2016-01-24 05:01:50 +00:00
|
|
|
|
|
|
|
// [NOTE]
|
2016-05-06 04:37:32 +00:00
|
|
|
// If type is REQTYPE_IAMCRED or REQTYPE_IAMROLE, do not clear type.
|
2016-01-24 05:01:50 +00:00
|
|
|
// Because those types use only the HTTP protocol, they need the special
|
|
|
|
// logic in ResetHandle function.
|
|
|
|
//
|
2016-05-06 04:37:32 +00:00
|
|
|
if(type != REQTYPE_IAMCRED && type != REQTYPE_IAMROLE){
|
2016-01-24 05:01:50 +00:00
|
|
|
type = REQTYPE_UNSET;
|
|
|
|
}
|
|
|
|
|
2013-08-21 07:43:32 +00:00
|
|
|
ResetHandle();
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
pthread_mutex_unlock(&S3fsCurl::curl_handles_lock);
|
2013-03-30 13:37:14 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
return true;
|
2013-03-30 13:37:14 +00:00
|
|
|
}
|
|
|
|
|
2018-05-27 10:48:03 +00:00
|
|
|
bool S3fsCurl::DestroyCurlHandle(bool force)
|
2013-03-30 13:37:14 +00:00
|
|
|
{
|
2013-07-05 02:28:31 +00:00
|
|
|
ClearInternalData();
|
|
|
|
|
2018-05-27 10:48:03 +00:00
|
|
|
if(hCurl){
|
|
|
|
pthread_mutex_lock(&S3fsCurl::curl_handles_lock);
|
|
|
|
|
|
|
|
S3fsCurl::curl_times.erase(hCurl);
|
|
|
|
S3fsCurl::curl_progress.erase(hCurl);
|
|
|
|
if(retry_count == 0 || force){
|
|
|
|
sCurlPool->ReturnHandler(hCurl);
|
|
|
|
}else{
|
|
|
|
curl_easy_cleanup(hCurl);
|
|
|
|
}
|
|
|
|
hCurl = NULL;
|
|
|
|
|
|
|
|
pthread_mutex_unlock(&S3fsCurl::curl_handles_lock);
|
|
|
|
}else{
|
|
|
|
return false;
|
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool S3fsCurl::ClearInternalData(void)
|
|
|
|
{
|
2018-05-27 10:48:03 +00:00
|
|
|
// Always clear internal data
|
|
|
|
//
|
2017-10-26 14:21:48 +00:00
|
|
|
type = REQTYPE_UNSET;
|
|
|
|
path = "";
|
|
|
|
base_path = "";
|
|
|
|
saved_path = "";
|
|
|
|
url = "";
|
|
|
|
op = "";
|
|
|
|
query_string= "";
|
2013-07-05 02:28:31 +00:00
|
|
|
if(requestHeaders){
|
|
|
|
curl_slist_free_all(requestHeaders);
|
|
|
|
requestHeaders = NULL;
|
|
|
|
}
|
|
|
|
responseHeaders.clear();
|
|
|
|
if(bodydata){
|
|
|
|
delete bodydata;
|
|
|
|
bodydata = NULL;
|
|
|
|
}
|
|
|
|
if(headdata){
|
|
|
|
delete headdata;
|
|
|
|
headdata = NULL;
|
|
|
|
}
|
2013-08-21 07:43:32 +00:00
|
|
|
LastResponseCode = -1;
|
|
|
|
postdata = NULL;
|
|
|
|
postdata_remaining = 0;
|
2013-11-11 13:45:35 +00:00
|
|
|
retry_count = 0;
|
2013-08-21 07:43:32 +00:00
|
|
|
b_infile = NULL;
|
|
|
|
b_postdata = NULL;
|
|
|
|
b_postdata_remaining = 0;
|
|
|
|
b_partdata_startpos = 0;
|
|
|
|
b_partdata_size = 0;
|
2013-07-10 06:24:06 +00:00
|
|
|
partdata.clear();
|
|
|
|
|
2013-09-14 21:50:39 +00:00
|
|
|
S3FS_MALLOCTRIM(0);
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
return true;
|
|
|
|
}
|
2011-03-01 19:35:55 +00:00
|
|
|
|
2013-08-16 19:24:01 +00:00
|
|
|
bool S3fsCurl::SetUseAhbe(bool ahbe)
|
|
|
|
{
|
|
|
|
bool old = is_use_ahbe;
|
|
|
|
is_use_ahbe = ahbe;
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
bool S3fsCurl::GetResponseCode(long& responseCode)
|
|
|
|
{
|
|
|
|
if(!hCurl){
|
|
|
|
return false;
|
2013-03-30 13:37:14 +00:00
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
responseCode = -1;
|
|
|
|
if(CURLE_OK != curl_easy_getinfo(hCurl, CURLINFO_RESPONSE_CODE, &LastResponseCode)){
|
|
|
|
return false;
|
2013-03-30 13:37:14 +00:00
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
responseCode = LastResponseCode;
|
|
|
|
return true;
|
|
|
|
}
|
2011-03-01 19:35:55 +00:00
|
|
|
|
2013-08-21 07:43:32 +00:00
|
|
|
//
|
|
|
|
// Reset all options for retrying
|
|
|
|
//
|
|
|
|
bool S3fsCurl::RemakeHandle(void)
|
|
|
|
{
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("Retry request. [type=%d][url=%s][path=%s]", type, url.c_str(), path.c_str());
|
2013-08-21 07:43:32 +00:00
|
|
|
|
|
|
|
if(REQTYPE_UNSET == type){
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// rewind file
|
|
|
|
struct stat st;
|
|
|
|
if(b_infile){
|
|
|
|
rewind(b_infile);
|
|
|
|
if(-1 == fstat(fileno(b_infile), &st)){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_WARN("Could not get file stat(fd=%d)", fileno(b_infile));
|
2013-08-21 07:43:32 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// reinitialize internal data
|
|
|
|
responseHeaders.clear();
|
|
|
|
if(bodydata){
|
|
|
|
bodydata->Clear();
|
|
|
|
}
|
|
|
|
if(headdata){
|
|
|
|
headdata->Clear();
|
|
|
|
}
|
|
|
|
LastResponseCode = -1;
|
|
|
|
|
2013-11-11 13:45:35 +00:00
|
|
|
// count up (only used for multipart retries)
|
|
|
|
retry_count++;
|
|
|
|
|
2013-08-21 07:43:32 +00:00
|
|
|
// set from backup
|
|
|
|
postdata = b_postdata;
|
|
|
|
postdata_remaining = b_postdata_remaining;
|
|
|
|
partdata.startpos = b_partdata_startpos;
|
|
|
|
partdata.size = b_partdata_size;
|
|
|
|
|
|
|
|
// reset handle
|
2018-03-01 11:21:27 +00:00
|
|
|
curl_easy_cleanup(hCurl);
|
|
|
|
hCurl = curl_easy_init();
|
2013-08-21 07:43:32 +00:00
|
|
|
ResetHandle();
|
2018-03-01 11:21:27 +00:00
|
|
|
// disable ssl cache, so that a new session will be created
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_SSL_SESSIONID_CACHE, 0);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_SHARE, NULL);
|
2013-08-21 07:43:32 +00:00
|
|
|
|
|
|
|
// set options
|
|
|
|
switch(type){
|
|
|
|
case REQTYPE_DELETE:
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE");
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case REQTYPE_HEAD:
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_NOBODY, true);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_FILETIME, true);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
// responseHeaders
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HEADERDATA, (void*)&responseHeaders);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HEADERFUNCTION, HeaderCallback);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case REQTYPE_PUTHEAD:
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case REQTYPE_PUT:
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
if(b_infile){
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_INFILESIZE_LARGE, static_cast<curl_off_t>(st.st_size));
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_INFILE, b_infile);
|
|
|
|
}else{
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case REQTYPE_GET:
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, S3fsCurl::DownloadWriteCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)this);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case REQTYPE_CHKBUCKET:
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case REQTYPE_LISTBUCKET:
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case REQTYPE_PREMULTIPOST:
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_POST, true);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, 0);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case REQTYPE_COMPLETEMULTIPOST:
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_POST, true);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
2013-09-27 07:39:07 +00:00
|
|
|
curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, static_cast<curl_off_t>(postdata_remaining));
|
2013-08-21 07:43:32 +00:00
|
|
|
curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case REQTYPE_UPLOADMULTIPOST:
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
2016-11-23 20:27:15 +00:00
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HEADERDATA, (void*)&responseHeaders);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HEADERFUNCTION, HeaderCallback);
|
2013-09-27 07:39:07 +00:00
|
|
|
curl_easy_setopt(hCurl, CURLOPT_INFILESIZE_LARGE, static_cast<curl_off_t>(partdata.size));
|
2013-08-21 07:43:32 +00:00
|
|
|
curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::UploadReadCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case REQTYPE_COPYMULTIPOST:
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HEADERDATA, (void*)headdata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HEADERFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case REQTYPE_MULTILIST:
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
break;
|
|
|
|
|
2013-10-06 13:45:32 +00:00
|
|
|
case REQTYPE_IAMCRED:
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
2017-11-23 08:46:24 +00:00
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
if(S3fsCurl::is_ibm_iam_auth){
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_POST, true);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, static_cast<curl_off_t>(postdata_remaining));
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback);
|
|
|
|
}
|
2013-10-06 13:45:32 +00:00
|
|
|
break;
|
|
|
|
|
2013-11-11 13:45:35 +00:00
|
|
|
case REQTYPE_ABORTMULTIUPLOAD:
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE");
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
break;
|
|
|
|
|
2016-05-06 04:37:32 +00:00
|
|
|
case REQTYPE_IAMROLE:
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
break;
|
|
|
|
|
2013-08-21 07:43:32 +00:00
|
|
|
default:
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("request type is unknown(%d)", type);
|
2013-08-21 07:43:32 +00:00
|
|
|
return false;
|
|
|
|
}
|
2016-04-17 07:44:03 +00:00
|
|
|
S3fsCurl::AddUserAgent(hCurl); // put User-Agent
|
|
|
|
|
2013-08-21 07:43:32 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
//
|
|
|
|
// returns curl return code
|
|
|
|
//
|
2013-08-21 07:43:32 +00:00
|
|
|
int S3fsCurl::RequestPerform(void)
|
2013-07-05 02:28:31 +00:00
|
|
|
{
|
2015-09-30 19:41:27 +00:00
|
|
|
if(IS_S3FS_LOG_DBG()){
|
2013-07-05 02:28:31 +00:00
|
|
|
char* ptr_url = NULL;
|
|
|
|
curl_easy_getinfo(hCurl, CURLINFO_EFFECTIVE_URL , &ptr_url);
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_DBG("connecting to URL %s", SAFESTRPTR(ptr_url));
|
2013-07-05 02:28:31 +00:00
|
|
|
}
|
2011-03-01 19:35:55 +00:00
|
|
|
|
|
|
|
// 1 attempt + retries...
|
2013-07-05 02:28:31 +00:00
|
|
|
for(int retrycnt = S3fsCurl::retries; 0 < retrycnt; retrycnt--){
|
|
|
|
// Requests
|
|
|
|
CURLcode curlCode = curl_easy_perform(hCurl);
|
|
|
|
|
|
|
|
// Check result
|
|
|
|
switch(curlCode){
|
2011-03-01 19:35:55 +00:00
|
|
|
case CURLE_OK:
|
|
|
|
// Need to look at the HTTP response code
|
2013-07-05 02:28:31 +00:00
|
|
|
if(0 != curl_easy_getinfo(hCurl, CURLINFO_RESPONSE_CODE, &LastResponseCode)){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("curl_easy_getinfo failed while trying to retrieve HTTP response code");
|
2011-03-01 19:35:55 +00:00
|
|
|
return -EIO;
|
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
if(400 > LastResponseCode){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("HTTP response code %ld", LastResponseCode);
|
2011-03-01 19:35:55 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
if(500 <= LastResponseCode){
|
2018-07-04 07:50:45 +00:00
|
|
|
S3FS_PRN_ERR("HTTP response code = %ld Body Text: %s", LastResponseCode, (bodydata ? bodydata->str() : ""));
|
2011-03-01 19:35:55 +00:00
|
|
|
sleep(4);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Service response codes which are >= 400 && < 500
|
2013-07-05 02:28:31 +00:00
|
|
|
switch(LastResponseCode){
|
2011-03-01 19:35:55 +00:00
|
|
|
case 400:
|
2018-07-06 07:03:57 +00:00
|
|
|
S3FS_PRN_ERR("HTTP response code %ld, Returning EIO, Body Text: %s", LastResponseCode, (bodydata ? bodydata->str() : ""));
|
2011-03-01 19:35:55 +00:00
|
|
|
return -EIO;
|
|
|
|
|
|
|
|
case 403:
|
2018-07-06 07:03:57 +00:00
|
|
|
S3FS_PRN_ERR("HTTP response code %ld, Returning EPERM, Body Text: %s", LastResponseCode, (bodydata ? bodydata->str() : ""));
|
2011-08-30 19:44:26 +00:00
|
|
|
return -EPERM;
|
2011-03-01 19:35:55 +00:00
|
|
|
|
|
|
|
case 404:
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("HTTP response code 404 was returned, returning ENOENT");
|
|
|
|
S3FS_PRN_DBG("Body Text: %s", (bodydata ? bodydata->str() : ""));
|
2011-03-01 19:35:55 +00:00
|
|
|
return -ENOENT;
|
|
|
|
|
|
|
|
default:
|
2018-07-06 07:03:57 +00:00
|
|
|
S3FS_PRN_ERR("HTTP response code %ld, Returning EIO, Body Text: %s", LastResponseCode, (bodydata ? bodydata->str() : ""));
|
2011-03-01 19:35:55 +00:00
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case CURLE_WRITE_ERROR:
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("### CURLE_WRITE_ERROR");
|
2011-03-01 19:35:55 +00:00
|
|
|
sleep(2);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case CURLE_OPERATION_TIMEDOUT:
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("### CURLE_OPERATION_TIMEDOUT");
|
2011-03-01 19:35:55 +00:00
|
|
|
sleep(2);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case CURLE_COULDNT_RESOLVE_HOST:
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("### CURLE_COULDNT_RESOLVE_HOST");
|
2011-03-01 19:35:55 +00:00
|
|
|
sleep(2);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case CURLE_COULDNT_CONNECT:
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("### CURLE_COULDNT_CONNECT");
|
2011-03-01 19:35:55 +00:00
|
|
|
sleep(4);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case CURLE_GOT_NOTHING:
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("### CURLE_GOT_NOTHING");
|
2011-03-01 19:35:55 +00:00
|
|
|
sleep(4);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case CURLE_ABORTED_BY_CALLBACK:
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("### CURLE_ABORTED_BY_CALLBACK");
|
2011-03-01 19:35:55 +00:00
|
|
|
sleep(4);
|
2013-07-05 02:28:31 +00:00
|
|
|
S3fsCurl::curl_times[hCurl] = time(0);
|
2011-03-01 19:35:55 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case CURLE_PARTIAL_FILE:
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("### CURLE_PARTIAL_FILE");
|
2011-03-01 19:35:55 +00:00
|
|
|
sleep(4);
|
|
|
|
break;
|
|
|
|
|
2011-07-29 15:48:15 +00:00
|
|
|
case CURLE_SEND_ERROR:
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("### CURLE_SEND_ERROR");
|
2011-07-29 15:48:15 +00:00
|
|
|
sleep(2);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case CURLE_RECV_ERROR:
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("### CURLE_RECV_ERROR");
|
2011-07-29 15:48:15 +00:00
|
|
|
sleep(2);
|
|
|
|
break;
|
|
|
|
|
2014-04-04 16:23:56 +00:00
|
|
|
case CURLE_SSL_CONNECT_ERROR:
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("### CURLE_SSL_CONNECT_ERROR");
|
2014-04-04 16:23:56 +00:00
|
|
|
sleep(2);
|
|
|
|
break;
|
|
|
|
|
2011-03-01 19:35:55 +00:00
|
|
|
case CURLE_SSL_CACERT:
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("### CURLE_SSL_CACERT");
|
2015-04-18 13:32:04 +00:00
|
|
|
|
2011-03-01 19:35:55 +00:00
|
|
|
// try to locate cert, if successful, then set the
|
|
|
|
// option and continue
|
2013-07-05 02:28:31 +00:00
|
|
|
if(0 == S3fsCurl::curl_ca_bundle.size()){
|
|
|
|
if(!S3fsCurl::LocateBundle()){
|
2016-06-27 10:38:49 +00:00
|
|
|
S3FS_PRN_ERR("could not get CURL_CA_BUNDLE.");
|
|
|
|
return -EIO;
|
2013-07-05 02:28:31 +00:00
|
|
|
}
|
2013-09-14 21:50:39 +00:00
|
|
|
break; // retry with CAINFO
|
2011-03-01 19:35:55 +00:00
|
|
|
}
|
2016-06-27 10:38:49 +00:00
|
|
|
S3FS_PRN_ERR("curlCode: %d msg: %s", curlCode, curl_easy_strerror(curlCode));
|
|
|
|
return -EIO;
|
2011-03-01 19:35:55 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
#ifdef CURLE_PEER_FAILED_VERIFICATION
|
|
|
|
case CURLE_PEER_FAILED_VERIFICATION:
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("### CURLE_PEER_FAILED_VERIFICATION");
|
2015-04-18 13:32:04 +00:00
|
|
|
|
2011-03-01 19:35:55 +00:00
|
|
|
first_pos = bucket.find_first_of(".");
|
2013-07-05 02:28:31 +00:00
|
|
|
if(first_pos != string::npos){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO("curl returned a CURL_PEER_FAILED_VERIFICATION error");
|
|
|
|
S3FS_PRN_INFO("security issue found: buckets with periods in their name are incompatible with http");
|
|
|
|
S3FS_PRN_INFO("This check can be over-ridden by using the -o ssl_verify_hostname=0");
|
|
|
|
S3FS_PRN_INFO("The certificate will still be checked but the hostname will not be verified.");
|
|
|
|
S3FS_PRN_INFO("A more secure method would be to use a bucket name without periods.");
|
2015-08-12 15:04:16 +00:00
|
|
|
}else{
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO("my_curl_easy_perform: curlCode: %d -- %s", curlCode, curl_easy_strerror(curlCode));
|
2011-03-01 19:35:55 +00:00
|
|
|
}
|
2016-06-27 10:38:49 +00:00
|
|
|
return -EIO;
|
2011-03-01 19:35:55 +00:00
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
// This should be invalid since curl option HTTP FAILONERROR is now off
|
|
|
|
case CURLE_HTTP_RETURNED_ERROR:
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("### CURLE_HTTP_RETURNED_ERROR");
|
2011-03-01 19:35:55 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
if(0 != curl_easy_getinfo(hCurl, CURLINFO_RESPONSE_CODE, &LastResponseCode)){
|
2011-03-01 19:35:55 +00:00
|
|
|
return -EIO;
|
|
|
|
}
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("HTTP response code =%ld", LastResponseCode);
|
2011-03-01 19:35:55 +00:00
|
|
|
|
|
|
|
// Map the HTTP response code to a return value
|
2013-07-05 02:28:31 +00:00
|
|
|
if(404 == LastResponseCode){
|
2011-03-01 19:35:55 +00:00
|
|
|
return -ENOENT;
|
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
if(500 > LastResponseCode){
|
2011-03-01 19:35:55 +00:00
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
// Unknown CURL return code
|
|
|
|
default:
|
2016-06-27 10:38:49 +00:00
|
|
|
S3FS_PRN_ERR("###curlCode: %d msg: %s", curlCode, curl_easy_strerror(curlCode));
|
|
|
|
return -EIO;
|
2011-03-01 19:35:55 +00:00
|
|
|
break;
|
|
|
|
}
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO("### retrying...");
|
2013-08-21 07:43:32 +00:00
|
|
|
|
|
|
|
if(!RemakeHandle()){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO("Failed to reset handle and internal data for retrying.");
|
2013-08-21 07:43:32 +00:00
|
|
|
return -EIO;
|
|
|
|
}
|
2011-03-01 19:35:55 +00:00
|
|
|
}
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("### giving up");
|
|
|
|
|
2011-03-01 19:35:55 +00:00
|
|
|
return -EIO;
|
|
|
|
}
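To summarize the control flow of RequestPerform() above in one place (derived directly from the switch statements in the function):
-----------
// Result handling in RequestPerform(), per attempt:
//   CURLE_OK:
//     HTTP <  400            -> return 0 (success)
//     HTTP == 400            -> return -EIO
//     HTTP == 403            -> return -EPERM
//     HTTP == 404            -> return -ENOENT
//     HTTP >= 500            -> sleep(4), retry
//     other 4xx              -> return -EIO
//   transient curl errors (timeouts, resolve/connect failures, partial
//   transfers, send/recv/SSL-connect errors)  -> sleep(2 or 4), retry
//   CURLE_SSL_CACERT         -> locate a CA bundle once, then retry; else -EIO
//   certificate verification failure, unknown curl codes -> return -EIO
// After S3fsCurl::retries attempts, or if RemakeHandle() fails, the function
// gives up and returns -EIO.
-----------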
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
//
|
|
|
|
// Returns the Amazon AWS signature for the given parameters.
|
|
|
|
//
|
|
|
|
// @param method e.g., "GET"
|
|
|
|
// @param content_type e.g., "application/x-directory"
|
2015-01-28 17:13:11 +00:00
|
|
|
// @param date e.g., get_date_rfc850()
|
2013-07-05 02:28:31 +00:00
|
|
|
// @param resource e.g., "/pub"
|
|
|
|
//
|
2015-08-17 04:42:45 +00:00
|
|
|
string S3fsCurl::CalcSignatureV2(const string& method, const string& strMD5, const string& content_type, const string& date, const string& resource)
|
2015-01-20 16:31:36 +00:00
|
|
|
{
|
|
|
|
string Signature;
|
|
|
|
string StringToSign;
|
|
|
|
|
2017-11-08 15:21:49 +00:00
|
|
|
if(0 < S3fsCurl::IAM_role.size() || S3fsCurl::is_ecs){
|
2015-01-28 17:13:11 +00:00
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-security-token", S3fsCurl::AWSAccessToken.c_str());
|
2015-01-20 16:31:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
StringToSign += method + "\n";
|
|
|
|
StringToSign += strMD5 + "\n"; // md5
|
|
|
|
StringToSign += content_type + "\n";
|
|
|
|
StringToSign += date + "\n";
|
2015-01-28 17:13:11 +00:00
|
|
|
StringToSign += get_canonical_headers(requestHeaders, true);
|
2015-01-20 16:31:36 +00:00
|
|
|
StringToSign += resource;
|
|
|
|
|
|
|
|
const void* key = S3fsCurl::AWSSecretAccessKey.data();
|
|
|
|
int key_len = S3fsCurl::AWSSecretAccessKey.size();
|
|
|
|
const unsigned char* sdata = reinterpret_cast<const unsigned char*>(StringToSign.data());
|
|
|
|
int sdata_len = StringToSign.size();
|
|
|
|
unsigned char* md = NULL;
|
|
|
|
unsigned int md_len = 0;
|
|
|
|
|
|
|
|
s3fs_HMAC(key, key_len, sdata, sdata_len, &md, &md_len);
|
|
|
|
|
|
|
|
char* base64;
|
|
|
|
if(NULL == (base64 = s3fs_base64(md, md_len))){
|
|
|
|
free(md);
|
|
|
|
return string(""); // ENOMEM
|
|
|
|
}
|
|
|
|
free(md);
|
|
|
|
|
|
|
|
Signature = base64;
|
|
|
|
free(base64);
|
|
|
|
|
|
|
|
return Signature;
|
|
|
|
}
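For reference, the V2 string-to-sign assembled by CalcSignatureV2() above has a fixed layout; the values below are purely illustrative, and the final header format matches what insertV2Headers() builds further down.
-----------
// method \n Content-MD5 \n Content-Type \n Date \n canonical x-amz-* headers \n resource
std::string StringToSign =
    "PUT\n"                                  // HTTP method
    "\n"                                     // Content-MD5 (often empty)
    "application/octet-stream\n"             // Content-Type
    "Tue, 27 Mar 2007 21:15:45 +0000\n"      // Date header value
    "x-amz-meta-example:value\n"             // canonical x-amz-* headers, if any
    "/examplebucket/photos/example.jpg";     // canonical resource
// The request is then sent with:
//   Authorization: AWS <AWSAccessKeyId>:<base64(HMAC-SHA1(secret, StringToSign))>
-----------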
|
|
|
|
|
2015-08-17 04:42:45 +00:00
|
|
|
string S3fsCurl::CalcSignature(const string& method, const string& canonical_uri, const string& query_string, const string& strdate, const string& payload_hash, const string& date8601)
|
2015-01-20 16:31:36 +00:00
|
|
|
{
|
|
|
|
string Signature, StringCQ, StringToSign;
|
|
|
|
string uriencode;
|
|
|
|
|
2017-11-08 15:21:49 +00:00
|
|
|
if(0 < S3fsCurl::IAM_role.size() || S3fsCurl::is_ecs){
|
2015-01-28 17:13:11 +00:00
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-security-token", S3fsCurl::AWSAccessToken.c_str());
|
2015-01-20 16:31:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
uriencode = urlEncode(canonical_uri);
|
2015-01-24 16:36:30 +00:00
|
|
|
StringCQ = method + "\n";
|
2015-01-20 16:31:36 +00:00
|
|
|
if(0 == strcmp(method.c_str(),"HEAD") || 0 == strcmp(method.c_str(),"PUT") || 0 == strcmp(method.c_str(),"DELETE")){
|
2016-01-10 22:28:09 +00:00
|
|
|
StringCQ += uriencode + "\n";
|
2015-01-20 16:31:36 +00:00
|
|
|
}else if (0 == strcmp(method.c_str(), "GET") && 0 == strcmp(uriencode.c_str(), "")) {
|
2016-01-10 22:28:09 +00:00
|
|
|
StringCQ +="/\n";
|
2015-01-24 16:36:30 +00:00
|
|
|
}else if (0 == strcmp(method.c_str(), "GET") && 0 == strncmp(uriencode.c_str(), "/", 1)) {
|
2016-01-10 22:28:09 +00:00
|
|
|
StringCQ += uriencode +"\n";
|
2015-01-24 16:36:30 +00:00
|
|
|
}else if (0 == strcmp(method.c_str(), "GET") && 0 != strncmp(uriencode.c_str(), "/", 1)) {
|
2015-01-20 16:31:36 +00:00
|
|
|
StringCQ += "/\n" + urlEncode2(canonical_uri) +"\n";
|
|
|
|
}else if (0 == strcmp(method.c_str(), "POST")) {
|
2016-01-10 22:28:09 +00:00
|
|
|
StringCQ += uriencode + "\n";
|
2015-01-20 16:31:36 +00:00
|
|
|
}
|
2016-01-10 22:28:09 +00:00
|
|
|
StringCQ += urlEncode2(query_string) + "\n";
|
2015-01-28 17:13:11 +00:00
|
|
|
StringCQ += get_canonical_headers(requestHeaders) + "\n";
|
|
|
|
StringCQ += get_sorted_header_keys(requestHeaders) + "\n";
|
2015-01-20 16:31:36 +00:00
|
|
|
StringCQ += payload_hash;
|
|
|
|
|
2015-01-24 16:36:30 +00:00
|
|
|
char kSecret[128];
|
|
|
|
unsigned char *kDate, *kRegion, *kService, *kSigning, *sRequest = NULL;
|
|
|
|
unsigned int kDate_len,kRegion_len, kService_len, kSigning_len, sRequest_len = 0;
|
|
|
|
char hexsRequest[64 + 1];
|
|
|
|
int kSecret_len = snprintf(kSecret, sizeof(kSecret), "AWS4%s", S3fsCurl::AWSSecretAccessKey.c_str());
|
2015-01-28 17:13:11 +00:00
|
|
|
unsigned int cnt;
|
2015-01-24 16:36:30 +00:00
|
|
|
|
2015-01-28 17:13:11 +00:00
|
|
|
s3fs_HMAC256(kSecret, kSecret_len, reinterpret_cast<const unsigned char*>(strdate.data()), strdate.size(), &kDate, &kDate_len);
|
|
|
|
s3fs_HMAC256(kDate, kDate_len, reinterpret_cast<const unsigned char*>(endpoint.c_str()), endpoint.size(), &kRegion, &kRegion_len);
|
|
|
|
s3fs_HMAC256(kRegion, kRegion_len, reinterpret_cast<const unsigned char*>("s3"), sizeof("s3") - 1, &kService, &kService_len);
|
|
|
|
s3fs_HMAC256(kService, kService_len, reinterpret_cast<const unsigned char*>("aws4_request"), sizeof("aws4_request") - 1, &kSigning, &kSigning_len);
|
2015-01-24 16:36:30 +00:00
|
|
|
free(kDate);
|
|
|
|
free(kRegion);
|
|
|
|
free(kService);
|
|
|
|
|
2015-01-28 17:13:11 +00:00
|
|
|
const unsigned char* cRequest = reinterpret_cast<const unsigned char*>(StringCQ.c_str());
|
|
|
|
unsigned int cRequest_len = StringCQ.size();
|
2015-01-20 16:31:36 +00:00
|
|
|
s3fs_sha256(cRequest, cRequest_len, &sRequest, &sRequest_len);
|
2015-01-28 17:13:11 +00:00
|
|
|
for(cnt = 0; cnt < sRequest_len; cnt++){
|
|
|
|
sprintf(&hexsRequest[cnt * 2], "%02x", sRequest[cnt]);
|
2015-01-24 16:36:30 +00:00
|
|
|
}
|
|
|
|
free(sRequest);
|
2015-01-20 16:31:36 +00:00
|
|
|
|
2015-01-28 17:13:11 +00:00
|
|
|
StringToSign = "AWS4-HMAC-SHA256\n";
|
|
|
|
StringToSign += date8601 + "\n";
|
|
|
|
StringToSign += strdate + "/" + endpoint + "/s3/aws4_request\n";
|
2015-01-20 16:31:36 +00:00
|
|
|
StringToSign += hexsRequest;
|
|
|
|
|
2015-01-28 17:13:11 +00:00
|
|
|
const unsigned char* cscope = reinterpret_cast<const unsigned char*>(StringToSign.c_str());
|
|
|
|
unsigned int cscope_len = StringToSign.size();
|
|
|
|
unsigned char* md = NULL;
|
|
|
|
unsigned int md_len = 0;
|
|
|
|
char hexSig[64 + 1];
|
2015-01-20 16:31:36 +00:00
|
|
|
|
2015-01-28 17:13:11 +00:00
|
|
|
s3fs_HMAC256(kSigning, kSigning_len, cscope, cscope_len, &md, &md_len);
|
|
|
|
for(cnt = 0; cnt < md_len; cnt++){
|
|
|
|
sprintf(&hexSig[cnt * 2], "%02x", md[cnt]);
|
2015-01-24 16:36:30 +00:00
|
|
|
}
|
|
|
|
free(kSigning);
|
|
|
|
free(md);
|
2015-01-20 16:31:36 +00:00
|
|
|
|
|
|
|
Signature = hexSig;
|
|
|
|
|
|
|
|
return Signature;
|
|
|
|
}
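The HMAC chain above is the standard SigV4 signing-key derivation, with s3fs's endpoint setting used as the region string. Summarized as comments (hmac_sha256()/sha256() are shorthand for the s3fs_HMAC256()/s3fs_sha256() calls used above):
-----------
// kDate    = hmac_sha256("AWS4" + SecretAccessKey, strdate)   // strdate = yyyymmdd
// kRegion  = hmac_sha256(kDate,    endpoint)                  // region
// kService = hmac_sha256(kRegion,  "s3")
// kSigning = hmac_sha256(kService, "aws4_request")
//
// StringToSign = "AWS4-HMAC-SHA256\n" + date8601 + "\n"
//              + strdate + "/" + endpoint + "/s3/aws4_request\n"
//              + hex(sha256(canonical_request))
//
// Signature    = hex(hmac_sha256(kSigning, StringToSign))
-----------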
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
// The XML response body contains an UploadId; parse it out of bodydata
|
|
|
|
bool S3fsCurl::GetUploadId(string& upload_id)
|
2013-03-30 13:37:14 +00:00
|
|
|
{
|
2013-07-05 02:28:31 +00:00
|
|
|
bool result = false;
|
2011-03-01 19:35:55 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
if(!bodydata){
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
upload_id.clear();
|
|
|
|
|
2013-09-14 21:50:39 +00:00
|
|
|
xmlDocPtr doc;
|
|
|
|
if(NULL == (doc = xmlReadMemory(bodydata->str(), bodydata->size(), "", NULL, 0))){
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
if(NULL == doc->children){
|
|
|
|
S3FS_XMLFREEDOC(doc);
|
2013-07-05 02:28:31 +00:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
for(xmlNodePtr cur_node = doc->children->children; NULL != cur_node; cur_node = cur_node->next){
|
|
|
|
// For DEBUG
|
|
|
|
// string cur_node_name(reinterpret_cast<const char *>(cur_node->name));
|
|
|
|
// printf("cur_node_name: %s\n", cur_node_name.c_str());
|
|
|
|
|
|
|
|
if(XML_ELEMENT_NODE == cur_node->type){
|
|
|
|
string elementName = reinterpret_cast<const char*>(cur_node->name);
|
|
|
|
// For DEBUG
|
|
|
|
// printf("elementName: %s\n", elementName.c_str());
|
|
|
|
|
|
|
|
if(cur_node->children){
|
|
|
|
if(XML_TEXT_NODE == cur_node->children->type){
|
|
|
|
if(elementName == "UploadId") {
|
|
|
|
upload_id = reinterpret_cast<const char *>(cur_node->children->content);
|
|
|
|
result = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2011-03-01 19:35:55 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2013-09-14 21:50:39 +00:00
|
|
|
S3FS_XMLFREEDOC(doc);
|
2011-03-01 19:35:55 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
return result;
|
|
|
|
}
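GetUploadId() above walks the children of the XML root looking for an UploadId element. For context, the body it parses typically looks like the following; the exact envelope is an assumption about the S3 InitiateMultipartUpload response, not something defined in this file, and the values are placeholders.
-----------
// Representative response body stored in bodydata:
const char* sample_body =
  "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
  "<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\n"
  "  <Bucket>examplebucket</Bucket>\n"
  "  <Key>example-object</Key>\n"
  "  <UploadId>VXBsb2FkIElEIGV4YW1wbGU</UploadId>\n"
  "</InitiateMultipartUploadResult>";
// GetUploadId() would set upload_id to "VXBsb2FkIElEIGV4YW1wbGU" from the
// text content of the UploadId node.
-----------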
|
2011-03-01 19:35:55 +00:00
|
|
|
|
2017-10-26 14:21:48 +00:00
|
|
|
void S3fsCurl::insertV4Headers()
|
2015-04-09 22:04:39 +00:00
|
|
|
{
|
2017-10-26 14:21:48 +00:00
|
|
|
string server_path = type == REQTYPE_LISTBUCKET ? "/" : path;
|
|
|
|
string payload_hash;
|
|
|
|
switch (type) {
|
|
|
|
case REQTYPE_PUT:
|
|
|
|
payload_hash = s3fs_sha256sum(b_infile == NULL ? -1 : fileno(b_infile), 0, -1);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case REQTYPE_COMPLETEMULTIPOST:
|
|
|
|
{
|
|
|
|
unsigned int cRequest_len = strlen(reinterpret_cast<const char *>(b_postdata));
|
|
|
|
unsigned char* sRequest = NULL;
|
|
|
|
unsigned int sRequest_len = 0;
|
|
|
|
char hexsRequest[64 + 1];
|
|
|
|
unsigned int cnt;
|
|
|
|
s3fs_sha256(b_postdata, cRequest_len, &sRequest, &sRequest_len);
|
|
|
|
for(cnt = 0; cnt < sRequest_len; cnt++){
|
|
|
|
sprintf(&hexsRequest[cnt * 2], "%02x", sRequest[cnt]);
|
|
|
|
}
|
|
|
|
free(sRequest);
|
|
|
|
payload_hash.assign(hexsRequest, &hexsRequest[sRequest_len * 2]);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case REQTYPE_UPLOADMULTIPOST:
|
|
|
|
payload_hash = s3fs_sha256sum(partdata.fd, partdata.startpos, partdata.size);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
S3FS_PRN_INFO3("computing signature [%s] [%s] [%s] [%s]", op.c_str(), server_path.c_str(), query_string.c_str(), payload_hash.c_str());
|
2015-04-09 22:04:39 +00:00
|
|
|
string strdate;
|
|
|
|
string date8601;
|
|
|
|
get_date_sigv3(strdate, date8601);
|
|
|
|
|
|
|
|
string contentSHA256 = payload_hash.empty() ? empty_payload_hash : payload_hash;
|
2017-10-26 14:21:48 +00:00
|
|
|
const std::string realpath = pathrequeststyle ? "/" + bucket + server_path : server_path;
|
2015-04-09 22:04:39 +00:00
|
|
|
|
|
|
|
//string canonical_headers, signed_headers;
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "host", get_bucket_host().c_str());
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-content-sha256", contentSHA256.c_str());
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-date", date8601.c_str());
|
2017-11-08 15:21:49 +00:00
|
|
|
|
2015-04-09 22:04:39 +00:00
|
|
|
if(!S3fsCurl::IsPublicBucket()){
|
2017-10-26 14:21:48 +00:00
|
|
|
string Signature = CalcSignature(op, realpath, query_string + (type == REQTYPE_PREMULTIPOST ? "=" : ""), strdate, contentSHA256, date8601);
|
2015-04-09 22:04:39 +00:00
|
|
|
string auth = "AWS4-HMAC-SHA256 Credential=" + AWSAccessKeyId + "/" + strdate + "/" + endpoint +
|
|
|
|
"/s3/aws4_request, SignedHeaders=" + get_sorted_header_keys(requestHeaders) + ", Signature=" + Signature;
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Authorization", auth.c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-26 14:21:48 +00:00
|
|
|
void S3fsCurl::insertV2Headers()
|
|
|
|
{
|
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
string server_path = type == REQTYPE_LISTBUCKET ? "/" : path;
|
|
|
|
MakeUrlResource(server_path.c_str(), resource, turl);
|
|
|
|
if(!query_string.empty() && type != REQTYPE_LISTBUCKET){
|
|
|
|
resource += "?" + query_string;
|
|
|
|
}
|
|
|
|
|
|
|
|
string date = get_date_rfc850();
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Date", date.c_str());
|
|
|
|
if(op != "PUT" && op != "POST"){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
if(!S3fsCurl::IsPublicBucket()){
|
|
|
|
string Signature = CalcSignatureV2(op, get_header_value(requestHeaders, "Content-MD5"), get_header_value(requestHeaders, "Content-Type"), date, resource);
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Authorization", string("AWS " + AWSAccessKeyId + ":" + Signature).c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-23 08:46:24 +00:00
|
|
|
void S3fsCurl::insertIBMIAMHeaders()
|
|
|
|
{
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Authorization", ("Bearer " + S3fsCurl::AWSAccessToken).c_str());
|
|
|
|
|
|
|
|
if(op == "PUT" && path == mount_prefix + "/"){
|
|
|
|
// ibm-service-instance-id header is required for bucket creation requests
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "ibm-service-instance-id", S3fsCurl::AWSAccessKeyId.c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-26 14:21:48 +00:00
|
|
|
void S3fsCurl::insertAuthHeaders()
|
|
|
|
{
|
2017-11-23 08:46:24 +00:00
|
|
|
if(!S3fsCurl::CheckIAMCredentialUpdate()){
|
|
|
|
S3FS_PRN_ERR("An error occurred in checking IAM credential.");
|
|
|
|
return; // do not insert auth headers on error
|
|
|
|
}
|
|
|
|
|
|
|
|
if(S3fsCurl::is_ibm_iam_auth){
|
|
|
|
insertIBMIAMHeaders();
|
|
|
|
}else if(!S3fsCurl::is_sigv4){
|
2017-10-26 14:21:48 +00:00
|
|
|
insertV2Headers();
|
|
|
|
}else{
|
|
|
|
insertV4Headers();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
int S3fsCurl::DeleteRequest(const char* tpath)
|
|
|
|
{
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath));
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
if(!tpath){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
MakeUrlResource(get_realpath(tpath).c_str(), resource, turl);
|
|
|
|
|
|
|
|
url = prepare_url(turl.c_str());
|
2015-03-21 04:31:59 +00:00
|
|
|
path = get_realpath(tpath);
|
2013-07-05 02:28:31 +00:00
|
|
|
requestHeaders = NULL;
|
|
|
|
responseHeaders.clear();
|
|
|
|
|
2017-10-26 14:21:48 +00:00
|
|
|
op = "DELETE";
|
|
|
|
type = REQTYPE_DELETE;
|
|
|
|
insertAuthHeaders();
|
2011-03-01 19:35:55 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE");
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
2016-04-17 07:44:03 +00:00
|
|
|
S3fsCurl::AddUserAgent(hCurl); // put User-Agent
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
return RequestPerform();
|
2011-03-01 19:35:55 +00:00
|
|
|
}
|
2013-03-30 13:37:14 +00:00
|
|
|
|
2013-10-06 13:45:32 +00:00
|
|
|
//
|
|
|
|
// Get AccessKeyId/SecretAccessKey/AccessToken/Expiration by IAM role,
|
2014-04-05 05:11:55 +00:00
|
|
|
// and set these values on the class variables.
|
2013-10-06 13:45:32 +00:00
|
|
|
//
|
|
|
|
int S3fsCurl::GetIAMCredentials(void)
|
|
|
|
{
|
2017-11-23 08:46:24 +00:00
|
|
|
if (!S3fsCurl::is_ecs && !S3fsCurl::is_ibm_iam_auth) {
|
2017-11-06 21:45:58 +00:00
|
|
|
S3FS_PRN_INFO3("[IAM role=%s]", S3fsCurl::IAM_role.c_str());
|
2013-10-06 13:45:32 +00:00
|
|
|
|
2017-11-06 21:45:58 +00:00
|
|
|
if(0 == S3fsCurl::IAM_role.size()) {
|
|
|
|
S3FS_PRN_ERR("IAM role name is empty.");
|
|
|
|
return -EIO;
|
|
|
|
}
|
2013-10-06 13:45:32 +00:00
|
|
|
}
|
2017-11-06 21:45:58 +00:00
|
|
|
|
2013-10-06 13:45:32 +00:00
|
|
|
// at first set type for handle
|
|
|
|
type = REQTYPE_IAMCRED;
|
|
|
|
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
|
|
|
|
// url
|
2017-11-05 19:24:02 +00:00
|
|
|
if (is_ecs) {
|
2017-11-23 08:46:24 +00:00
|
|
|
url = string(S3fsCurl::IAM_cred_url) + std::getenv(ECS_IAM_ENV_VAR.c_str());
|
2017-11-05 19:24:02 +00:00
|
|
|
}
|
|
|
|
else {
|
2017-11-23 08:46:24 +00:00
|
|
|
url = string(S3fsCurl::IAM_cred_url) + S3fsCurl::IAM_role;
|
2017-11-05 19:24:02 +00:00
|
|
|
}
|
2017-11-06 21:45:58 +00:00
|
|
|
|
2013-10-06 13:45:32 +00:00
|
|
|
requestHeaders = NULL;
|
|
|
|
responseHeaders.clear();
|
|
|
|
bodydata = new BodyData();
|
2017-11-23 08:46:24 +00:00
|
|
|
string postContent;
|
|
|
|
|
|
|
|
if(S3fsCurl::is_ibm_iam_auth){
|
|
|
|
url = string(S3fsCurl::IAM_cred_url);
|
|
|
|
|
|
|
|
// make contents
|
|
|
|
postContent += "grant_type=urn:ibm:params:oauth:grant-type:apikey";
|
|
|
|
postContent += "&response_type=cloud_iam";
|
|
|
|
postContent += "&apikey=" + S3fsCurl::AWSSecretAccessKey;
|
|
|
|
|
|
|
|
// set postdata
|
|
|
|
postdata = reinterpret_cast<const unsigned char*>(postContent.c_str());
|
|
|
|
b_postdata = postdata;
|
|
|
|
postdata_remaining = postContent.size(); // without null
|
|
|
|
b_postdata_remaining = postdata_remaining;
|
|
|
|
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Authorization", "Basic Yng6Yng=");
|
|
|
|
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_POST, true); // POST
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, static_cast<curl_off_t>(postdata_remaining));
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback);
|
|
|
|
}
|
2013-10-06 13:45:32 +00:00
|
|
|
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
2017-11-23 08:46:24 +00:00
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
2016-04-17 07:44:03 +00:00
|
|
|
S3fsCurl::AddUserAgent(hCurl); // put User-Agent
|
2013-10-06 13:45:32 +00:00
|
|
|
|
|
|
|
int result = RequestPerform();
|
|
|
|
|
2014-04-05 05:11:55 +00:00
|
|
|
// analyzing response
|
2013-10-06 13:45:32 +00:00
|
|
|
if(0 == result && !S3fsCurl::SetIAMCredentials(bodydata->str())){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("Something error occurred, could not get IAM credential.");
|
2017-11-23 08:46:24 +00:00
|
|
|
result = -EIO;
|
2013-10-06 13:45:32 +00:00
|
|
|
}
|
|
|
|
delete bodydata;
|
|
|
|
bodydata = NULL;
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
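
//
// Note (assumption, for illustration): in the plain IAM role case the metadata
// endpoint is expected to answer with a JSON credential document that
// SetIAMCredentials() parses, typically of the form (values are placeholders):
//
//   {
//     "AccessKeyId"     : "AKIA...",
//     "SecretAccessKey" : "...",
//     "Token"           : "...",
//     "Expiration"      : "2018-01-01T00:00:00Z"
//   }
//
// The IBM IAM variant instead returns an API-key based token response, which is
// why the POST body and Basic Authorization header are built above for that mode.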

//
// Get the IAM role name automatically.
//
bool S3fsCurl::LoadIAMRoleFromMetaData(void)
{
  S3FS_PRN_INFO3("Get IAM Role name");

  // at first set type for handle
  type = REQTYPE_IAMROLE;

  if(!CreateCurlHandle(true)){
    return false;
  }

  // url
  url             = string(S3fsCurl::IAM_cred_url);
  requestHeaders  = NULL;
  responseHeaders.clear();
  bodydata        = new BodyData();

  curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
  curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
  curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
  S3fsCurl::AddUserAgent(hCurl);        // put User-Agent

  int result = RequestPerform();

  // analyzing response
  if(0 == result && !S3fsCurl::SetIAMRoleFromMetaData(bodydata->str())){
    S3FS_PRN_ERR("An error occurred, so the IAM role name could not be obtained.");
    result = -EIO;
  }
  delete bodydata;
  bodydata = NULL;

  return (0 == result);
}
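
//
// Note (assumption, for illustration): the credential URL is queried here without
// a role suffix, and the metadata service is expected to return the attached role
// name(s) as plain text; SetIAMRoleFromMetaData() extracts the name and stores it
// in S3fsCurl::IAM_role for the later GetIAMCredentials() call.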

bool S3fsCurl::AddSseRequestHead(sse_type_t ssetype, string& ssevalue, bool is_only_c, bool is_copy)
{
  if(SSE_S3 == ssetype){
    if(!is_only_c){
      requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption", "AES256");
    }
  }else if(SSE_C == ssetype){
    string sseckey;
    if(S3fsCurl::GetSseKey(ssevalue, sseckey)){
      if(is_copy){
        requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-copy-source-server-side-encryption-customer-algorithm", "AES256");
        requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-copy-source-server-side-encryption-customer-key", sseckey.c_str());
        requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-copy-source-server-side-encryption-customer-key-md5", ssevalue.c_str());
      }else{
        requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-customer-algorithm", "AES256");
        requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-customer-key", sseckey.c_str());
        requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-customer-key-md5", ssevalue.c_str());
      }
    }else{
      S3FS_PRN_WARN("Failed to insert SSE-C header.");
    }

  }else if(SSE_KMS == ssetype){
    if(!is_only_c){
      if(ssevalue.empty()){
        ssevalue = S3fsCurl::GetSseKmsId();
      }
      requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption", "aws:kms");
      requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption-aws-kms-key-id", ssevalue.c_str());
    }
  }
  return true;
}
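
//
// Illustrative sketch of the headers produced above for the SSE-C case
// (values are placeholders, following the AWS SSE-C convention):
//
//   x-amz-server-side-encryption-customer-algorithm: AES256
//   x-amz-server-side-encryption-customer-key:       <base64-encoded key>
//   x-amz-server-side-encryption-customer-key-md5:   <base64-encoded MD5 of the key>
//
// The "x-amz-copy-source-..." variants are added instead when the key applies to
// the copy source object (is_copy == true).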

//
// tpath :      target path for head request
// bpath :      saved into base_path
// savedpath :  saved into saved_path
// ssekey_pos : -1 means this is not an SSE-C request;
//              0 - X means an SSE-C request using the key at this position (0 is the latest key)
//
bool S3fsCurl::PreHeadRequest(const char* tpath, const char* bpath, const char* savedpath, int ssekey_pos)
{
  S3FS_PRN_INFO3("[tpath=%s][bpath=%s][save=%s][sseckeypos=%d]", SAFESTRPTR(tpath), SAFESTRPTR(bpath), SAFESTRPTR(savedpath), ssekey_pos);

  if(!tpath){
    return false;
  }
  if(!CreateCurlHandle(true)){
    return false;
  }
  string resource;
  string turl;
  MakeUrlResource(get_realpath(tpath).c_str(), resource, turl);

  // libcurl 7.17 does deep copy of url, deep copy "stable" url
  url             = prepare_url(turl.c_str());
  path            = get_realpath(tpath);
  base_path       = SAFESTRPTR(bpath);
  saved_path      = SAFESTRPTR(savedpath);
  requestHeaders  = NULL;
  responseHeaders.clear();

  // requestHeaders
  if(0 <= ssekey_pos){
    string md5("");
    if(!S3fsCurl::GetSseKeyMd5(ssekey_pos, md5) || !AddSseRequestHead(SSE_C, md5, true, false)){
      S3FS_PRN_ERR("Failed to set SSE-C headers for sse-c key pos(%d)(=md5(%s)).", ssekey_pos, md5.c_str());
      return false;
    }
  }
  b_ssekey_pos = ssekey_pos;

  op = "HEAD";
  type = REQTYPE_HEAD;
  insertAuthHeaders();

  curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
  curl_easy_setopt(hCurl, CURLOPT_NOBODY, true);   // HEAD
  curl_easy_setopt(hCurl, CURLOPT_FILETIME, true); // Last-Modified
  curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);

  // responseHeaders
  curl_easy_setopt(hCurl, CURLOPT_HEADERDATA, (void*)&responseHeaders);
  curl_easy_setopt(hCurl, CURLOPT_HEADERFUNCTION, HeaderCallback);
  S3fsCurl::AddUserAgent(hCurl);        // put User-Agent

  return true;
}

int S3fsCurl::HeadRequest(const char* tpath, headers_t& meta)
{
  int result = -1;

  S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath));

  // At first, try to get without SSE-C headers
  if(!PreHeadRequest(tpath) || 0 != (result = RequestPerform())){
    // If SSE-C keys exist, retry the request with each SSE-C key.
    for(int pos = 0; static_cast<size_t>(pos) < S3fsCurl::sseckeys.size(); pos++){
      if(!DestroyCurlHandle()){
        break;
      }
      if(!PreHeadRequest(tpath, NULL, NULL, pos)){
        break;
      }
      if(0 == (result = RequestPerform())){
        break;
      }
    }
    if(0 != result){
      DestroyCurlHandle();  // result is not checked.
      return result;
    }
  }

  // file exists in s3
  // fixme: clean this up.
  meta.clear();
  for(headers_t::iterator iter = responseHeaders.begin(); iter != responseHeaders.end(); ++iter){
    string key = lower(iter->first);
    string value = iter->second;
    if(key == "content-type"){
      meta[iter->first] = value;
    }else if(key == "content-length"){
      meta[iter->first] = value;
    }else if(key == "etag"){
      meta[iter->first] = value;
    }else if(key == "last-modified"){
      meta[iter->first] = value;
    }else if(key.substr(0, 5) == "x-amz"){
      meta[key] = value;          // key is lower case for "x-amz"
    }
  }
  return 0;
}
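
//
// Illustrative usage sketch (not part of the original source): callers typically
// use HeadRequest() to pull an object's headers into a headers_t map, e.g.:
//
//   headers_t meta;
//   S3fsCurl  s3fscurl;
//   if(0 != s3fscurl.HeadRequest("/dir/file", meta)){
//     // object not found or the request failed
//   }
//
// On success, meta holds Content-Type, Content-Length, ETag, Last-Modified and
// any x-amz-* headers, as filtered above.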

int S3fsCurl::PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy)
{
  S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath));

  if(!tpath){
    return -1;
  }
  if(!CreateCurlHandle(true)){
    return -1;
  }
  string resource;
  string turl;
  MakeUrlResource(get_realpath(tpath).c_str(), resource, turl);

  url             = prepare_url(turl.c_str());
  path            = get_realpath(tpath);
  requestHeaders  = NULL;
  responseHeaders.clear();
  bodydata        = new BodyData();

  // Make request headers
  for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
    string key = lower(iter->first);
    string value = iter->second;
    if(key == "content-type"){
      requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str());
    }else if(key.substr(0, 9) == "x-amz-acl"){
      // do not set the value here; it is set later.
    }else if(key.substr(0, 10) == "x-amz-meta"){
      requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str());
    }else if(key == "x-amz-copy-source"){
      requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str());
    }else if(key == "x-amz-server-side-encryption" && value != "aws:kms"){
      // Only copy mode.
      if(is_copy && !AddSseRequestHead(SSE_S3, value, false, true)){
        S3FS_PRN_WARN("Failed to insert SSE-S3 header.");
      }
    }else if(key == "x-amz-server-side-encryption-aws-kms-key-id"){
      // Only copy mode.
      if(is_copy && !value.empty() && !AddSseRequestHead(SSE_KMS, value, false, true)){
        S3FS_PRN_WARN("Failed to insert SSE-KMS header.");
      }
    }else if(key == "x-amz-server-side-encryption-customer-key-md5"){
      // Only copy mode.
      if(is_copy){
        if(!AddSseRequestHead(SSE_C, value, true, true) || !AddSseRequestHead(SSE_C, value, true, false)){
          S3FS_PRN_WARN("Failed to insert SSE-C header.");
        }
      }
    }
  }

  // "x-amz-acl", storage class, sse
  if(!S3fsCurl::default_acl.empty()){
    requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-acl", S3fsCurl::default_acl.c_str());
  }
  if(REDUCED_REDUNDANCY == GetStorageClass()){
    requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "REDUCED_REDUNDANCY");
  }else if(STANDARD_IA == GetStorageClass()){
    requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "STANDARD_IA");
  }
  // SSE
  if(!is_copy){
    string ssevalue("");
    if(!AddSseRequestHead(S3fsCurl::GetSseType(), ssevalue, false, false)){
      S3FS_PRN_WARN("Failed to set SSE header, but continue...");
    }
  }
  if(is_use_ahbe){
    // set additional header by ahbe conf
    requestHeaders = AdditionalHeader::get()->AddHeader(requestHeaders, tpath);
  }

  op = "PUT";
  type = REQTYPE_PUTHEAD;
  insertAuthHeaders();

  // setopt
  curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
  curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true);                // HTTP PUT
  curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
  curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
  curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0);               // Content-Length
  curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
  S3fsCurl::AddUserAgent(hCurl);        // put User-Agent

  S3FS_PRN_INFO3("copying... [path=%s]", tpath);

  int result = RequestPerform();
  if(0 == result){
    // PUT can return a 200 status code even though an error occurred,
    // so we need to check the response body.
    //
    // example error body:
    //     <?xml version="1.0" encoding="UTF-8"?>
    //     <Error>
    //       <Code>AccessDenied</Code>
    //       <Message>Access Denied</Message>
    //       <RequestId>E4CA6F6767D6685C</RequestId>
    //       <HostId>BHzLOATeDuvN8Es1wI8IcERq4kl4dc2A9tOB8Yqr39Ys6fl7N4EJ8sjGiVvu6wLP</HostId>
    //     </Error>
    //
    const char* pstrbody = bodydata->str();
    if(!pstrbody || NULL != strcasestr(pstrbody, "<Error>")){
      S3FS_PRN_ERR("PutHeadRequest got a 200 status response, but the body contained an error (or was NULL). The request failed while copying the object in S3.");
      S3FS_PRN_DBG("PutHeadRequest Response Body : %s", (pstrbody ? pstrbody : "(null)"));
      result = -EIO;
    }
  }
  delete bodydata;
  bodydata = NULL;

  return result;
}
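
//
// Note (assumption, for illustration): PutHeadRequest() is the header-only PUT used
// to update object metadata. The caller is expected to put "x-amz-copy-source"
// (and usually "x-amz-metadata-directive: REPLACE") into meta beforehand, so that
// S3 copies the object onto itself with the new headers; no object body is sent
// (CURLOPT_INFILESIZE is set to 0 above).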

int S3fsCurl::PutRequest(const char* tpath, headers_t& meta, int fd)
{
  struct stat st;
  FILE* file = NULL;

  S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath));

  if(!tpath){
    return -1;
  }
  if(-1 != fd){
    // duplicate fd
    int fd2;
    if(-1 == (fd2 = dup(fd)) || -1 == fstat(fd2, &st) || 0 != lseek(fd2, 0, SEEK_SET) || NULL == (file = fdopen(fd2, "rb"))){
      S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno);
      if(-1 != fd2){
        close(fd2);
      }
      return -errno;
    }
    b_infile = file;
  }else{
    // This case creates a zero byte object (called by create_file_object()).
    S3FS_PRN_INFO3("create zero byte file object.");
  }

  if(!CreateCurlHandle(true)){
    if(file){
      fclose(file);
    }
    return -1;
  }
  string resource;
  string turl;
  MakeUrlResource(get_realpath(tpath).c_str(), resource, turl);

  url             = prepare_url(turl.c_str());
  path            = get_realpath(tpath);
  requestHeaders  = NULL;
  responseHeaders.clear();
  bodydata        = new BodyData();

  // Make request headers
  string strMD5;
  if(-1 != fd && S3fsCurl::is_content_md5){
    strMD5 = s3fs_get_content_md5(fd);
    requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-MD5", strMD5.c_str());
  }

  for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
    string key = lower(iter->first);
    string value = iter->second;
    if(key == "content-type"){
      requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str());
    }else if(key.substr(0, 9) == "x-amz-acl"){
      // do not set the value here; it is set later.
    }else if(key.substr(0, 10) == "x-amz-meta"){
      requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str());
    }else if(key == "x-amz-server-side-encryption" && value != "aws:kms"){
      // skip this header; it is set by the logic below.
    }else if(key == "x-amz-server-side-encryption-aws-kms-key-id"){
      // skip this header; it is set by the logic below.
    }else if(key == "x-amz-server-side-encryption-customer-key-md5"){
      // skip this header; it is set by the logic below.
    }
  }
  // "x-amz-acl", storage class, sse
  if(!S3fsCurl::default_acl.empty()){
    requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-acl", S3fsCurl::default_acl.c_str());
  }
  if(REDUCED_REDUNDANCY == GetStorageClass()){
    requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "REDUCED_REDUNDANCY");
  }else if(STANDARD_IA == GetStorageClass()){
    requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "STANDARD_IA");
  }
  // SSE
  string ssevalue("");
  if(!AddSseRequestHead(S3fsCurl::GetSseType(), ssevalue, false, false)){
    S3FS_PRN_WARN("Failed to set SSE header, but continue...");
  }
  if(is_use_ahbe){
    // set additional header by ahbe conf
    requestHeaders = AdditionalHeader::get()->AddHeader(requestHeaders, tpath);
  }

  op = "PUT";
  type = REQTYPE_PUT;
  insertAuthHeaders();

  // setopt
  curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
  curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true);                // HTTP PUT
  curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
  curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
  curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
  if(file){
    curl_easy_setopt(hCurl, CURLOPT_INFILESIZE_LARGE, static_cast<curl_off_t>(st.st_size)); // Content-Length
    curl_easy_setopt(hCurl, CURLOPT_INFILE, file);
  }else{
    curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0);             // Content-Length: 0
  }
  S3fsCurl::AddUserAgent(hCurl);        // put User-Agent

  S3FS_PRN_INFO3("uploading... [path=%s][fd=%d][size=%jd]", tpath, fd, (intmax_t)(-1 != fd ? st.st_size : 0));

  int result = RequestPerform();
  delete bodydata;
  bodydata = NULL;
  if(file){
    fclose(file);
  }

  return result;
}
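
//
// Illustrative usage sketch (not part of the original source): passing fd = -1
// makes PutRequest() send an empty body, which is how a zero byte object is
// created; the "Content-Type" value below is a hypothetical example:
//
//   headers_t meta;
//   meta["Content-Type"] = string("application/octet-stream");
//   S3fsCurl s3fscurl(true);                                     // true: apply ahbe headers
//   int result = s3fscurl.PutRequest("/dir/newfile", meta, -1);  // create an empty object
//
// With a valid fd, the descriptor is duplicated, the size is taken from fstat(),
// and the whole file is streamed as a single PUT.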

int S3fsCurl::PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size, sse_type_t ssetype, string& ssevalue)
{
  S3FS_PRN_INFO3("[tpath=%s][start=%jd][size=%jd]", SAFESTRPTR(tpath), (intmax_t)start, (intmax_t)size);

  if(!tpath || -1 == fd || 0 > start || 0 > size){
    return -1;
  }

  if(!CreateCurlHandle(true)){
    return -1;
  }
  string resource;
  string turl;
  MakeUrlResource(get_realpath(tpath).c_str(), resource, turl);

  url             = prepare_url(turl.c_str());
  path            = get_realpath(tpath);
  requestHeaders  = NULL;
  responseHeaders.clear();

  if(-1 != start && 0 < size){
    string range = "bytes=";
    range += str(start);
    range += "-";
    range += str(start + size - 1);
    requestHeaders = curl_slist_sort_insert(requestHeaders, "Range", range.c_str());
  }
  // SSE
  if(!AddSseRequestHead(ssetype, ssevalue, true, false)){
    S3FS_PRN_WARN("Failed to set SSE header, but continue...");
  }

  op = "GET";
  type = REQTYPE_GET;
  insertAuthHeaders();

  // setopt
  curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
  curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
  curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, S3fsCurl::DownloadWriteCallback);
  curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)this);
  S3fsCurl::AddUserAgent(hCurl);        // put User-Agent

  // set info for the callback function.
  // (only fd, startpos and size are used; the other members are not used.)
  partdata.clear();
  partdata.fd         = fd;
  partdata.startpos   = start;
  partdata.size       = size;
  b_partdata_startpos = start;
  b_partdata_size     = size;
  b_ssetype           = ssetype;
  b_ssevalue          = ssevalue;
  b_ssekey_pos        = -1;         // this value is not used for get object.

  return 0;
}
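
//
// Note: for a partial download the request built above carries a standard HTTP
// range header of the form
//
//   Range: bytes=<start>-<start + size - 1>
//
// so, for example, start=0 and size=1048576 requests the first 1MiB of the object.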

int S3fsCurl::GetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size)
{
  int result;

  S3FS_PRN_INFO3("[tpath=%s][start=%jd][size=%jd]", SAFESTRPTR(tpath), (intmax_t)start, (intmax_t)size);

  if(!tpath){
    return -1;
  }
  sse_type_t ssetype;
  string     ssevalue;
  if(!get_object_sse_type(tpath, ssetype, ssevalue)){
    S3FS_PRN_WARN("Failed to get SSE type for file(%s).", SAFESTRPTR(tpath));
  }

  if(0 != (result = PreGetObjectRequest(tpath, fd, start, size, ssetype, ssevalue))){
2013-07-23 16:01:48 +00:00
|
|
|
return result;
|
2013-07-05 02:28:31 +00:00
|
|
|
}
|
|
|
|
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("downloading... [path=%s][fd=%d]", tpath, fd);
|
2013-07-23 16:01:48 +00:00
|
|
|
|
|
|
|
result = RequestPerform();
|
|
|
|
partdata.clear();
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
int S3fsCurl::CheckBucket(void)
|
|
|
|
{
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("check a bucket.");
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
string resource;
|
|
|
|
string turl;
|
2013-08-20 07:16:12 +00:00
|
|
|
MakeUrlResource(get_realpath("/").c_str(), resource, turl);
|
2013-07-05 02:28:31 +00:00
|
|
|
|
2013-08-20 07:16:12 +00:00
|
|
|
url = prepare_url(turl.c_str());
|
2015-03-21 04:31:59 +00:00
|
|
|
path = get_realpath("/");
|
2013-07-05 02:28:31 +00:00
|
|
|
requestHeaders = NULL;
|
|
|
|
responseHeaders.clear();
|
|
|
|
bodydata = new BodyData();
|
|
|
|
|
2017-10-26 14:21:48 +00:00
|
|
|
op = "GET";
|
|
|
|
type = REQTYPE_CHKBUCKET;
|
|
|
|
insertAuthHeaders();
|
2015-01-20 16:31:36 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
// setopt
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
2016-04-17 07:44:03 +00:00
|
|
|
S3fsCurl::AddUserAgent(hCurl); // put User-Agent
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
int result = RequestPerform();
|
2014-09-28 23:12:53 +00:00
|
|
|
if (result != 0) {
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("Check bucket failed, S3 response: %s", (bodydata ? bodydata->str() : ""));
|
2014-09-28 23:12:53 +00:00
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
int S3fsCurl::ListBucketRequest(const char* tpath, const char* query)
|
|
|
|
{
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath));
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
if(!tpath){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
MakeUrlResource("", resource, turl); // NOTICE: path is "".
|
|
|
|
if(query){
|
|
|
|
turl += "?";
|
|
|
|
turl += query;
|
2017-10-26 14:21:48 +00:00
|
|
|
query_string = query;
|
2013-07-05 02:28:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
url = prepare_url(turl.c_str());
|
2015-03-21 04:31:59 +00:00
|
|
|
path = get_realpath(tpath);
|
2013-07-05 02:28:31 +00:00
|
|
|
requestHeaders = NULL;
|
|
|
|
responseHeaders.clear();
|
|
|
|
bodydata = new BodyData();
|
|
|
|
|
2017-10-26 14:21:48 +00:00
|
|
|
op = "GET";
|
|
|
|
type = REQTYPE_LISTBUCKET;
|
|
|
|
insertAuthHeaders();
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
// setopt
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
2016-04-17 07:44:03 +00:00
|
|
|
S3fsCurl::AddUserAgent(hCurl); // put User-Agent
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
return RequestPerform();
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Initialize multipart upload
|
|
|
|
//
|
|
|
|
// Example :
|
|
|
|
// POST /example-object?uploads HTTP/1.1
|
|
|
|
// Host: example-bucket.s3.amazonaws.com
|
|
|
|
// Date: Mon, 1 Nov 2010 20:34:56 GMT
|
|
|
|
// Authorization: AWS VGhpcyBtZXNzYWdlIHNpZ25lZCBieSBlbHZpbmc=
|
|
|
|
//
|
2014-08-26 17:11:10 +00:00
|
|
|
int S3fsCurl::PreMultipartPostRequest(const char* tpath, headers_t& meta, string& upload_id, bool is_copy)
|
2013-07-05 02:28:31 +00:00
|
|
|
{
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath));
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
if(!tpath){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
MakeUrlResource(get_realpath(tpath).c_str(), resource, turl);
|
|
|
|
|
2017-10-26 14:21:48 +00:00
|
|
|
query_string = "uploads";
|
2015-01-28 17:13:11 +00:00
|
|
|
turl += "?" + query_string;
|
|
|
|
url = prepare_url(turl.c_str());
|
2015-03-21 04:31:59 +00:00
|
|
|
path = get_realpath(tpath);
|
2015-01-28 17:13:11 +00:00
|
|
|
requestHeaders = NULL;
|
|
|
|
bodydata = new BodyData();
|
2013-07-05 02:28:31 +00:00
|
|
|
responseHeaders.clear();
|
|
|
|
|
|
|
|
string contype = S3fsCurl::LookupMimeType(string(tpath));
|
|
|
|
|
|
|
|
for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
|
2015-01-28 17:13:11 +00:00
|
|
|
string key = lower(iter->first);
|
|
|
|
string value = iter->second;
|
|
|
|
if(key.substr(0, 9) == "x-amz-acl"){
|
2013-07-05 02:28:31 +00:00
|
|
|
// do not set the value here; it is set later.
|
2015-01-28 17:13:11 +00:00
|
|
|
}else if(key.substr(0, 10) == "x-amz-meta"){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str());
|
2017-05-07 09:29:08 +00:00
|
|
|
}else if(key == "x-amz-server-side-encryption" && value != "aws:kms"){
|
2015-11-01 13:54:47 +00:00
|
|
|
// Only copy mode.
|
|
|
|
if(is_copy && !AddSseRequestHead(SSE_S3, value, false, true)){
|
|
|
|
S3FS_PRN_WARN("Failed to insert SSE-S3 header.");
|
|
|
|
}
|
2017-05-07 09:29:08 +00:00
|
|
|
}else if(key == "x-amz-server-side-encryption-aws-kms-key-id"){
|
2014-08-26 17:11:10 +00:00
|
|
|
// Only copy mode.
|
2015-11-01 13:54:47 +00:00
|
|
|
if(is_copy && !value.empty() && !AddSseRequestHead(SSE_KMS, value, false, true)){
|
|
|
|
S3FS_PRN_WARN("Failed to insert SSE-KMS header.");
|
|
|
|
}
|
|
|
|
}else if(key == "x-amz-server-side-encryption-customer-key-md5"){
|
|
|
|
// Only copy mode.
|
|
|
|
if(is_copy){
|
|
|
|
if(!AddSseRequestHead(SSE_C, value, true, true) || !AddSseRequestHead(SSE_C, value, true, false)){
|
|
|
|
S3FS_PRN_WARN("Failed to insert SSE-C header.");
|
|
|
|
}
|
2014-07-19 19:02:55 +00:00
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
}
|
|
|
|
}
|
2015-09-17 20:10:45 +00:00
|
|
|
// "x-amz-acl", storage class, sse
|
2017-04-22 23:15:02 +00:00
|
|
|
if(!S3fsCurl::default_acl.empty()){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-acl", S3fsCurl::default_acl.c_str());
|
|
|
|
}
|
2015-09-17 20:10:45 +00:00
|
|
|
if(REDUCED_REDUNDANCY == GetStorageClass()){
|
2015-01-28 17:13:11 +00:00
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "REDUCED_REDUNDANCY");
|
2015-09-17 20:10:45 +00:00
|
|
|
} else if(STANDARD_IA == GetStorageClass()){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "STANDARD_IA");
|
2013-07-05 02:28:31 +00:00
|
|
|
}
|
2015-10-06 14:46:14 +00:00
|
|
|
// SSE
|
2015-11-01 13:54:47 +00:00
|
|
|
if(!is_copy){
|
|
|
|
string ssevalue("");
|
|
|
|
if(!AddSseRequestHead(S3fsCurl::GetSseType(), ssevalue, false, false)){
|
|
|
|
S3FS_PRN_WARN("Failed to set SSE header, but continue...");
|
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
}
|
2013-08-16 19:24:01 +00:00
|
|
|
if(is_use_ahbe){
|
|
|
|
// set additional header by ahbe conf
|
|
|
|
requestHeaders = AdditionalHeader::get()->AddHeader(requestHeaders, tpath);
|
|
|
|
}
|
2015-01-20 16:31:36 +00:00
|
|
|
|
2017-10-26 14:21:48 +00:00
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept", NULL);
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Length", NULL);
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str());
|
2015-01-20 16:31:36 +00:00
|
|
|
|
2017-10-26 14:21:48 +00:00
|
|
|
op = "POST";
|
|
|
|
type = REQTYPE_PREMULTIPOST;
|
|
|
|
insertAuthHeaders();
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
// setopt
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_POST, true); // POST
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, 0);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
2016-04-17 07:44:03 +00:00
|
|
|
S3fsCurl::AddUserAgent(hCurl); // put User-Agent
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
// request
|
|
|
|
int result;
|
|
|
|
if(0 != (result = RequestPerform())){
|
|
|
|
delete bodydata;
|
|
|
|
bodydata = NULL;
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Parse XML body for UploadId
|
|
|
|
if(!S3fsCurl::GetUploadId(upload_id)){
|
|
|
|
delete bodydata;
|
|
|
|
bodydata = NULL;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
delete bodydata;
|
|
|
|
bodydata = NULL;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-07-10 06:24:06 +00:00
|
|
|
int S3fsCurl::CompleteMultipartPostRequest(const char* tpath, string& upload_id, etaglist_t& parts)
|
2013-07-05 02:28:31 +00:00
|
|
|
{
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("[tpath=%s][parts=%zu]", SAFESTRPTR(tpath), parts.size());
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
if(!tpath){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
// make contents
|
|
|
|
string postContent;
|
|
|
|
postContent += "<CompleteMultipartUpload>\n";
|
|
|
|
for(int cnt = 0; cnt < (int)parts.size(); cnt++){
|
2013-07-10 06:24:06 +00:00
|
|
|
if(0 == parts[cnt].length()){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("%d file part is not finished uploading.", cnt + 1);
|
2013-07-23 16:01:48 +00:00
|
|
|
return -1;
|
2013-07-05 02:28:31 +00:00
|
|
|
}
|
|
|
|
postContent += "<Part>\n";
|
2014-11-04 05:45:48 +00:00
|
|
|
postContent += " <PartNumber>" + str(cnt + 1) + "</PartNumber>\n";
|
2016-11-23 20:27:15 +00:00
|
|
|
postContent += " <ETag>" + parts[cnt] + "</ETag>\n";
|
2013-07-05 02:28:31 +00:00
|
|
|
postContent += "</Part>\n";
|
|
|
|
}
|
|
|
|
postContent += "</CompleteMultipartUpload>\n";
|
|
|
|
|
|
|
|
// set postdata
|
2013-08-21 07:43:32 +00:00
|
|
|
postdata = reinterpret_cast<const unsigned char*>(postContent.c_str());
|
|
|
|
b_postdata = postdata;
|
|
|
|
postdata_remaining = postContent.size(); // without null
|
|
|
|
b_postdata_remaining = postdata_remaining;
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
MakeUrlResource(get_realpath(tpath).c_str(), resource, turl);
|
|
|
|
|
2017-10-26 14:21:48 +00:00
|
|
|
query_string = "uploadId=" + upload_id;
|
2015-01-28 17:13:11 +00:00
|
|
|
turl += "?" + query_string;
|
|
|
|
url = prepare_url(turl.c_str());
|
2015-03-21 04:31:59 +00:00
|
|
|
path = get_realpath(tpath);
|
2015-01-28 17:13:11 +00:00
|
|
|
requestHeaders = NULL;
|
|
|
|
bodydata = new BodyData();
|
2015-01-20 16:31:36 +00:00
|
|
|
responseHeaders.clear();
|
2015-01-28 17:13:11 +00:00
|
|
|
string contype = S3fsCurl::LookupMimeType(string(tpath));
|
2015-01-20 16:31:36 +00:00
|
|
|
|
2017-10-26 14:21:48 +00:00
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept", NULL);
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str());
|
2015-01-28 17:13:11 +00:00
|
|
|
|
2017-10-26 14:21:48 +00:00
|
|
|
op = "POST";
|
|
|
|
type = REQTYPE_COMPLETEMULTIPOST;
|
|
|
|
insertAuthHeaders();
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
// setopt
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_POST, true); // POST
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
2013-09-27 07:39:07 +00:00
|
|
|
curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, static_cast<curl_off_t>(postdata_remaining));
|
2013-07-05 02:28:31 +00:00
|
|
|
curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback);
|
2016-04-17 07:44:03 +00:00
|
|
|
S3fsCurl::AddUserAgent(hCurl); // put User-Agent
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
// request
|
|
|
|
int result = RequestPerform();
|
|
|
|
delete bodydata;
|
|
|
|
bodydata = NULL;
|
|
|
|
postdata = NULL;
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
int S3fsCurl::MultipartListRequest(string& body)
|
|
|
|
{
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("list request(multipart)");
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
string resource;
|
|
|
|
string turl;
|
2015-03-21 04:31:59 +00:00
|
|
|
path = get_realpath("/");
|
|
|
|
MakeUrlResource(path.c_str(), resource, turl);
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
turl += "?uploads";
|
|
|
|
url = prepare_url(turl.c_str());
|
|
|
|
requestHeaders = NULL;
|
|
|
|
responseHeaders.clear();
|
|
|
|
bodydata = new BodyData();
|
|
|
|
|
2017-10-26 14:21:48 +00:00
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept", NULL);
|
2015-01-28 17:13:11 +00:00
|
|
|
|
2017-10-26 14:21:48 +00:00
|
|
|
op = "GET";
|
|
|
|
type = REQTYPE_MULTILIST;
|
|
|
|
insertAuthHeaders();
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
// setopt
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
2016-04-17 07:44:03 +00:00
|
|
|
S3fsCurl::AddUserAgent(hCurl); // put User-Agent
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
int result;
|
|
|
|
if(0 == (result = RequestPerform()) && 0 < bodydata->size()){
|
|
|
|
body = bodydata->str();
|
|
|
|
}else{
|
|
|
|
body = "";
|
|
|
|
}
|
|
|
|
delete bodydata;
|
|
|
|
bodydata = NULL;
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2013-11-11 13:45:35 +00:00
|
|
|
int S3fsCurl::AbortMultipartUpload(const char* tpath, string& upload_id)
|
|
|
|
{
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath));
|
2013-11-11 13:45:35 +00:00
|
|
|
|
|
|
|
if(!tpath){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
MakeUrlResource(get_realpath(tpath).c_str(), resource, turl);
|
|
|
|
|
|
|
|
turl += "?uploadId=" + upload_id;
|
|
|
|
url = prepare_url(turl.c_str());
|
2015-03-21 04:31:59 +00:00
|
|
|
path = get_realpath(tpath);
|
2013-11-11 13:45:35 +00:00
|
|
|
requestHeaders = NULL;
|
|
|
|
responseHeaders.clear();
|
|
|
|
|
2017-10-26 14:21:48 +00:00
|
|
|
op = "DELETE";
|
|
|
|
type = REQTYPE_ABORTMULTIUPLOAD;
|
|
|
|
insertAuthHeaders();
|
2013-11-11 13:45:35 +00:00
|
|
|
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE");
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
2016-04-17 07:44:03 +00:00
|
|
|
S3fsCurl::AddUserAgent(hCurl); // put User-Agent
|
2013-11-11 13:45:35 +00:00
|
|
|
|
|
|
|
return RequestPerform();
|
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
//
|
|
|
|
// PUT /ObjectName?partNumber=PartNumber&uploadId=UploadId HTTP/1.1
|
|
|
|
// Host: BucketName.s3.amazonaws.com
|
|
|
|
// Date: date
|
|
|
|
// Content-Length: Size
|
|
|
|
// Authorization: Signature
|
|
|
|
//
|
|
|
|
// PUT /my-movie.m2ts?partNumber=1&uploadId=VCVsb2FkIElEIGZvciBlbZZpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZR HTTP/1.1
|
|
|
|
// Host: example-bucket.s3.amazonaws.com
|
|
|
|
// Date: Mon, 1 Nov 2010 20:34:56 GMT
|
|
|
|
// Content-Length: 10485760
|
|
|
|
// Content-MD5: pUNXr/BjKK5G2UKvaRRrOA==
|
|
|
|
// Authorization: AWS VGhpcyBtZXNzYWdlIHNpZ25lZGGieSRlbHZpbmc=
|
|
|
|
//
|
2013-07-12 00:33:36 +00:00
|
|
|
|
2015-08-17 04:42:45 +00:00
|
|
|
int S3fsCurl::UploadMultipartPostSetup(const char* tpath, int part_num, const string& upload_id)
|
2013-07-05 02:28:31 +00:00
|
|
|
{
|
2018-02-25 13:08:41 +00:00
|
|
|
S3FS_PRN_INFO3("[tpath=%s][start=%jd][size=%jd][part=%d]", SAFESTRPTR(tpath), (intmax_t)(partdata.startpos), (intmax_t)(partdata.size), part_num);
|
2013-07-05 02:28:31 +00:00
|
|
|
|
2013-07-12 00:33:36 +00:00
|
|
|
if(-1 == partdata.fd || -1 == partdata.startpos || -1 == partdata.size){
|
|
|
|
return -1;
|
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
// make md5 and file pointer
|
2016-11-23 20:27:15 +00:00
|
|
|
if(S3fsCurl::is_content_md5){
|
|
|
|
unsigned char *md5raw = s3fs_md5hexsum(partdata.fd, partdata.startpos, partdata.size);
|
|
|
|
if(md5raw == NULL){
|
|
|
|
S3FS_PRN_ERR("Could not make md5 for file(part %d)", part_num);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
partdata.etag = s3fs_hex(md5raw, get_md5_digest_length());
|
|
|
|
char* md5base64p = s3fs_base64(md5raw, get_md5_digest_length());
|
2017-10-26 14:21:48 +00:00
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-MD5", md5base64p);
|
2016-11-23 20:27:15 +00:00
|
|
|
free(md5base64p);
|
|
|
|
free(md5raw);
|
2013-07-05 02:28:31 +00:00
|
|
|
}
|
|
|
|
|
2013-07-12 00:33:36 +00:00
|
|
|
// create handle
|
2013-07-05 02:28:31 +00:00
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
2013-07-12 00:33:36 +00:00
|
|
|
|
|
|
|
// make request
|
2017-10-26 14:21:48 +00:00
|
|
|
query_string = "partNumber=" + str(part_num) + "&uploadId=" + upload_id;
|
|
|
|
string urlargs = "?" + query_string;
|
2013-07-05 02:28:31 +00:00
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
MakeUrlResource(get_realpath(tpath).c_str(), resource, turl);
|
|
|
|
|
2015-01-28 17:13:11 +00:00
|
|
|
turl += urlargs;
|
|
|
|
url = prepare_url(turl.c_str());
|
2015-03-21 04:31:59 +00:00
|
|
|
path = get_realpath(tpath);
|
2015-01-28 17:13:11 +00:00
|
|
|
requestHeaders = NULL;
|
|
|
|
bodydata = new BodyData();
|
|
|
|
headdata = new BodyData();
|
2013-07-05 02:28:31 +00:00
|
|
|
responseHeaders.clear();
|
|
|
|
|
2017-05-07 09:29:08 +00:00
|
|
|
// SSE
|
|
|
|
if(SSE_C == S3fsCurl::GetSseType()){
|
|
|
|
string ssevalue("");
|
|
|
|
if(!AddSseRequestHead(S3fsCurl::GetSseType(), ssevalue, false, false)){
|
|
|
|
S3FS_PRN_WARN("Failed to set SSE header, but continue...");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-26 14:21:48 +00:00
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept", NULL);
|
2015-01-20 16:31:36 +00:00
|
|
|
|
2017-10-26 14:21:48 +00:00
|
|
|
op = "PUT";
|
|
|
|
type = REQTYPE_UPLOADMULTIPOST;
|
|
|
|
insertAuthHeaders();
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
// setopt
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true); // HTTP PUT
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
2016-11-23 20:27:15 +00:00
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HEADERDATA, (void*)&responseHeaders);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HEADERFUNCTION, HeaderCallback);
|
2013-09-27 07:39:07 +00:00
|
|
|
curl_easy_setopt(hCurl, CURLOPT_INFILESIZE_LARGE, static_cast<curl_off_t>(partdata.size)); // Content-Length
|
2013-07-12 00:33:36 +00:00
|
|
|
curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::UploadReadCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this);
|
2013-07-05 02:28:31 +00:00
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
2016-04-17 07:44:03 +00:00
|
|
|
S3fsCurl::AddUserAgent(hCurl); // put User-Agent
|
2013-07-05 02:28:31 +00:00
|
|
|
|
2013-07-10 06:24:06 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-08-17 04:42:45 +00:00
|
|
|
int S3fsCurl::UploadMultipartPostRequest(const char* tpath, int part_num, const string& upload_id)
|
2013-07-10 06:24:06 +00:00
|
|
|
{
|
|
|
|
int result;
|
|
|
|
|
2018-02-25 13:08:41 +00:00
|
|
|
S3FS_PRN_INFO3("[tpath=%s][start=%jd][size=%jd][part=%d]", SAFESTRPTR(tpath), (intmax_t)(partdata.startpos), (intmax_t)(partdata.size), part_num);
|
2013-07-10 06:24:06 +00:00
|
|
|
|
|
|
|
// setup
|
|
|
|
if(0 != (result = S3fsCurl::UploadMultipartPostSetup(tpath, part_num, upload_id))){
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
// request
|
2013-07-10 06:24:06 +00:00
|
|
|
if(0 == (result = RequestPerform())){
|
2017-04-02 07:27:43 +00:00
|
|
|
// UploadMultipartPostComplete returns true on success -> convert to 0
|
|
|
|
result = !UploadMultipartPostComplete();
|
2013-07-05 02:28:31 +00:00
|
|
|
}
|
2013-11-11 13:45:35 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
// closing
|
|
|
|
delete bodydata;
|
|
|
|
bodydata = NULL;
|
|
|
|
delete headdata;
|
|
|
|
headdata = NULL;
|
|
|
|
|
2013-07-10 06:24:06 +00:00
|
|
|
return result;
|
2013-07-05 02:28:31 +00:00
|
|
|
}
|
|
|
|
|
2013-11-11 13:45:35 +00:00
|
|
|
int S3fsCurl::CopyMultipartPostRequest(const char* from, const char* to, int part_num, string& upload_id, headers_t& meta)
|
2013-07-05 02:28:31 +00:00
|
|
|
{
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("[from=%s][to=%s][part=%d]", SAFESTRPTR(from), SAFESTRPTR(to), part_num);
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
if(!from || !to){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
2017-10-26 14:21:48 +00:00
|
|
|
query_string = "partNumber=" + str(part_num) + "&uploadId=" + upload_id;
|
|
|
|
string urlargs = "?" + query_string;
|
2013-07-05 02:28:31 +00:00
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
MakeUrlResource(get_realpath(to).c_str(), resource, turl);
|
|
|
|
|
|
|
|
turl += urlargs;
|
|
|
|
url = prepare_url(turl.c_str());
|
2015-03-21 04:31:59 +00:00
|
|
|
path = get_realpath(to);
|
2013-07-05 02:28:31 +00:00
|
|
|
requestHeaders = NULL;
|
|
|
|
responseHeaders.clear();
|
|
|
|
bodydata = new BodyData();
|
|
|
|
headdata = new BodyData();
|
|
|
|
|
|
|
|
// Make request headers
|
|
|
|
for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
|
2015-01-28 17:13:11 +00:00
|
|
|
string key = lower(iter->first);
|
|
|
|
string value = iter->second;
|
|
|
|
if(key == "content-type"){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str());
|
|
|
|
}else if(key == "x-amz-copy-source"){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str());
|
|
|
|
}else if(key == "x-amz-copy-source-range"){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, iter->first.c_str(), value.c_str());
|
2013-07-05 02:28:31 +00:00
|
|
|
}
|
2013-11-11 13:45:35 +00:00
|
|
|
// NOTICE: x-amz-acl and x-amz-server-side-encryption are not set!
|
2013-08-16 19:24:01 +00:00
|
|
|
}
|
2015-01-20 16:31:36 +00:00
|
|
|
|
2017-10-26 14:21:48 +00:00
|
|
|
op = "PUT";
|
|
|
|
type = REQTYPE_COPYMULTIPOST;
|
|
|
|
insertAuthHeaders();
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
// setopt
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true); // HTTP PUT
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HEADERDATA, (void*)headdata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HEADERFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0); // Content-Length
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
2016-04-17 07:44:03 +00:00
|
|
|
S3fsCurl::AddUserAgent(hCurl); // put User-Agent
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
// request
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("copying... [from=%s][to=%s][part=%d]", from, to, part_num);
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
int result = RequestPerform();
|
|
|
|
if(0 == result){
|
2015-07-27 22:47:08 +00:00
|
|
|
// parse ETag from response
|
|
|
|
xmlDocPtr doc;
|
|
|
|
if(NULL == (doc = xmlReadMemory(bodydata->str(), bodydata->size(), "", NULL, 0))){
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
if(NULL == doc->children){
|
|
|
|
S3FS_XMLFREEDOC(doc);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
for(xmlNodePtr cur_node = doc->children->children; NULL != cur_node; cur_node = cur_node->next){
|
|
|
|
if(XML_ELEMENT_NODE == cur_node->type){
|
|
|
|
string elementName = reinterpret_cast<const char*>(cur_node->name);
|
|
|
|
if(cur_node->children){
|
|
|
|
if(XML_TEXT_NODE == cur_node->children->type){
|
|
|
|
if(elementName == "ETag") {
|
|
|
|
string etag = reinterpret_cast<const char *>(cur_node->children->content);
|
|
|
|
if(etag.size() >= 2 && *etag.begin() == '"' && *etag.rbegin() == '"'){
|
|
|
|
etag.assign(etag.substr(1, etag.size() - 2));
|
|
|
|
}
|
|
|
|
partdata.etag.assign(etag);
|
|
|
|
partdata.uploaded = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
S3FS_XMLFREEDOC(doc);
|
2013-07-05 02:28:31 +00:00
|
|
|
}
|
2015-07-27 22:47:08 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
delete bodydata;
|
|
|
|
bodydata = NULL;
|
|
|
|
delete headdata;
|
|
|
|
headdata = NULL;
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2017-04-02 07:27:43 +00:00
|
|
|
bool S3fsCurl::UploadMultipartPostComplete()
|
|
|
|
{
|
|
|
|
headers_t::iterator it = responseHeaders.find("ETag");
|
|
|
|
if (it == responseHeaders.end()) {
|
|
|
|
return false;
|
|
|
|
}
|
2017-05-07 09:29:08 +00:00
|
|
|
|
2017-04-02 07:27:43 +00:00
|
|
|
// check etag(md5);
|
2017-05-07 09:29:08 +00:00
|
|
|
//
|
|
|
|
// The ETAG when using SSE_C and SSE_KMS does not reflect the MD5 we sent
|
|
|
|
// SSE_C: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
|
|
|
|
// SSE_KMS is not covered by the page above, but the following page states the same in its highlights:
|
|
|
|
// http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html
|
|
|
|
//
|
|
|
|
if(S3fsCurl::is_content_md5 && SSE_C != S3fsCurl::GetSseType() && SSE_KMS != S3fsCurl::GetSseType()){
|
|
|
|
if(!etag_equals(it->second, partdata.etag)){
|
|
|
|
return false;
|
|
|
|
}
|
2017-04-02 07:27:43 +00:00
|
|
|
}
|
|
|
|
partdata.etaglist->at(partdata.etagpos).assign(it->second);
|
|
|
|
partdata.uploaded = true;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2014-08-26 17:11:10 +00:00
|
|
|
int S3fsCurl::MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool is_copy)
|
2013-07-05 02:28:31 +00:00
|
|
|
{
|
|
|
|
int result;
|
|
|
|
string upload_id;
|
|
|
|
off_t chunk;
|
|
|
|
off_t bytes_remaining;
|
2013-07-10 06:24:06 +00:00
|
|
|
etaglist_t list;
|
2013-07-05 02:28:31 +00:00
|
|
|
stringstream strrange;
|
|
|
|
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("[tpath=%s]", SAFESTRPTR(tpath));
|
2013-07-05 02:28:31 +00:00
|
|
|
|
2014-08-26 17:11:10 +00:00
|
|
|
if(0 != (result = PreMultipartPostRequest(tpath, meta, upload_id, is_copy))){
|
2013-07-05 02:28:31 +00:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
DestroyCurlHandle();
|
|
|
|
|
|
|
|
for(bytes_remaining = size, chunk = 0; 0 < bytes_remaining; bytes_remaining -= chunk){
|
|
|
|
chunk = bytes_remaining > MAX_MULTI_COPY_SOURCE_SIZE ? MAX_MULTI_COPY_SOURCE_SIZE : bytes_remaining;
|
|
|
|
|
|
|
|
strrange << "bytes=" << (size - bytes_remaining) << "-" << (size - bytes_remaining + chunk - 1);
|
|
|
|
meta["x-amz-copy-source-range"] = strrange.str();
|
2013-11-11 13:45:35 +00:00
|
|
|
strrange.str("");
|
2013-07-05 02:28:31 +00:00
|
|
|
strrange.clear(stringstream::goodbit);
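// Illustrative example (the sizes are placeholders, not the real constant):
// with size = 12GiB and MAX_MULTI_COPY_SOURCE_SIZE = 5GiB, this loop issues
// three copy parts with
//   x-amz-copy-source-range: bytes=0-5368709119
//   x-amz-copy-source-range: bytes=5368709120-10737418239
//   x-amz-copy-source-range: bytes=10737418240-12884901887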
|
|
|
|
|
2013-11-11 13:45:35 +00:00
|
|
|
if(0 != (result = CopyMultipartPostRequest(tpath, tpath, (list.size() + 1), upload_id, meta))){
|
2013-07-05 02:28:31 +00:00
|
|
|
return result;
|
|
|
|
}
|
2013-07-10 06:24:06 +00:00
|
|
|
list.push_back(partdata.etag);
|
2013-07-05 02:28:31 +00:00
|
|
|
DestroyCurlHandle();
|
|
|
|
}
|
|
|
|
|
|
|
|
if(0 != (result = CompleteMultipartPostRequest(tpath, upload_id, list))){
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-08-26 17:11:10 +00:00
|
|
|
int S3fsCurl::MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool is_copy)
|
2013-07-05 02:28:31 +00:00
|
|
|
{
|
|
|
|
int result;
|
|
|
|
string upload_id;
|
|
|
|
struct stat st;
|
|
|
|
int fd2;
|
2013-07-10 06:24:06 +00:00
|
|
|
etaglist_t list;
|
2013-07-05 02:28:31 +00:00
|
|
|
off_t remaining_bytes;
|
|
|
|
off_t chunk;
|
|
|
|
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd);
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
// duplicate fd
|
2013-09-14 21:50:39 +00:00
|
|
|
if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno);
|
2013-07-05 06:36:11 +00:00
|
|
|
if(-1 != fd2){
|
|
|
|
close(fd2);
|
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
if(-1 == fstat(fd2, &st)){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("Invalid file descriptor(errno=%d)", errno);
|
2013-09-14 21:50:39 +00:00
|
|
|
close(fd2);
|
2013-07-05 02:28:31 +00:00
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
|
2014-08-26 17:11:10 +00:00
|
|
|
if(0 != (result = PreMultipartPostRequest(tpath, meta, upload_id, is_copy))){
|
2013-09-14 21:50:39 +00:00
|
|
|
close(fd2);
|
2013-07-05 02:28:31 +00:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
DestroyCurlHandle();
|
|
|
|
|
|
|
|
// cycle through the open fd, pulling off multipart_size-byte chunks at a time
|
|
|
|
for(remaining_bytes = st.st_size; 0 < remaining_bytes; remaining_bytes -= chunk){
|
2013-07-10 06:24:06 +00:00
|
|
|
// chunk size
|
2014-03-30 07:53:41 +00:00
|
|
|
chunk = remaining_bytes > S3fsCurl::multipart_size ? S3fsCurl::multipart_size : remaining_bytes;
|
2013-07-05 02:28:31 +00:00
|
|
|
|
2013-07-12 00:33:36 +00:00
|
|
|
// set
|
2013-08-21 07:43:32 +00:00
|
|
|
partdata.fd = fd2;
|
|
|
|
partdata.startpos = st.st_size - remaining_bytes;
|
|
|
|
partdata.size = chunk;
|
|
|
|
b_partdata_startpos = partdata.startpos;
|
|
|
|
b_partdata_size = partdata.size;
|
2017-04-02 07:27:43 +00:00
|
|
|
partdata.add_etag_list(&list);
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
// upload part
|
2017-04-02 07:27:43 +00:00
|
|
|
if(0 != (result = UploadMultipartPostRequest(tpath, list.size(), upload_id))){
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_ERR("failed uploading part(%d)", result);
|
2013-09-14 21:50:39 +00:00
|
|
|
close(fd2);
|
2013-07-05 02:28:31 +00:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
DestroyCurlHandle();
|
2013-03-30 13:37:14 +00:00
|
|
|
}
|
2013-09-14 21:50:39 +00:00
|
|
|
close(fd2);
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
if(0 != (result = CompleteMultipartPostRequest(tpath, upload_id, list))){
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
return 0;
|
2013-03-30 13:37:14 +00:00
|
|
|
}
|
|
|
|
|
2015-08-17 04:42:45 +00:00
|
|
|
int S3fsCurl::MultipartUploadRequest(const string& upload_id, const char* tpath, int fd, off_t offset, size_t size, etaglist_t& list)
|
2015-10-18 17:03:41 +00:00
|
|
|
{
|
|
|
|
S3FS_PRN_INFO3("[upload_id=%s][tpath=%s][fd=%d][offset=%jd][size=%jd]", upload_id.c_str(), SAFESTRPTR(tpath), fd, (intmax_t)offset, (intmax_t)size);
|
|
|
|
|
|
|
|
// duplicate fd
|
|
|
|
int fd2;
|
|
|
|
if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){
|
|
|
|
S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno);
|
|
|
|
if(-1 != fd2){
|
|
|
|
close(fd2);
|
|
|
|
}
|
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
|
|
|
|
// set
|
|
|
|
partdata.fd = fd2;
|
|
|
|
partdata.startpos = offset;
|
|
|
|
partdata.size = size;
|
|
|
|
b_partdata_startpos = partdata.startpos;
|
|
|
|
b_partdata_size = partdata.size;
|
2017-04-02 07:27:43 +00:00
|
|
|
partdata.add_etag_list(&list);
|
2015-10-18 17:03:41 +00:00
|
|
|
|
|
|
|
// upload part
|
|
|
|
int result;
|
2017-04-02 07:27:43 +00:00
|
|
|
if(0 != (result = UploadMultipartPostRequest(tpath, list.size(), upload_id))){
|
2015-10-18 17:03:41 +00:00
|
|
|
S3FS_PRN_ERR("failed uploading part(%d)", result);
|
|
|
|
close(fd2);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
DestroyCurlHandle();
|
|
|
|
close(fd2);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
int S3fsCurl::MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size)
|
2013-03-30 13:37:14 +00:00
|
|
|
{
|
2013-07-05 02:28:31 +00:00
|
|
|
int result;
|
|
|
|
string upload_id;
|
|
|
|
off_t chunk;
|
|
|
|
off_t bytes_remaining;
|
2013-07-10 06:24:06 +00:00
|
|
|
etaglist_t list;
|
2013-07-05 02:28:31 +00:00
|
|
|
stringstream strrange;
|
2013-03-30 13:37:14 +00:00
|
|
|
|
2015-09-30 19:41:27 +00:00
|
|
|
S3FS_PRN_INFO3("[from=%s][to=%s]", SAFESTRPTR(from), SAFESTRPTR(to));
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
string srcresource;
|
|
|
|
string srcurl;
|
|
|
|
MakeUrlResource(get_realpath(from).c_str(), srcresource, srcurl);
|
|
|
|
|
|
|
|
meta["Content-Type"] = S3fsCurl::LookupMimeType(string(to));
|
|
|
|
meta["x-amz-copy-source"] = srcresource;
|
|
|
|
|
2014-08-26 17:11:10 +00:00
|
|
|
if(0 != (result = PreMultipartPostRequest(to, meta, upload_id, true))){
|
2013-03-30 13:37:14 +00:00
|
|
|
return result;
|
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
DestroyCurlHandle();
|
|
|
|
|
|
|
|
for(bytes_remaining = size, chunk = 0; 0 < bytes_remaining; bytes_remaining -= chunk){
|
|
|
|
chunk = bytes_remaining > MAX_MULTI_COPY_SOURCE_SIZE ? MAX_MULTI_COPY_SOURCE_SIZE : bytes_remaining;
|
|
|
|
|
|
|
|
strrange << "bytes=" << (size - bytes_remaining) << "-" << (size - bytes_remaining + chunk - 1);
|
|
|
|
meta["x-amz-copy-source-range"] = strrange.str();
|
2013-11-11 13:45:35 +00:00
|
|
|
strrange.str("");
|
2013-07-05 02:28:31 +00:00
|
|
|
strrange.clear(stringstream::goodbit);
|
|
|
|
|
2013-11-11 13:45:35 +00:00
|
|
|
if(0 != (result = CopyMultipartPostRequest(from, to, (list.size() + 1), upload_id, meta))){
|
2013-07-05 02:28:31 +00:00
|
|
|
return result;
|
|
|
|
}
|
2013-07-10 06:24:06 +00:00
|
|
|
list.push_back(partdata.etag);
|
2013-07-05 02:28:31 +00:00
|
|
|
DestroyCurlHandle();
|
2013-03-30 13:37:14 +00:00
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
if(0 != (result = CompleteMultipartPostRequest(to, upload_id, list))){
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
//-------------------------------------------------------------------
|
|
|
|
// Class S3fsMultiCurl
|
|
|
|
//-------------------------------------------------------------------
|
2017-11-18 03:50:36 +00:00
|
|
|
static const int MAX_MULTI_HEADREQ = 20; // default: max request count in readdir curl_multi.
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
//-------------------------------------------------------------------
|
|
|
|
// Class method for S3fsMultiCurl
|
|
|
|
//-------------------------------------------------------------------
|
|
|
|
int S3fsMultiCurl::max_multireq = MAX_MULTI_HEADREQ;
|
|
|
|
|
|
|
|
int S3fsMultiCurl::SetMaxMultiRequest(int max)
|
|
|
|
{
|
|
|
|
int old = S3fsMultiCurl::max_multireq;
|
|
|
|
S3fsMultiCurl::max_multireq= max;
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
|
|
|
//-------------------------------------------------------------------
|
|
|
|
// method for S3fsMultiCurl
|
|
|
|
//-------------------------------------------------------------------
|
2017-03-29 07:13:05 +00:00
|
|
|
S3fsMultiCurl::S3fsMultiCurl() : SuccessCallback(NULL), RetryCallback(NULL)
|
2013-07-05 02:28:31 +00:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
S3fsMultiCurl::~S3fsMultiCurl()
|
|
|
|
{
|
|
|
|
Clear();
|
|
|
|
}
|
|
|
|
|
2013-09-14 21:50:39 +00:00
|
|
|
bool S3fsMultiCurl::ClearEx(bool is_all)
|
2013-07-05 02:28:31 +00:00
|
|
|
{
|
2013-09-14 21:50:39 +00:00
|
|
|
s3fscurlmap_t::iterator iter;
|
|
|
|
for(iter = cMap_req.begin(); iter != cMap_req.end(); cMap_req.erase(iter++)){
|
|
|
|
S3fsCurl* s3fscurl = (*iter).second;
|
|
|
|
if(s3fscurl){
|
|
|
|
s3fscurl->DestroyCurlHandle();
|
|
|
|
delete s3fscurl; // with destroy curl handle.
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if(is_all){
|
|
|
|
for(iter = cMap_all.begin(); iter != cMap_all.end(); cMap_all.erase(iter++)){
|
|
|
|
S3fsCurl* s3fscurl = (*iter).second;
|
|
|
|
s3fscurl->DestroyCurlHandle();
|
|
|
|
delete s3fscurl;
|
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
}
|
2013-09-14 21:50:39 +00:00
|
|
|
S3FS_MALLOCTRIM(0);
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
S3fsMultiSuccessCallback S3fsMultiCurl::SetSuccessCallback(S3fsMultiSuccessCallback function)
|
|
|
|
{
|
|
|
|
S3fsMultiSuccessCallback old = SuccessCallback;
|
|
|
|
SuccessCallback = function;
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
|
|
|
S3fsMultiRetryCallback S3fsMultiCurl::SetRetryCallback(S3fsMultiRetryCallback function)
|
|
|
|
{
|
|
|
|
S3fsMultiRetryCallback old = RetryCallback;
|
|
|
|
RetryCallback = function;
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool S3fsMultiCurl::SetS3fsCurlObject(S3fsCurl* s3fscurl)
|
|
|
|
{
|
|
|
|
if(!s3fscurl){
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if(cMap_all.end() != cMap_all.find(s3fscurl->hCurl)){
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
cMap_all[s3fscurl->hCurl] = s3fscurl;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
int S3fsMultiCurl::MultiPerform(void)
|
|
|
|
{
|
2017-03-29 07:13:05 +00:00
|
|
|
std::vector<pthread_t> threads;
|
|
|
|
bool success = true;
|
2018-05-22 13:26:24 +00:00
|
|
|
bool isMultiHead = false;
|
2013-07-05 02:28:31 +00:00
|
|
|
|
2017-03-29 07:13:05 +00:00
|
|
|
for(s3fscurlmap_t::iterator iter = cMap_req.begin(); iter != cMap_req.end(); ++iter) {
|
|
|
|
pthread_t thread;
|
|
|
|
S3fsCurl* s3fscurl = (*iter).second;
|
|
|
|
int rc;
|
2013-07-05 02:28:31 +00:00
|
|
|
|
2018-05-22 13:26:24 +00:00
|
|
|
isMultiHead |= s3fscurl->GetOp() == "HEAD";
|
|
|
|
|
2017-03-29 07:13:05 +00:00
|
|
|
rc = pthread_create(&thread, NULL, S3fsMultiCurl::RequestPerformWrapper, static_cast<void*>(s3fscurl));
|
|
|
|
if (rc != 0) {
|
|
|
|
success = false;
|
|
|
|
S3FS_PRN_ERR("failed pthread_create - rc(%d)", rc);
|
|
|
|
break;
|
2013-07-05 02:28:31 +00:00
|
|
|
}
|
|
|
|
|
2017-03-29 07:13:05 +00:00
|
|
|
threads.push_back(thread);
|
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
|
2017-03-29 07:13:05 +00:00
|
|
|
for (std::vector<pthread_t>::iterator iter = threads.begin(); iter != threads.end(); ++iter) {
|
|
|
|
void* retval;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
rc = pthread_join(*iter, &retval);
|
|
|
|
if (rc) {
|
|
|
|
success = false;
|
|
|
|
S3FS_PRN_ERR("failed pthread_join - rc(%d)", rc);
|
|
|
|
} else {
|
|
|
|
int int_retval = (int)(intptr_t)(retval);
|
2018-05-22 13:26:24 +00:00
|
|
|
if (int_retval && !(int_retval == ENOENT && isMultiHead)) {
|
2018-02-26 10:06:08 +00:00
|
|
|
S3FS_PRN_WARN("thread failed - rc(%d)", int_retval);
|
2013-07-05 02:28:31 +00:00
|
|
|
}
|
|
|
|
}
|
2017-03-29 07:13:05 +00:00
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
|
2017-03-29 07:13:05 +00:00
|
|
|
return success ? 0 : -EIO;
|
2013-07-05 02:28:31 +00:00
|
|
|
}

int S3fsMultiCurl::MultiRead(void)
{
  for(s3fscurlmap_t::iterator iter = cMap_req.begin(); iter != cMap_req.end(); cMap_req.erase(iter++)) {
    S3fsCurl* s3fscurl = (*iter).second;

    bool isRetry = false;

    long responseCode = -1;
    if(s3fscurl->GetResponseCode(responseCode)){
      if(400 > responseCode){
        // add into stat cache
        if(SuccessCallback && !SuccessCallback(s3fscurl)){
          S3FS_PRN_WARN("error from callback function(%s).", s3fscurl->url.c_str());
        }
      }else if(400 == responseCode){
        // this can happen in multipart requests, so retry
        S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
        isRetry = true;
      }else if(404 == responseCode){
        // not found
        // HEAD requests on readdir_multi_head can return 404
        if(s3fscurl->GetOp() != "HEAD"){
          S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
        }
      }else if(500 == responseCode){
        // Retry on 500 (found 11/13/2013): s3fs sometimes receives a 500
        // error from S3 although the same request succeeds when retried.
        S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
        isRetry = true;
      }else{
        // Retry in all other cases.
        S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
        isRetry = true;
      }
    }else{
      S3FS_PRN_ERR("failed a request(Unknown response code: %s)", s3fscurl->url.c_str());
    }

    if(!isRetry){
      s3fscurl->DestroyCurlHandle();
      delete s3fscurl;
    }else{
      S3fsCurl* retrycurl = NULL;

      // For retry
      if(RetryCallback){
        retrycurl = RetryCallback(s3fscurl);
        if(NULL != retrycurl){
          cMap_all[retrycurl->hCurl] = retrycurl;
        }else{
          // Could not set up the retry request.
          return -EIO;
        }
      }
      if(s3fscurl != retrycurl){
        s3fscurl->DestroyCurlHandle();
        delete s3fscurl;
      }
    }
  }
  return 0;
}

int S3fsMultiCurl::Request(void)
{
  S3FS_PRN_INFO3("[count=%zu]", cMap_all.size());

  // Make the request list.
  //
  // Send the requests in a loop (with retry); when many requests are sent
  // at once, curl sometimes fails with "Couldn't connect to server".
  //
  while(!cMap_all.empty()){
    // set curl handle to multi handle
    int                     result;
    int                     cnt;
    s3fscurlmap_t::iterator iter;
    for(cnt = 0, iter = cMap_all.begin(); cnt < S3fsMultiCurl::max_multireq && iter != cMap_all.end(); cMap_all.erase(iter++), cnt++){
      CURL*     hCurl    = (*iter).first;
      S3fsCurl* s3fscurl = (*iter).second;

      cMap_req[hCurl] = s3fscurl;
    }

    // Send multi request.
    if(0 != (result = MultiPerform())){
      Clear();
      return result;
    }

    // Read the result
    if(0 != (result = MultiRead())){
      Clear();
      return result;
    }

    // Cleanup curl handle in multi handle
    ClearEx(false);
  }
  return 0;
}
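
//
// Illustrative sketch (not compiled): a rough idea of how a caller drives
// S3fsMultiCurl::Request() through the setters defined above. The callback
// signatures follow the typedefs used by SetSuccessCallback/SetRetryCallback;
// make_head_request() is a hypothetical factory standing in for whatever code
// builds a prepared S3fsCurl object for one path.
//
#if 0
static bool my_success_cb(S3fsCurl* s3fscurl)
{
  // invoked for each request that completed with a response code below 400
  return true;
}

static S3fsCurl* my_retry_cb(S3fsCurl* s3fscurl)
{
  // return a fresh S3fsCurl object to retry with, or NULL to abort with -EIO
  return NULL;
}

static int run_multi_requests(const std::vector<std::string>& paths)
{
  S3fsMultiCurl curlmulti;
  curlmulti.SetSuccessCallback(my_success_cb);
  curlmulti.SetRetryCallback(my_retry_cb);

  for(std::vector<std::string>::const_iterator it = paths.begin(); it != paths.end(); ++it){
    S3fsCurl* s3fscurl = make_head_request(*it);   // hypothetical helper
    if(!s3fscurl || !curlmulti.SetS3fsCurlObject(s3fscurl)){
      delete s3fscurl;
      return -EIO;
    }
  }
  // sends at most max_multireq requests per batch, one worker thread each
  return curlmulti.Request();
}
#endif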

// thread function for performing an S3fsCurl request
void* S3fsMultiCurl::RequestPerformWrapper(void* arg) {
  return (void*)(intptr_t)(static_cast<S3fsCurl*>(arg)->RequestPerform());
}

//-------------------------------------------------------------------
// Utility functions
//-------------------------------------------------------------------
//
// curl_slist_sort_insert
// This function is like curl_slist_append(), but it inserts the data in
// sorted order, because the AWS signature requires sorted headers.
//
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data)
{
  if(!data){
    return list;
  }
  string strkey = data;
  string strval = "";

  string::size_type pos = strkey.find(':', 0);
  if(string::npos != pos){
    strval = strkey.substr(pos + 1);
    strkey = strkey.substr(0, pos);
  }

  return curl_slist_sort_insert(list, strkey.c_str(), strval.c_str());
}

struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value)
{
  struct curl_slist* curpos;
  struct curl_slist* lastpos;
  struct curl_slist* new_item;

  if(!key){
    return list;
  }
  if(NULL == (new_item = reinterpret_cast<struct curl_slist*>(malloc(sizeof(struct curl_slist))))){
    return list;
  }

  // key and value are trimmed; the key comparison below is case-insensitive
  string strkey = trim(string(key));
  string strval = trim(string(value ? value : ""));
  string strnew = key + string(": ") + strval;
  if(NULL == (new_item->data = strdup(strnew.c_str()))){
    free(new_item);
    return list;
  }
  new_item->next = NULL;

  for(lastpos = NULL, curpos = list; curpos; lastpos = curpos, curpos = curpos->next){
    string strcur = curpos->data;
    size_t pos;
    if(string::npos != (pos = strcur.find(':', 0))){
      strcur = strcur.substr(0, pos);
    }

    int result = strcasecmp(strkey.c_str(), strcur.c_str());
    if(0 == result){
      // same key, so replace the existing entry.
      if(lastpos){
        lastpos->next = new_item;
      }else{
        list = new_item;
      }
      new_item->next = curpos->next;
      free(curpos->data);
      free(curpos);
      break;

    }else if(0 > result){
      // insert the new entry before curpos.
      if(lastpos){
        lastpos->next = new_item;
      }else{
        list = new_item;
      }
      new_item->next = curpos;
      break;
    }
  }

  if(!curpos){
    // append at the end of the list
    if(lastpos){
      lastpos->next = new_item;
    }else{
      // the list was empty
      list = new_item;
    }
  }

  return list;
}
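
//
// Illustrative sketch (not compiled): the sorted-insert behaviour described
// above. Keys are kept in case-insensitive alphabetical order, and inserting
// an existing key replaces its previous value, so the resulting list can be
// fed directly into signature calculation.
//
#if 0
static void curl_slist_sort_insert_example(void)
{
  struct curl_slist* headers = NULL;
  headers = curl_slist_sort_insert(headers, "x-amz-meta-mtime", "1500000000");
  headers = curl_slist_sort_insert(headers, "Content-Type", "text/plain");
  headers = curl_slist_sort_insert(headers, "Content-Type", "application/json");  // replaces the previous entry

  // prints "Content-Type: application/json", then "x-amz-meta-mtime: 1500000000"
  for(struct curl_slist* cur = headers; cur; cur = cur->next){
    printf("%s\n", cur->data);
  }
  curl_slist_free_all(headers);
}
#endif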

string get_sorted_header_keys(const struct curl_slist* list)
{
  string sorted_headers;

  if(!list){
    return sorted_headers;
  }

  for( ; list; list = list->next){
    string strkey = list->data;
    size_t pos;
    if(string::npos != (pos = strkey.find(':', 0))){
      if (trim(strkey.substr(pos + 1)).empty()) {
        // skip empty-value headers (as they are discarded by libcurl)
        continue;
      }
      strkey = strkey.substr(0, pos);
    }
    if(0 < sorted_headers.length()){
      sorted_headers += ";";
    }
    sorted_headers += lower(strkey);
  }

  return sorted_headers;
}

string get_header_value(const struct curl_slist* list, const string &key)
{
  if(!list){
    return "";
  }

  for( ; list; list = list->next){
    string strkey = list->data;
    size_t pos;
    if(string::npos != (pos = strkey.find(':', 0))){
      if(0 == strcasecmp(trim(strkey.substr(0, pos)).c_str(), key.c_str())){
        return trim(strkey.substr(pos + 1));
      }
    }
  }

  return "";
}

string get_canonical_headers(const struct curl_slist* list)
{
  string canonical_headers;

  if(!list){
    canonical_headers = "\n";
    return canonical_headers;
  }

  for( ; list; list = list->next){
    string strhead = list->data;
    size_t pos;
    if(string::npos != (pos = strhead.find(':', 0))){
      string strkey = trim(lower(strhead.substr(0, pos)));
      string strval = trim(strhead.substr(pos + 1));
      if (strval.empty()) {
        // skip empty-value headers (as they are discarded by libcurl)
        continue;
      }
      strhead = strkey + string(":") + strval;
    }else{
      strhead = trim(lower(strhead));
    }
    canonical_headers += strhead;
    canonical_headers += "\n";
  }
  return canonical_headers;
}

string get_canonical_headers(const struct curl_slist* list, bool only_amz)
{
  string canonical_headers;

  if(!list){
    canonical_headers = "\n";
    return canonical_headers;
  }

  for( ; list; list = list->next){
    string strhead = list->data;
    size_t pos;
    if(string::npos != (pos = strhead.find(':', 0))){
      string strkey = trim(lower(strhead.substr(0, pos)));
      string strval = trim(strhead.substr(pos + 1));
      if (strval.empty()) {
        // skip empty-value headers (as they are discarded by libcurl)
        continue;
      }
      strhead = strkey + string(":") + strval;
    }else{
      strhead = trim(lower(strhead));
    }
    if(only_amz && strhead.substr(0, 5) != "x-amz"){
      continue;
    }
    canonical_headers += strhead;
    canonical_headers += "\n";
  }
  return canonical_headers;
}
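
//
// Illustrative sketch (not compiled): what the helpers above produce for a
// small header list. The comments show the expected strings for the inputs
// given, matching the canonical-request format used by the AWS signature.
//
#if 0
static void canonical_headers_example(void)
{
  struct curl_slist* headers = NULL;
  headers = curl_slist_sort_insert(headers, "Host", "mybucket.s3.amazonaws.com");
  headers = curl_slist_sort_insert(headers, "x-amz-content-sha256", "UNSIGNED-PAYLOAD");

  // "host:mybucket.s3.amazonaws.com\nx-amz-content-sha256:UNSIGNED-PAYLOAD\n"
  string canonical   = get_canonical_headers(headers);

  // only x-amz-* headers: "x-amz-content-sha256:UNSIGNED-PAYLOAD\n"
  string amz_only    = get_canonical_headers(headers, true);

  // "host;x-amz-content-sha256"
  string signed_keys = get_sorted_header_keys(headers);

  curl_slist_free_all(headers);
}
#endif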

// utility functions that use the global settings (service_path, bucket, host, etc.)
bool MakeUrlResource(const char* realpath, string& resourcepath, string& url)
{
  if(!realpath){
    return false;
  }
  resourcepath = urlEncode(service_path + bucket + realpath);
  url          = host + resourcepath;
  return true;
}
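
//
// Illustrative sketch (not compiled): with hypothetical global values
// service_path = "/", bucket = "mybucket" and host = "https://s3.amazonaws.com",
// MakeUrlResource("/dir/file.txt", ...) yields
//   resourcepath = "/mybucket/dir/file.txt"  (url-encoded)
//   url          = "https://s3.amazonaws.com/mybucket/dir/file.txt"
//
#if 0
static void make_url_resource_example(void)
{
  string resourcepath;
  string url;
  if(MakeUrlResource("/dir/file.txt", resourcepath, url)){
    S3FS_PRN_INFO3("resource=%s url=%s", resourcepath.c_str(), url.c_str());
  }
}
#endif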

string prepare_url(const char* url)
{
  S3FS_PRN_INFO3("URL is %s", url);

  string uri;
  string host;
  string path;
  string url_str       = string(url);
  string token         = string("/") + bucket;
  int    bucket_pos    = url_str.find(token);
  int    bucket_length = token.size();
  int    uri_length    = 0;

  if(!strncasecmp(url_str.c_str(), "https://", 8)){
    uri_length = 8;
  } else if(!strncasecmp(url_str.c_str(), "http://", 7)) {
    uri_length = 7;
  }
  uri = url_str.substr(0, uri_length);

  if(!pathrequeststyle){
    host = bucket + "." + url_str.substr(uri_length, bucket_pos - uri_length).c_str();
    path = url_str.substr((bucket_pos + bucket_length));
  }else{
    host = url_str.substr(uri_length, bucket_pos - uri_length).c_str();
    string part = url_str.substr((bucket_pos + bucket_length));
    if('/' != part[0]){
      part = "/" + part;
    }
    path = "/" + bucket + part;
  }

  url_str = uri + host + path;

  S3FS_PRN_INFO3("URL changed is %s", url_str.c_str());

  return url_str;
}
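
//
// Illustrative sketch (not compiled): the rewrite performed by prepare_url(),
// assuming the global bucket is "mybucket" and pathrequeststyle is false
// (virtual-hosted style). With pathrequeststyle set, the bucket is kept in
// the path component instead of being moved into the host name.
//
#if 0
static void prepare_url_example(void)
{
  // path-style input     : https://s3.amazonaws.com/mybucket/dir/file.txt
  // virtual-hosted output: https://mybucket.s3.amazonaws.com/dir/file.txt
  string rewritten = prepare_url("https://s3.amazonaws.com/mybucket/dir/file.txt");
}
#endif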

/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/