/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <dirent.h>
#include <errno.h>
#include <syslog.h>
#include <pthread.h>
#include <curl/curl.h>
#include <openssl/bio.h>
#include <openssl/buffer.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/md5.h>
#include <libxml/xpath.h>
#include <libxml/xpathInternals.h>
#include <libxml/tree.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <map>

/*
 * Summary of changes (1.63 -> 1.64)
 *
 * This version was made to fix a significant issue with directory objects.
 * Please be careful and review the new s3fs.
 *
 * ==========================
 * List of changes
 * ==========================
 * 1) Fixed bugs
 *    Fixed several memory leaks and un-freed curl handles.
 *    Fixed latent bugs that had not been reported yet.
 *    Fixed a bug where s3fs could not update an object's mtime while it held an
 *    open file descriptor for that object.
 *    Please let us know if you find a new bug or a memory leak.
 *
 * 2) Changed code
 *    Reworked s3fs_readdir(), list_bucket(), and related functions.
 *    Changed get_realpath() to return std::string.
 *    Changed the handling of exit(): because exit() used to be called directly from
 *    many FUSE callbacks, those callbacks now call fuse_exit() and return an error.
 *    Changed the matching of "x-amz-meta" response headers to be case-insensitive.
 *
 * 3) Added an option
 *    Added the norenameapi option for S3-compatible storage without the copy API.
 *    This option is a subset of the nocopyapi option.
 *    Please read the man page or run s3fs with --help.
 *
 * 4) Directory objects
 *    This is a very large and important change.
 *    Directory objects are now stored as "dir/" instead of "dir" to stay compatible
 *    with other S3 client applications, and this version still understands directory
 *    objects made by older versions.
 *    When the new s3fs changes the attributes, owner/group, or mtime of a directory
 *    object, it automatically converts the object from the old name ("dir") to the
 *    new name ("dir/").
 *    To convert old objects ("dir") to new ones ("dir/") manually, you can use the
 *    shell script mergedir.sh in the test directory.
 *
 *    * About the directory object name
 *    AWS S3 allows both "dir" and "dir/" as object names.
 *    Versions of s3fs before this one understood only "dir" as a directory object
 *    name and did not understand "dir/"; the new version understands both.
 *    Users need to be aware of the special situations described below.
 *    The new version deletes the old "dir" object and creates a new "dir/" object
 *    whenever the user changes the permission, owner/group, or mtime of a directory
 *    object. This happens automatically and in the background.
 *    If you need to merge manually, use the mergedir.sh shell script in the test
 *    directory; it runs chmod/chown/touch on each directory it finds.
 *    When another S3 client has made a directory object ("dir/") without the
 *    metadata that s3fs needs, this script can add that metadata.
 *    If the script does not meet your needs, you can read and modify it yourself.
 *    Use it carefully, because it changes objects; if you find a bug in it, please
 *    let me know.
 *
 *    * Details
 *    ** Directory objects made by old versions
 *    Directory objects made by old versions are not understood by other S3 clients.
 *    The new s3fs was updated to keep compatibility with those clients.
 *    You can use mergedir.sh in the test directory to convert old directory objects
 *    ("dir") to new ones ("dir/"); after it runs, the renamed "dir/" objects are
 *    understood by other S3 clients.
 *    The script runs chmod/chown/chgrp/touch and similar commands against the old
 *    directory object ("dir"), and the new s3fs then converts it automatically.
 *    To convert a directory object manually, run any command that changes its
 *    attributes (mode/owner/group/mtime).
 *
 *    ** Directory objects made by the new version
 *    The new version names directory objects "dir/".
 *    Because the name ends with "/", other S3 client applications recognize it as a
 *    directory.
 *    I tested the new directory objects against s3cmd, tntDrive, DragonDisk, and
 *    Gladinet, and compatibility was good, though small differences remain because
 *    each client follows its own conventions.
 *    Be aware that old s3fs versions cannot understand directory objects made by the
 *    new s3fs, so every s3fs instance that accesses the same bucket should be upgraded.
 *
 *    ** Directory objects made by other S3 clients
 *    To treat an object as a directory, s3fs creates and uses special metadata, sent
 *    as "x-amz-meta-***" and "Content-Type" HTTP headers:
 *      Content-Type: application/x-directory
 *      x-amz-meta-mode: <mode>
 *      x-amz-meta-uid: <UID>
 *      x-amz-meta-gid: <GID>
 *      x-amz-meta-mtime: <unix time of last modification>
 *    Other S3 clients build directory objects without the attributes that s3fs needs.
 *    Running "ls" on an s3fs mount that contains directories/files made by other S3
 *    clients produces output like:
 *      d--------- 1 root root    0 Feb 27 11:21 dir
 *      ---------- 1 root root 1024 Mar 14 02:15 file
 *    Because the objects have no "x-amz-meta-mode" metadata, the mode is 0000.
 *    The directory still shows a "d" because s3fs treats an object as a directory
 *    when its name ends with "/" or it carries a "Content-Type: application/x-directory"
 *    header. (s3fs sets "Content-Type: application/x-directory" on directory objects;
 *    other S3 clients set "binary/octet-stream".)
 *    With this result, nobody except root may operate on the objects.
 *    The owner and group are "root" (UID=0) because the objects have no
 *    "x-amz-meta-uid/gid".
 *    If an object has no "x-amz-meta-mtime", s3fs falls back to the "Last-Modified"
 *    HTTP header, so the object's mtime is the "Last-Modified" value (the same logic
 *    as in older versions).
 *    As explained above, if you need to change the object attributes, you can do so
 *    manually or with mergedir.sh.
 *
 *    * Examples of compatibility with s3cmd and other clients
 *    ** Case A) Only a "dir/file" object
 *    In this case only a "dir/file" object exists, without a "dir/" object; such
 *    objects are made by s3cmd and similar tools.
 *    The list-bucket REST API response with the "delimiter=/" parameter contains
 *    "CommonPrefixes", and "dir/" is listed under "CommonPrefixes/Prefix", but no
 *    real "dir/" object exists.
 *    s3fs would need to treat this entry as a directory, but there is no real
 *    directory object ("dir" or "dir/"), so neither the new nor the old s3fs
 *    understands the "dir/" entry in "CommonPrefixes": fetching metadata for "dir"
 *    or "dir/" fails.
 *    In this case "ls" shows:
 *      ??????????? ? ? ? ? ? dir
 *    No user or process can operate on this "dir", because s3fs cannot determine its
 *    permissions, and the "dir/file" object cannot be listed or operated on either.
 *    Some other S3 clients (tntDrive, Gladinet, etc.) cannot understand this object,
 *    just like s3fs.
 *    To operate on the "dir/file" object, you must create "dir/" as a directory
 *    object. Because the phantom "dir" entry already exists, you cannot simply create
 *    a "dir/" directory (s3cmd will not create a "dir/" object because the name ends
 *    with "/"). Instead, create a directory with another name (for example "dir2/"),
 *    move the "dir/file" objects into the new directory, and finally rename "dir2/"
 *    to "dir/".
 *
 *    ** Case B) Both "dir" and "dir/file" objects
 *    In this case "dir" and "dir/file" objects exist, made by s3cmd or similar tools.
 *    s3cmd and s3fs treat the "dir" object as a normal (file) object because it has
 *    no directory metadata and its name does not end with "/".
 *    The list-bucket REST API response still lists "dir/" under "CommonPrefixes/Prefix".
 *    s3fs checks "dir/" and then "dir" for a directory object, but the "dir" object
 *    is not a directory object.
 *    (For compatibility with old versions, the new s3fs checks for a directory object
 *    in the order "dir/", then "dir".)
 *    In this case "ls" shows:
 *      ---------- 1 root root 0 Feb 27 02:48 dir
 *    As a result, "dir/file" cannot be listed or operated on, because "dir" is a file.
 *    To treat "dir" as a directory, add directory metadata to the "dir" object with
 *    s3cmd.
 *
 *    ** Case C) Both "dir" and "dir/" objects
 *    In the last case, "dir" and "dir/" objects exist, made by different S3 clients
 *    (for example, first upload a "dir/" object as a directory with the new s3fs,
 *    then upload a "dir" object with s3cmd).
 *    The new s3fs treats "dir/" as the directory, because it searches in the order
 *    "dir/", then "dir"; as a result, the "dir" object cannot be listed or operated on.
 *
 *    ** Compatibility between S3 clients
 *    Neither the new nor the old s3fs understands "dir" and "dir/" at the same time;
 *    tntDrive and Gladinet behave the same way.
 *    If both "dir/" and "dir" objects exist, s3fs gives priority to "dir/".
 *    s3cmd and DragonDisk, however, understand both objects.
 *
 * git-svn-id: http://s3fs.googlecode.com/svn/trunk@392 df820570-a93a-0410-bd06-b72b767a4274
 */
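
//
// Illustrative sketch (not part of the original source): the directory-object
// rules described above boil down to a check like the following, given an
// object's name and its response headers. The headers_t type and the header
// names are the ones used later in this file; the helper itself is hypothetical.
//
//   static bool looks_like_directory(const std::string& name, const headers_t& meta)
//   {
//     if(!name.empty() && '/' == name[name.size() - 1]){
//       return true;                                   // "dir/" style object
//     }
//     headers_t::const_iterator iter = meta.find("Content-Type");
//     if(iter != meta.end() && iter->second == "application/x-directory"){
//       return true;                                   // s3fs style directory object
//     }
//     return false;                                    // plain file object
//   }
//
// Mode, uid/gid and mtime would then be read from "x-amz-meta-mode",
// "x-amz-meta-uid"/"x-amz-meta-gid" and "x-amz-meta-mtime", falling back to
// "Last-Modified" when the mtime header is missing, as described above.
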
#include <algorithm>
#include <list>
#include <vector>

#include "common.h"
#include "curl.h"
#include "string_util.h"
#include "s3fs.h"
#include "s3fs_util.h"

using namespace std;

//-------------------------------------------------------------------
// Class BodyData
//-------------------------------------------------------------------
#define BODYDATA_RESIZE_APPEND_MIN  (1 * 1024)         // 1KB
#define BODYDATA_RESIZE_APPEND_MID  (1 * 1024 * 1024)  // 1MB
#define BODYDATA_RESIZE_APPEND_MAX  (10 * 1024 * 1024) // 10MB

bool BodyData::Resize(size_t addbytes)
{
  if(IsSafeSize(addbytes)){
    return true;
  }
  // New size
  size_t need_size = (lastpos + addbytes + 1) - bufsize;

  if(BODYDATA_RESIZE_APPEND_MAX < bufsize){
    need_size = (BODYDATA_RESIZE_APPEND_MAX < need_size ? need_size : BODYDATA_RESIZE_APPEND_MAX);
  }else if(BODYDATA_RESIZE_APPEND_MID < bufsize){
    need_size = (BODYDATA_RESIZE_APPEND_MID < need_size ? need_size : BODYDATA_RESIZE_APPEND_MID);
  }else if(BODYDATA_RESIZE_APPEND_MIN < bufsize){
    need_size = ((bufsize * 2) < need_size ? need_size : (bufsize * 2));
  }else{
    need_size = (BODYDATA_RESIZE_APPEND_MIN < need_size ? need_size : BODYDATA_RESIZE_APPEND_MIN);
  }
  // realloc (use a temporary pointer so the original buffer is not leaked on failure)
  char* newtext;
  if(NULL == (newtext = (char*)realloc(text, (bufsize + need_size)))){
    FGPRINT("BodyData::Resize() not enough memory (realloc returned NULL)\n");
    SYSLOGDBGERR("not enough memory (realloc returned NULL)\n");
    return false;
  }
  text     = newtext;
  bufsize += need_size;
  return true;
}

void BodyData::Clear(void)
{
  if(text){
    free(text);
    text = NULL;
  }
  lastpos = 0;
  bufsize = 0;
}

bool BodyData::Append(void* ptr, size_t bytes)
{
  if(!ptr){
    return false;
  }
  if(0 == bytes){
    return true;
  }
  if(!Resize(bytes)){
    return false;
  }
  memcpy(&text[lastpos], ptr, bytes);
  lastpos += bytes;
  text[lastpos] = '\0';

  return true;
}

const char* BodyData::str(void) const
{
  static const char* strnull = "";
  if(!text){
    return strnull;
  }
  return text;
}
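
//
// Illustrative note (not part of the original source): BodyData is the growable
// buffer that the libcurl write callback appends into. A typical pairing, using
// the callback defined later in this file, looks like:
//
//   BodyData* body = new BodyData();
//   curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)body);
//   curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, S3fsCurl::WriteMemoryCallback);
//   // ... perform the request ...
//   printf("%s\n", body->str());   // NUL-terminated response body
//   delete body;
//
// Append() keeps the buffer NUL-terminated, so str() can always be treated as a
// C string even before any data has been appended.
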
//-------------------------------------------------------------------
// Class S3fsCurl
//-------------------------------------------------------------------
#define MULTIPART_SIZE              10485760    // 10MB
#define MAX_MULTI_COPY_SOURCE_SIZE  524288000   // 500MB

pthread_mutex_t  S3fsCurl::curl_handles_lock;
pthread_mutex_t  S3fsCurl::curl_share_lock;
bool             S3fsCurl::is_initglobal_done  = false;
CURLSH*          S3fsCurl::hCurlShare          = NULL;
bool             S3fsCurl::is_dns_cache        = true;   // default
long             S3fsCurl::connect_timeout     = 10;     // default
time_t           S3fsCurl::readwrite_timeout   = 30;     // default
int              S3fsCurl::retries             = 3;      // default
bool             S3fsCurl::is_public_bucket    = false;
string           S3fsCurl::default_acl         = "private";
bool             S3fsCurl::is_use_rrs          = false;
bool             S3fsCurl::is_use_sse          = false;
bool             S3fsCurl::is_content_md5      = false;
string           S3fsCurl::AWSAccessKeyId;
string           S3fsCurl::AWSSecretAccessKey;
long             S3fsCurl::ssl_verify_hostname = 1;      // default(original code...)
const EVP_MD*    S3fsCurl::evp_md              = EVP_sha1();
curltime_t       S3fsCurl::curl_times;
curlprogress_t   S3fsCurl::curl_progress;
string           S3fsCurl::curl_ca_bundle;
mimes_t          S3fsCurl::mimeTypes;

//-------------------------------------------------------------------
// Class methods for S3fsCurl
//-------------------------------------------------------------------
bool S3fsCurl::InitS3fsCurl(const char* MimeFile, bool reinit)
{
  if(!reinit){
    if(0 != pthread_mutex_init(&S3fsCurl::curl_handles_lock, NULL)){
      return false;
    }
    if(0 != pthread_mutex_init(&S3fsCurl::curl_share_lock, NULL)){
      return false;
    }
    if(!S3fsCurl::InitMimeType(MimeFile)){
      return false;
    }
  }
  if(!S3fsCurl::InitGlobalCurl()){
    return false;
  }
  if(!S3fsCurl::InitShareCurl()){
    return false;
  }
  return true;
}
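
//
// Illustrative note (not part of the original source): the intended lifecycle
// is one InitS3fsCurl()/DestroyS3fsCurl() pair per process, wrapping all of the
// per-request S3fsCurl objects. A minimal sketch:
//
//   if(!S3fsCurl::InitS3fsCurl("/etc/mime.types", false)){
//     return EXIT_FAILURE;   // mutex, mime table or global curl init failed
//   }
//   // ... run the FUSE loop; each request builds its own S3fsCurl ...
//   S3fsCurl::DestroyS3fsCurl(false);
//
// With reinit=true the mutexes and the mime table are left alone and only the
// global/shared curl state is torn down and rebuilt.
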
bool S3fsCurl::DestroyS3fsCurl(bool reinit)
{
  bool result = true;

  if(!S3fsCurl::DestroyShareCurl()){
    return false;
  }
  if(!S3fsCurl::DestroyGlobalCurl()){
    return false;
  }
  if(!reinit){
    if(0 != pthread_mutex_destroy(&S3fsCurl::curl_share_lock)){
      result = false;
    }
    if(0 != pthread_mutex_destroy(&S3fsCurl::curl_handles_lock)){
      result = false;
    }
  }
  return result;
}

bool S3fsCurl::InitGlobalCurl(void)
{
  if(S3fsCurl::is_initglobal_done){
    return false;
  }
  if(CURLE_OK != curl_global_init(CURL_GLOBAL_ALL)){
    FGPRINT("curl_global_init returned an error.\n");
    SYSLOGERR("curl_global_init returned an error.");
    return false;
  }
  S3fsCurl::is_initglobal_done = true;
  return true;
}

bool S3fsCurl::DestroyGlobalCurl(void)
{
  if(!S3fsCurl::is_initglobal_done){
    return false;
  }
  curl_global_cleanup();
  S3fsCurl::is_initglobal_done = false;
  return true;
}

bool S3fsCurl::InitShareCurl(void)
{
  CURLSHcode nSHCode;

  if(!S3fsCurl::is_dns_cache){
    return false;
  }
  if(!S3fsCurl::is_initglobal_done){
    FGPRINT("S3fsCurl::InitShareCurl : global curl is not initialized yet.\n");
    SYSLOGERR("S3fsCurl::InitShareCurl : global curl is not initialized yet.");
    return false;
  }
  if(S3fsCurl::hCurlShare){
    FGPRINT("S3fsCurl::InitShareCurl : already initialized.\n");
    SYSLOGERR("S3fsCurl::InitShareCurl : already initialized.");
    return false;
  }
  if(NULL == (S3fsCurl::hCurlShare = curl_share_init())){
    FGPRINT("S3fsCurl::InitShareCurl : curl_share_init failed\n");
    SYSLOGERR("S3fsCurl::InitShareCurl : curl_share_init failed");
    return false;
  }
  if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_LOCKFUNC, S3fsCurl::LockCurlShare))){
    FGPRINT("S3fsCurl::InitShareCurl : curl_share_setopt(LOCKFUNC) returns %d(%s)\n", nSHCode, curl_share_strerror(nSHCode));
    SYSLOGERR("S3fsCurl::InitShareCurl : %d(%s)", nSHCode, curl_share_strerror(nSHCode));
    return false;
  }
  if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_UNLOCKFUNC, S3fsCurl::UnlockCurlShare))){
    FGPRINT("S3fsCurl::InitShareCurl : curl_share_setopt(UNLOCKFUNC) returns %d(%s)\n", nSHCode, curl_share_strerror(nSHCode));
    SYSLOGERR("S3fsCurl::InitShareCurl : %d(%s)", nSHCode, curl_share_strerror(nSHCode));
    return false;
  }
  if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS))){
    FGPRINT("S3fsCurl::InitShareCurl : curl_share_setopt(DNS) returns %d(%s)\n", nSHCode, curl_share_strerror(nSHCode));
    SYSLOGERR("S3fsCurl::InitShareCurl : %d(%s)", nSHCode, curl_share_strerror(nSHCode));
    return false;
  }
  if(CURLSHE_OK != (nSHCode = curl_share_setopt(S3fsCurl::hCurlShare, CURLSHOPT_USERDATA, (void*)&S3fsCurl::curl_share_lock))){
    FGPRINT("S3fsCurl::InitShareCurl : curl_share_setopt(USERDATA) returns %d(%s)\n", nSHCode, curl_share_strerror(nSHCode));
    SYSLOGERR("S3fsCurl::InitShareCurl : %d(%s)", nSHCode, curl_share_strerror(nSHCode));
    return false;
  }
  return true;
}

bool S3fsCurl::DestroyShareCurl(void)
{
  if(!S3fsCurl::is_initglobal_done){
    FGPRINT("S3fsCurl::DestroyShareCurl : global curl is already destroyed.\n");
    SYSLOGERR("S3fsCurl::DestroyShareCurl : global curl is already destroyed.");
    return false;
  }
  if(!S3fsCurl::hCurlShare){
    if(S3fsCurl::is_dns_cache){
      FGPRINT("S3fsCurl::DestroyShareCurl : share curl is already destroyed.\n");
      SYSLOGERR("S3fsCurl::DestroyShareCurl : share curl is already destroyed.");
    }
    return false;
  }
  if(CURLSHE_OK != curl_share_cleanup(S3fsCurl::hCurlShare)){
    return false;
  }
  S3fsCurl::hCurlShare = NULL;
  return true;
}

void S3fsCurl::LockCurlShare(CURL* handle, curl_lock_data nLockData, curl_lock_access laccess, void* useptr)
{
  if(hCurlShare && useptr && CURL_LOCK_DATA_DNS == nLockData){
    pthread_mutex_t* lockmutex = static_cast<pthread_mutex_t*>(useptr);
    pthread_mutex_lock(lockmutex);
  }
}

void S3fsCurl::UnlockCurlShare(CURL* handle, curl_lock_data nLockData, void* useptr)
{
  if(hCurlShare && useptr && CURL_LOCK_DATA_DNS == nLockData){
    pthread_mutex_t* lockmutex = static_cast<pthread_mutex_t*>(useptr);
    pthread_mutex_unlock(lockmutex);
  }
}

// homegrown timeout mechanism
int S3fsCurl::CurlProgress(void *clientp, double dltotal, double dlnow, double ultotal, double ulnow)
{
  CURL* curl = static_cast<CURL*>(clientp);
  time_t now = time(0);
  progress_t p(dlnow, ulnow);

  pthread_mutex_lock(&S3fsCurl::curl_handles_lock);

  // any progress?
  if(p != S3fsCurl::curl_progress[curl]){
    // yes!
    S3fsCurl::curl_times[curl]    = now;
    S3fsCurl::curl_progress[curl] = p;
  }else{
    // timeout?
    if(now - S3fsCurl::curl_times[curl] > readwrite_timeout){
      pthread_mutex_unlock(&S3fsCurl::curl_handles_lock);
      SYSLOGERR("timeout now: %li, curl_times[curl]: %li, readwrite_timeout: %li",
          (long int)now, S3fsCurl::curl_times[curl], (long int)readwrite_timeout);
      return CURLE_ABORTED_BY_CALLBACK;
    }
  }

  pthread_mutex_unlock(&S3fsCurl::curl_handles_lock);
  return 0;
}
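
//
// Illustrative note (not part of the original source): CurlProgress implements
// the read/write timeout. curl calls it periodically with the current download
// and upload byte counts; as long as those counts keep changing, the last-seen
// time for the handle is refreshed. If the counters stay unchanged for more
// than readwrite_timeout seconds (30 by default), the callback returns
// CURLE_ABORTED_BY_CALLBACK, curl_easy_perform() fails with that same code, and
// RequestPerform() sleeps and retries. Example with the defaults: a download
// that makes no progress between t=0s and t=31s is aborted on the first
// progress callback after the 30 second mark.
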
bool S3fsCurl::InitMimeType(const char* MimeFile)
{
  if(!MimeFile){
    MimeFile = "/etc/mime.types";  // default
  }

  string line;
  ifstream MT(MimeFile);
  if(MT.good()){
    while(getline(MT, line)){
      // skip empty lines before indexing into the line
      if(line.size() == 0){
        continue;
      }
      if(line[0] == '#'){
        continue;
      }

      stringstream tmp(line);
      string mimeType;
      tmp >> mimeType;
      while(tmp){
        string ext;
        tmp >> ext;
        if(ext.size() == 0){
          continue;
        }
        S3fsCurl::mimeTypes[ext] = mimeType;
      }
    }
  }
  return true;
}

//
// @param name e.g., "index.html"
// @return e.g., "text/html"
//
string S3fsCurl::LookupMimeType(string name)
{
  string result("application/octet-stream");
  string::size_type last_pos  = name.find_last_of('.');
  string::size_type first_pos = name.find_first_of('.');
  string prefix, ext, ext2;

  // No dots in name, just return
  if(last_pos == string::npos){
    return result;
  }
  // extract the last extension
  if(last_pos != string::npos){
    ext = name.substr(1+last_pos, string::npos);
  }
  if(last_pos != string::npos){
    // one dot was found, now look for another
    if(first_pos != string::npos && first_pos < last_pos){
      prefix = name.substr(0, last_pos);
      // Now get the second to last file extension
      string::size_type next_pos = prefix.find_last_of('.');
      if(next_pos != string::npos){
        ext2 = prefix.substr(1+next_pos, string::npos);
      }
    }
  }

  // if we get here, then we have an extension (ext)
  mimes_t::const_iterator iter = S3fsCurl::mimeTypes.find(ext);
  // if the last extension matches a mimeType, then return that mime type
  if(iter != S3fsCurl::mimeTypes.end()){
    result = (*iter).second;
    return result;
  }

  // return with the default result if there isn't a second extension
  if(first_pos == last_pos){
    return result;
  }

  // Didn't find a mime-type for the last extension.
  // Look for the second-to-last extension in mimeTypes, return if found.
  iter = S3fsCurl::mimeTypes.find(ext2);
  if(iter != S3fsCurl::mimeTypes.end()){
    result = (*iter).second;
    return result;
  }

  // neither the last extension nor the second-to-last extension
  // matched a mimeType, return the default mime type
  return result;
}
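
//
// Illustrative examples (not part of the original source), assuming a typical
// /etc/mime.types has been loaded by InitMimeType():
//
//   LookupMimeType("index.html");      // -> "text/html"
//   LookupMimeType("archive.tar.gz");  // "gz" is tried first, then "tar"
//   LookupMimeType("README");          // no extension -> "application/octet-stream"
//
// The exact result for multi-extension names depends on which of the two
// extensions appears in the loaded mime.types file.
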
bool S3fsCurl::LocateBundle(void)
{
  // See if environment variable CURL_CA_BUNDLE is set.
  // If so, check it; if it is a good path, then set the
  // curl_ca_bundle variable to it.
  char *CURL_CA_BUNDLE;

  if(0 == S3fsCurl::curl_ca_bundle.size()){
    CURL_CA_BUNDLE = getenv("CURL_CA_BUNDLE");
    if(CURL_CA_BUNDLE != NULL){
      // check for existence and readability of the file
      ifstream BF(CURL_CA_BUNDLE);
      if(!BF.good()){
        SYSLOGERR("%s: file specified by CURL_CA_BUNDLE environment variable is not readable", program_name.c_str());
        return false;
      }
      BF.close();
      S3fsCurl::curl_ca_bundle.assign(CURL_CA_BUNDLE);
      return true;
    }
  }

  // not set via environment variable, look in likely locations

  ///////////////////////////////////////////
  // from curl's (7.21.2) acinclude.m4 file
  ///////////////////////////////////////////
  // dnl CURL_CHECK_CA_BUNDLE
  // dnl -------------------------------------------------
  // dnl Check if a default ca-bundle should be used
  // dnl
  // dnl regarding the paths this will scan:
  // dnl /etc/ssl/certs/ca-certificates.crt Debian systems
  // dnl /etc/pki/tls/certs/ca-bundle.crt   Redhat and Mandriva
  // dnl /usr/share/ssl/certs/ca-bundle.crt old(er) Redhat
  // dnl /usr/local/share/certs/ca-root.crt FreeBSD
  // dnl /etc/ssl/cert.pem                  OpenBSD
  // dnl /etc/ssl/certs/ (ca path)          SUSE
  ifstream BF("/etc/pki/tls/certs/ca-bundle.crt");
  if(BF.good()){
    BF.close();
    S3fsCurl::curl_ca_bundle.assign("/etc/pki/tls/certs/ca-bundle.crt");
  }
  return true;
}

size_t S3fsCurl::WriteMemoryCallback(void *ptr, size_t blockSize, size_t numBlocks, void *data)
{
  BodyData* body = (BodyData*)data;

  if(!body->Append(ptr, blockSize, numBlocks)){
    FGPRINT("WriteMemoryCallback(): BodyData.Append() returned false.\n");
    S3FS_FUSE_EXIT();
    return -1;
  }
  return (blockSize * numBlocks);
}

size_t S3fsCurl::ReadCallback(void *ptr, size_t size, size_t nmemb, void *userp)
{
  S3fsCurl* pCurl = reinterpret_cast<S3fsCurl*>(userp);

  if(1 > (size * nmemb)){
    return 0;
  }
  if(0 >= pCurl->postdata_remaining){
    return 0;
  }
  int copysize = std::min((int)(size * nmemb), pCurl->postdata_remaining);
  memcpy(ptr, pCurl->postdata, copysize);

  pCurl->postdata_remaining = (pCurl->postdata_remaining > copysize ? (pCurl->postdata_remaining - copysize) : 0);
  pCurl->postdata          += static_cast<size_t>(copysize);

  return copysize;
}

size_t S3fsCurl::HeaderCallback(void *data, size_t blockSize, size_t numBlocks, void *userPtr)
{
  headers_t* headers = reinterpret_cast<headers_t*>(userPtr);
  string header(reinterpret_cast<char*>(data), blockSize * numBlocks);
  string key;
  stringstream ss(header);

  if(getline(ss, key, ':')){
    // Force to lower, only "x-amz"
    string lkey = key;
    transform(lkey.begin(), lkey.end(), lkey.begin(), static_cast<int (*)(int)>(std::tolower));
    if(lkey.substr(0, 5) == "x-amz"){
      key = lkey;
    }
    string value;
    getline(ss, value);
    (*headers)[key] = trim(value);
  }
  return blockSize * numBlocks;
}
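
//
// Illustrative example (not part of the original source): for a raw response
// header line such as
//
//   "x-amz-meta-mtime: 1371032400\r\n"
//
// the callback above stores headers["x-amz-meta-mtime"] = "1371032400".
// Keys are lower-cased only when they start with "x-amz", so "Content-Type"
// keeps its original capitalization while "X-Amz-Meta-Uid" becomes
// "x-amz-meta-uid".
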
bool S3fsCurl::SetDnsCache(bool isCache)
{
  bool old = S3fsCurl::is_dns_cache;
  S3fsCurl::is_dns_cache = isCache;
  return old;
}

long S3fsCurl::SetConnectTimeout(long timeout)
{
  long old = S3fsCurl::connect_timeout;
  S3fsCurl::connect_timeout = timeout;
  return old;
}

time_t S3fsCurl::SetReadwriteTimeout(time_t timeout)
{
  time_t old = S3fsCurl::readwrite_timeout;
  S3fsCurl::readwrite_timeout = timeout;
  return old;
}

int S3fsCurl::SetRetries(int count)
{
  int old = S3fsCurl::retries;
  S3fsCurl::retries = count;
  return old;
}

bool S3fsCurl::SetPublicBucket(bool flag)
{
  bool old = S3fsCurl::is_public_bucket;
  S3fsCurl::is_public_bucket = flag;
  return old;
}

string S3fsCurl::SetDefaultAcl(const char* acl)
{
  string old = S3fsCurl::default_acl;
  S3fsCurl::default_acl = acl ? acl : "";
  return old;
}

bool S3fsCurl::SetUseRrs(bool flag)
{
  bool old = S3fsCurl::is_use_rrs;
  S3fsCurl::is_use_rrs = flag;
  return old;
}

bool S3fsCurl::SetUseSse(bool flag)
{
  bool old = S3fsCurl::is_use_sse;
  S3fsCurl::is_use_sse = flag;
  return old;
}

bool S3fsCurl::SetContentMd5(bool flag)
{
  bool old = S3fsCurl::is_content_md5;
  S3fsCurl::is_content_md5 = flag;
  return old;
}

bool S3fsCurl::SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey)
{
  if(!AccessKeyId || '\0' == AccessKeyId[0] || !SecretAccessKey || '\0' == SecretAccessKey[0]){
    return false;
  }
  AWSAccessKeyId     = AccessKeyId;
  AWSSecretAccessKey = SecretAccessKey;
  return true;
}

long S3fsCurl::SetSslVerifyHostname(long value)
{
  if(0 != value && 1 != value){
    return -1;
  }
  long old = S3fsCurl::ssl_verify_hostname;
  S3fsCurl::ssl_verify_hostname = value;
  return old;
}
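
//
// Illustrative sketch (not part of the original source): the static setters
// above are how mount-time options are expected to be pushed into S3fsCurl
// before any request is issued, e.g.:
//
//   S3fsCurl::SetAccessKey(access_key_id, secret_access_key);
//   S3fsCurl::SetConnectTimeout(10);
//   S3fsCurl::SetReadwriteTimeout(30);
//   S3fsCurl::SetRetries(3);
//   S3fsCurl::SetDefaultAcl("private");
//
// Each setter returns the previous value, so a caller can restore a setting
// after temporarily overriding it. The variable names here (access_key_id,
// secret_access_key) are placeholders for the values parsed from the command
// line or the passwd file.
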
//-------------------------------------------------------------------
// Methods for S3fsCurl
//-------------------------------------------------------------------
S3fsCurl::S3fsCurl() :
  hCurl(NULL), path(""), base_path(""), saved_path(""), url(""), requestHeaders(NULL),
  bodydata(NULL), headdata(NULL), LastResponseCode(-1), postdata(NULL), postdata_remaining(0)
{
}

S3fsCurl::~S3fsCurl()
{
  DestroyCurlHandle();
}

bool S3fsCurl::CreateCurlHandle(bool force)
{
  pthread_mutex_lock(&S3fsCurl::curl_handles_lock);

  if(hCurl){
    if(!force){
      FGPRINT("S3fsCurl::CreateCurlHandle: already created a handle.\n");
      return false;
    }
    if(!DestroyCurlHandle()){
      FGPRINT("S3fsCurl::CreateCurlHandle: could not destroy handle.\n");
      return false;
    }
    ClearInternalData();
    FGPRINT("S3fsCurl::CreateCurlHandle: had a handle, so it was destroyed.\n");
  }

  if(NULL == (hCurl = curl_easy_init())){
    FGPRINT("S3fsCurl::CreateCurlHandle: Failed to create handle.\n");
    return false;
  }
  curl_easy_reset(hCurl);
  curl_easy_setopt(hCurl, CURLOPT_NOSIGNAL, 1);
  curl_easy_setopt(hCurl, CURLOPT_FOLLOWLOCATION, true);
  curl_easy_setopt(hCurl, CURLOPT_CONNECTTIMEOUT, S3fsCurl::connect_timeout);
  curl_easy_setopt(hCurl, CURLOPT_NOPROGRESS, 0);
  curl_easy_setopt(hCurl, CURLOPT_PROGRESSFUNCTION, S3fsCurl::CurlProgress);
  curl_easy_setopt(hCurl, CURLOPT_PROGRESSDATA, hCurl);
  // curl_easy_setopt(hCurl, CURLOPT_FORBID_REUSE, 1);

  if(0 == S3fsCurl::ssl_verify_hostname){
    curl_easy_setopt(hCurl, CURLOPT_SSL_VERIFYHOST, 0);
  }
  if(S3fsCurl::curl_ca_bundle.size() != 0){
    curl_easy_setopt(hCurl, CURLOPT_CAINFO, S3fsCurl::curl_ca_bundle.c_str());
  }
  if(S3fsCurl::is_dns_cache && S3fsCurl::hCurlShare){
    curl_easy_setopt(hCurl, CURLOPT_SHARE, S3fsCurl::hCurlShare);
  }

  S3fsCurl::curl_times[hCurl]    = time(0);
  S3fsCurl::curl_progress[hCurl] = progress_t(-1, -1);

  pthread_mutex_unlock(&S3fsCurl::curl_handles_lock);

  return true;
}

bool S3fsCurl::DestroyCurlHandle(void)
{
  if(!hCurl){
    return false;
  }
  pthread_mutex_lock(&S3fsCurl::curl_handles_lock);

  S3fsCurl::curl_times.erase(hCurl);
  S3fsCurl::curl_progress.erase(hCurl);
  curl_easy_cleanup(hCurl);
  hCurl = NULL;
  ClearInternalData();

  pthread_mutex_unlock(&S3fsCurl::curl_handles_lock);
  return true;
}

bool S3fsCurl::ClearInternalData(void)
{
  if(hCurl){
    return false;
  }
  path       = "";
  base_path  = "";
  saved_path = "";
  url        = "";
  if(requestHeaders){
    curl_slist_free_all(requestHeaders);
    requestHeaders = NULL;
  }
  responseHeaders.clear();
  if(bodydata){
    delete bodydata;
    bodydata = NULL;
  }
  if(headdata){
    delete headdata;
    headdata = NULL;
  }
  LastResponseCode = -1;
  return true;
}

bool S3fsCurl::GetResponseCode(long& responseCode)
{
  if(!hCurl){
    return false;
  }
  responseCode = -1;
  if(CURLE_OK != curl_easy_getinfo(hCurl, CURLINFO_RESPONSE_CODE, &LastResponseCode)){
    return false;
  }
  responseCode = LastResponseCode;
  return true;
}

//
// returns curl return code
//
int S3fsCurl::RequestPerform(FILE* file)
{
  if(debug){
    char* ptr_url = NULL;
    curl_easy_getinfo(hCurl, CURLINFO_EFFECTIVE_URL , &ptr_url);
    SYSLOGDBG("connecting to URL %s", SAFESTRPTR(ptr_url));
  }
  // curl_easy_setopt(curl, CURLOPT_VERBOSE, true);

  // 1 attempt + retries...
  for(int retrycnt = S3fsCurl::retries; 0 < retrycnt; retrycnt--){
    if(file){
      rewind(file);
    }
    if(bodydata){
      bodydata->Clear();
    }
    if(headdata){
      headdata->Clear();
    }

    // Requests
    CURLcode curlCode = curl_easy_perform(hCurl);

    // Check result
    switch(curlCode){
      case CURLE_OK:
        // Need to look at the HTTP response code
        if(0 != curl_easy_getinfo(hCurl, CURLINFO_RESPONSE_CODE, &LastResponseCode)){
          SYSLOGERR("curl_easy_getinfo failed while trying to retrieve HTTP response code");
          return -EIO;
        }
        SYSLOGDBG("HTTP response code %ld", LastResponseCode);

        if(400 > LastResponseCode){
          return 0;
        }
        if(500 <= LastResponseCode){
          SYSLOGERR("###HTTP response=%ld", LastResponseCode);
          sleep(4);
          break;
        }

        // Service response codes which are >= 400 && < 500
        switch(LastResponseCode){
          case 400:
            SYSLOGDBGERR("HTTP response code 400 was returned");
            SYSLOGDBGERR("Body Text: %s", (bodydata ? bodydata->str() : ""));
            SYSLOGDBG("Now returning EIO");
            return -EIO;

          case 403:
            SYSLOGDBGERR("HTTP response code 403 was returned");
            SYSLOGDBGERR("Body Text: %s", (bodydata ? bodydata->str() : ""));
            return -EPERM;

          case 404:
            SYSLOGDBG("HTTP response code 404 was returned");
            SYSLOGDBG("Body Text: %s", (bodydata ? bodydata->str() : ""));
            SYSLOGDBG("Now returning ENOENT");
            return -ENOENT;

          default:
            SYSLOGERR("###response=%ld", LastResponseCode);
            SYSLOGDBG("Body Text: %s", (bodydata ? bodydata->str() : ""));
            FGPRINT("responseCode %ld\n", LastResponseCode);
            FGPRINT("Body Text: %s", (bodydata ? bodydata->str() : ""));
            return -EIO;
        }
        break;

      case CURLE_WRITE_ERROR:
        SYSLOGERR("### CURLE_WRITE_ERROR");
        sleep(2);
        break;

      case CURLE_OPERATION_TIMEDOUT:
        SYSLOGERR("### CURLE_OPERATION_TIMEDOUT");
        sleep(2);
        break;

      case CURLE_COULDNT_RESOLVE_HOST:
        SYSLOGERR("### CURLE_COULDNT_RESOLVE_HOST");
        sleep(2);
        break;

      case CURLE_COULDNT_CONNECT:
        SYSLOGERR("### CURLE_COULDNT_CONNECT");
        sleep(4);
        break;

      case CURLE_GOT_NOTHING:
        SYSLOGERR("### CURLE_GOT_NOTHING");
        sleep(4);
        break;

      case CURLE_ABORTED_BY_CALLBACK:
        SYSLOGERR("### CURLE_ABORTED_BY_CALLBACK");
        sleep(4);
        S3fsCurl::curl_times[hCurl] = time(0);
        break;

      case CURLE_PARTIAL_FILE:
        SYSLOGERR("### CURLE_PARTIAL_FILE");
        sleep(4);
        break;

      case CURLE_SEND_ERROR:
        SYSLOGERR("### CURLE_SEND_ERROR");
        sleep(2);
        break;

      case CURLE_RECV_ERROR:
        SYSLOGERR("### CURLE_RECV_ERROR");
        sleep(2);
        break;

      case CURLE_SSL_CACERT:
        // try to locate cert, if successful, then set the
        // option and continue
        if(0 == S3fsCurl::curl_ca_bundle.size()){
          if(!S3fsCurl::LocateBundle()){
            exit(EXIT_FAILURE);
          }
          if(0 != S3fsCurl::curl_ca_bundle.size()){
            retrycnt++;
            curl_easy_setopt(hCurl, CURLOPT_CAINFO, S3fsCurl::curl_ca_bundle.c_str());
            // break for switch-case, and continue loop.
            break;
          }
        }
        SYSLOGERR("curlCode: %i msg: %s", curlCode, curl_easy_strerror(curlCode));
        FGPRINT("%s: curlCode: %i -- %s\n", program_name.c_str(), curlCode, curl_easy_strerror(curlCode));
        exit(EXIT_FAILURE);
        break;

#ifdef CURLE_PEER_FAILED_VERIFICATION
      case CURLE_PEER_FAILED_VERIFICATION:
        {
          // scoped so the local declaration does not cross other case labels
          string::size_type first_pos = bucket.find_first_of(".");
          if(first_pos != string::npos){
            FGPRINT("%s: curl returned a CURL_PEER_FAILED_VERIFICATION error\n", program_name.c_str());
            FGPRINT("%s: security issue found: buckets with periods in their name are incompatible with https\n", program_name.c_str());
            FGPRINT("%s: This check can be over-ridden by using the -o ssl_verify_hostname=0\n", program_name.c_str());
            FGPRINT("%s: The certificate will still be checked but the hostname will not be verified.\n", program_name.c_str());
            FGPRINT("%s: A more secure method would be to use a bucket name without periods.\n", program_name.c_str());
          }else{
            FGPRINT("%s: RequestPerform: curlCode: %i -- %s\n", program_name.c_str(), curlCode, curl_easy_strerror(curlCode));
          }
          exit(EXIT_FAILURE);
        }
        break;
#endif

      // This should be invalid since curl option HTTP FAILONERROR is now off
      case CURLE_HTTP_RETURNED_ERROR:
        SYSLOGERR("### CURLE_HTTP_RETURNED_ERROR");

        if(0 != curl_easy_getinfo(hCurl, CURLINFO_RESPONSE_CODE, &LastResponseCode)){
          return -EIO;
        }
        SYSLOGERR("###response=%ld", LastResponseCode);

        // Try to map the HTTP response code to an errno.
        if(404 == LastResponseCode){
          return -ENOENT;
        }
        if(500 > LastResponseCode){
          return -EIO;
        }
        break;

      // Unknown CURL return code
      default:
        SYSLOGERR("###curlCode: %i msg: %s", curlCode, curl_easy_strerror(curlCode));
        exit(EXIT_FAILURE);
        break;
    }
    SYSLOGERR("###retrying...");
  }
  SYSLOGERR("###giving up");
  return -EIO;
}
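
//
// Illustrative summary (not part of the original source) of the retry policy
// implemented above: transient curl errors (write/timeout/resolve/connect/
// got-nothing/aborted/partial/send/recv) sleep 2 or 4 seconds and retry up to
// S3fsCurl::retries attempts; HTTP 5xx responses also sleep and retry; HTTP
// 400, 403 and 404 map immediately to -EIO, -EPERM and -ENOENT; any other curl
// failure is treated as fatal. When every attempt fails, -EIO is returned.
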
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
//
|
|
|
|
// Returns the Amazon AWS signature for the given parameters.
|
|
|
|
//
|
|
|
|
// @param method e.g., "GET"
|
|
|
|
// @param content_type e.g., "application/x-directory"
|
|
|
|
// @param date e.g., get_date()
|
|
|
|
// @param resource e.g., "/pub"
|
|
|
|
//
|
|
|
|
string S3fsCurl::CalcSignature(string method, string strMD5, string content_type, string date, string resource)
|
2013-03-30 13:37:14 +00:00
|
|
|
{
|
2011-08-31 20:36:40 +00:00
|
|
|
int ret;
|
|
|
|
int bytes_written;
|
|
|
|
int offset;
|
|
|
|
int write_attempts = 0;
|
|
|
|
string Signature;
|
|
|
|
string StringToSign;
|
2013-07-05 02:28:31 +00:00
|
|
|
|
2011-08-31 20:36:40 +00:00
|
|
|
StringToSign += method + "\n";
|
2013-07-05 02:28:31 +00:00
|
|
|
StringToSign += strMD5 + "\n"; // md5
|
2011-08-31 20:36:40 +00:00
|
|
|
StringToSign += content_type + "\n";
|
|
|
|
StringToSign += date + "\n";
|
2013-07-05 02:28:31 +00:00
|
|
|
for(curl_slist* headers = requestHeaders; headers; headers = headers->next){
|
|
|
|
if(0 == strncmp(headers->data, "x-amz", 5)){
|
|
|
|
StringToSign += headers->data;
|
|
|
|
StringToSign += "\n";
|
|
|
|
}
|
2011-08-31 20:36:40 +00:00
|
|
|
}
|
|
|
|
StringToSign += resource;
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
const void* key = S3fsCurl::AWSSecretAccessKey.data();
|
|
|
|
int key_len = S3fsCurl::AWSSecretAccessKey.size();
|
|
|
|
const unsigned char* sdata = reinterpret_cast<const unsigned char*>(StringToSign.data());
|
|
|
|
int sdata_len = StringToSign.size();
|
2011-08-31 20:36:40 +00:00
|
|
|
unsigned char md[EVP_MAX_MD_SIZE];
|
2013-07-05 02:28:31 +00:00
|
|
|
unsigned int md_len;
|
2011-08-31 20:36:40 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
HMAC(S3fsCurl::evp_md, key, key_len, sdata, sdata_len, md, &md_len);
|
2011-08-31 20:36:40 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
BIO* b64 = BIO_new(BIO_f_base64());
|
2011-08-31 20:36:40 +00:00
|
|
|
BIO* bmem = BIO_new(BIO_s_mem());
|
2013-07-05 02:28:31 +00:00
|
|
|
b64 = BIO_push(b64, bmem);
|
2011-08-31 20:36:40 +00:00
|
|
|
|
|
|
|
offset = 0;
|
2013-07-05 02:28:31 +00:00
|
|
|
for(;;){
|
2011-08-31 20:36:40 +00:00
|
|
|
bytes_written = BIO_write(b64, &(md[offset]), md_len);
|
|
|
|
write_attempts++;
|
|
|
|
// -1 indicates that either a hard error or a temporary error (for
|
|
|
|
// example, the BIO being busy) occurred, and we need to retry later.
|
|
|
|
// BIO_write can also do a short write; this loop handles that condition.
|
2013-07-05 02:28:31 +00:00
|
|
|
if(bytes_written <= 0){
|
2011-08-31 20:36:40 +00:00
|
|
|
// BIO_should_retry() indicates whether a temporary error occurred (retry)
|
|
|
|
// or the operation genuinely failed to complete.
|
2013-07-05 02:28:31 +00:00
|
|
|
if((ret = BIO_should_retry(b64))){
|
2011-08-31 20:36:40 +00:00
|
|
|
// Wait until the write can be accomplished
|
2013-07-05 02:28:31 +00:00
|
|
|
if(write_attempts <= 10){
|
2011-08-31 20:36:40 +00:00
|
|
|
continue;
|
2013-07-05 02:28:31 +00:00
|
|
|
}
|
2011-08-31 20:36:40 +00:00
|
|
|
// Too many write attempts
|
2013-03-30 13:37:14 +00:00
|
|
|
SYSLOGERR("Failure during BIO_write, returning null String");
|
2011-08-31 20:36:40 +00:00
|
|
|
BIO_free_all(b64);
|
|
|
|
Signature.clear();
|
|
|
|
return Signature;
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
}else{
|
2011-08-31 20:36:40 +00:00
|
|
|
// If not a retry then it is an error
|
2013-03-30 13:37:14 +00:00
|
|
|
SYSLOGERR("Failure during BIO_write, returning null String");
|
2011-08-31 20:36:40 +00:00
|
|
|
BIO_free_all(b64);
|
|
|
|
Signature.clear();
|
|
|
|
return Signature;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// The write request succeeded in writing some Bytes
|
|
|
|
offset += bytes_written;
|
|
|
|
md_len -= bytes_written;
|
|
|
|
|
|
|
|
// If there is no more data to write, the request sending has been
|
|
|
|
// completed
|
2013-03-30 13:37:14 +00:00
|
|
|
if(md_len <= 0){
|
2011-08-31 20:36:40 +00:00
|
|
|
break;
|
2013-03-30 13:37:14 +00:00
|
|
|
}
|
2011-08-31 20:36:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Flush the data
|
|
|
|
ret = BIO_flush(b64);
|
2013-07-05 02:28:31 +00:00
|
|
|
if(ret <= 0){
|
2013-03-30 13:37:14 +00:00
|
|
|
SYSLOGERR("Failure during BIO_flush, returning null String");
|
2011-08-31 20:36:40 +00:00
|
|
|
BIO_free_all(b64);
|
|
|
|
Signature.clear();
|
|
|
|
return Signature;
|
|
|
|
}
|
|
|
|
|
|
|
|
BUF_MEM *bptr;
|
|
|
|
BIO_get_mem_ptr(b64, &bptr);
|
|
|
|
|
|
|
|
Signature.resize(bptr->length - 1);
|
|
|
|
memcpy(&Signature[0], bptr->data, bptr->length-1);
|
|
|
|
|
|
|
|
BIO_free_all(b64);
|
|
|
|
|
|
|
|
return Signature;
|
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
// XML in BodyData has UploadId, Parse XML body for UploadId
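// The body typically looks like (values are illustrative):
//
//   <?xml version="1.0" encoding="UTF-8"?>
//   <InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
//     <Bucket>example-bucket</Bucket>
//     <Key>example-object</Key>
//     <UploadId>VXBsb2FkIElEIGZvciBlbHZpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA</UploadId>
//   </InitiateMultipartUploadResult>
//
// Only the text content of the <UploadId> element is extracted below.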
|
|
|
|
bool S3fsCurl::GetUploadId(string& upload_id)
|
2013-03-30 13:37:14 +00:00
|
|
|
{
|
2013-07-05 02:28:31 +00:00
|
|
|
bool result = false;
|
2011-03-01 19:35:55 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
if(!bodydata){
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
upload_id.clear();
|
|
|
|
|
|
|
|
xmlDocPtr doc = xmlReadMemory(bodydata->str(), bodydata->size(), "", NULL, 0);
|
|
|
|
if(NULL == doc || NULL == doc->children){
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
for(xmlNodePtr cur_node = doc->children->children; NULL != cur_node; cur_node = cur_node->next){
|
|
|
|
// For DEBUG
|
|
|
|
// string cur_node_name(reinterpret_cast<const char *>(cur_node->name));
|
|
|
|
// printf("cur_node_name: %s\n", cur_node_name.c_str());
|
|
|
|
|
|
|
|
if(XML_ELEMENT_NODE == cur_node->type){
|
|
|
|
string elementName = reinterpret_cast<const char*>(cur_node->name);
|
|
|
|
// For DEBUG
|
|
|
|
// printf("elementName: %s\n", elementName.c_str());
|
|
|
|
|
|
|
|
if(cur_node->children){
|
|
|
|
if(XML_TEXT_NODE == cur_node->children->type){
|
|
|
|
if(elementName == "UploadId") {
|
|
|
|
upload_id = reinterpret_cast<const char *>(cur_node->children->content);
|
|
|
|
result = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2011-03-01 19:35:55 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
xmlFreeDoc(doc);
|
2011-03-01 19:35:55 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
return result;
|
|
|
|
}
|
2011-03-01 19:35:55 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
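//
// Delete an object
//
// Example (illustrative layout; headers are built by the code below) :
//   DELETE /ObjectName HTTP/1.1
//   Host: BucketName.s3.amazonaws.com
//   Date: date
//   Content-Type:
//   Authorization: AWS AWSAccessKeyId:Signature
//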
int S3fsCurl::DeleteRequest(const char* tpath)
|
|
|
|
{
|
|
|
|
FGPRINT("S3fsCurl::DeleteRequest [tpath=%s]\n", SAFESTRPTR(tpath));
|
|
|
|
|
|
|
|
if(!tpath){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
MakeUrlResource(get_realpath(tpath).c_str(), resource, turl);
|
|
|
|
|
|
|
|
url = prepare_url(turl.c_str());
|
|
|
|
path = tpath;
|
|
|
|
requestHeaders = NULL;
|
|
|
|
responseHeaders.clear();
|
|
|
|
|
|
|
|
string date = get_date();
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string("Date: " + date).c_str());
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type: ");
|
|
|
|
if(!S3fsCurl::IsPublicBucket()){
|
|
|
|
requestHeaders = curl_slist_sort_insert(
|
|
|
|
requestHeaders,
|
|
|
|
string("Authorization: AWS " + AWSAccessKeyId + ":" +
|
|
|
|
CalcSignature("DELETE", "", "", date, resource)).c_str());
|
2011-03-01 19:35:55 +00:00
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_CUSTOMREQUEST, "DELETE");
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
|
|
|
|
return RequestPerform();
|
2011-03-01 19:35:55 +00:00
|
|
|
}
|
2013-03-30 13:37:14 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
//
|
|
|
|
// tpath : target path for head request
|
|
|
|
// bpath : saved into base_path
|
|
|
|
// savedpath : saved into saved_path
|
|
|
|
//
|
|
|
|
bool S3fsCurl::PreHeadRequest(const char* tpath, const char* bpath, const char* savedpath)
|
2013-05-16 02:02:55 +00:00
|
|
|
{
|
2013-07-05 02:28:31 +00:00
|
|
|
//FGPRINT("S3fsCurl::PreHeadRequest [tpath=%s][bpath=%s][save=%s]\n", SAFESTRPTR(tpath), SAFESTRPTR(bpath), SAFESTRPTR(savedpath));
|
2013-05-16 02:02:55 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
if(!tpath){
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return false;
|
2013-05-16 02:02:55 +00:00
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
MakeUrlResource(get_realpath(tpath).c_str(), resource, turl);
|
2013-05-16 02:02:55 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
// libcurl 7.17 does deep copy of url, deep copy "stable" url
|
|
|
|
url = prepare_url(turl.c_str());
|
|
|
|
path = tpath;
|
|
|
|
base_path = SAFESTRPTR(bpath);
|
|
|
|
saved_path = SAFESTRPTR(savedpath);
|
|
|
|
requestHeaders = NULL;
|
|
|
|
responseHeaders.clear();
|
2013-05-16 02:02:55 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
// requestHeaders
|
|
|
|
string date = get_date();
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string("Date: " + date).c_str());
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type: ");
|
|
|
|
if(!S3fsCurl::IsPublicBucket()){
|
|
|
|
requestHeaders = curl_slist_sort_insert(
|
|
|
|
requestHeaders,
|
|
|
|
string("Authorization: AWS " + AWSAccessKeyId + ":" +
|
|
|
|
CalcSignature("HEAD", "", "", date, resource)).c_str());
|
2013-05-16 02:02:55 +00:00
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_NOBODY, true); // HEAD
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_FILETIME, true); // Last-Modified
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
2013-05-16 02:02:55 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
// responseHeaders
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HEADERDATA, (void*)&responseHeaders);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HEADERFUNCTION, HeaderCallback);
|
2013-05-16 02:02:55 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
return true;
|
2013-05-16 02:02:55 +00:00
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
int S3fsCurl::HeadRequest(const char* tpath, headers_t& meta)
|
2013-03-30 13:37:14 +00:00
|
|
|
{
|
2013-07-05 02:28:31 +00:00
|
|
|
int result;
|
2013-05-16 02:02:55 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
FGPRINT("S3fsCurl::HeadRequest [tpath=%s]\n", SAFESTRPTR(tpath));
|
2013-05-16 02:02:55 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
if(!PreHeadRequest(tpath)){
|
|
|
|
return -1;
|
2013-03-30 13:37:14 +00:00
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
// Requests
|
|
|
|
if(0 != (result = RequestPerform())){
|
|
|
|
return result;
|
2013-05-16 02:02:55 +00:00
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
// file exists in s3
|
|
|
|
// fixme: clean this up.
|
|
|
|
meta.clear();
|
|
|
|
for(headers_t::iterator iter = responseHeaders.begin(); iter != responseHeaders.end(); ++iter){
|
|
|
|
string key = (*iter).first;
|
|
|
|
string value = (*iter).second;
|
|
|
|
if(key == "Content-Type"){
|
|
|
|
meta[key] = value;
|
|
|
|
}else if(key == "Content-Length"){
|
|
|
|
meta[key] = value;
|
|
|
|
}else if(key == "ETag"){
|
|
|
|
meta[key] = value;
|
|
|
|
}else if(key == "Last-Modified"){
|
|
|
|
meta[key] = value;
|
|
|
|
}else if(key.substr(0, 5) == "x-amz"){
|
|
|
|
meta[key] = value;
|
|
|
|
}else{
|
|
|
|
// Check for upper case
|
|
|
|
transform(key.begin(), key.end(), key.begin(), static_cast<int (*)(int)>(std::tolower));
|
|
|
|
if(key.substr(0, 5) == "x-amz"){
|
|
|
|
meta[key] = value;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
2013-05-16 02:02:55 +00:00
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
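//
// Update an object's meta data with a server-side copy
// (PUT with an x-amz-copy-source header).
//
// Example (illustrative layout; headers are built by the code below) :
//   PUT /ObjectName HTTP/1.1
//   Host: BucketName.s3.amazonaws.com
//   Date: date
//   x-amz-copy-source: /BucketName/ObjectName
//   x-amz-meta-...: replaced meta headers
//   Authorization: AWS AWSAccessKeyId:Signature
//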
int S3fsCurl::PutHeadRequest(const char* tpath, headers_t& meta, bool ow_sse_flg)
|
2013-05-16 02:02:55 +00:00
|
|
|
{
|
2013-07-05 02:28:31 +00:00
|
|
|
FGPRINT("S3fsCurl::PutHeadRequest [tpath=%s]\n", SAFESTRPTR(tpath));
|
2013-05-16 02:02:55 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
if(!tpath){
|
|
|
|
return -1;
|
2013-05-16 02:02:55 +00:00
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
MakeUrlResource(get_realpath(tpath).c_str(), resource, turl);
|
2013-05-16 02:02:55 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
url = prepare_url(turl.c_str());
|
|
|
|
path = tpath;
|
|
|
|
requestHeaders = NULL;
|
|
|
|
responseHeaders.clear();
|
|
|
|
bodydata = new BodyData();
|
|
|
|
|
|
|
|
// Make request headers
|
|
|
|
string date = get_date();
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string("Date: " + date).c_str());
|
|
|
|
|
|
|
|
string ContentType;
|
|
|
|
for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
|
|
|
|
string key = (*iter).first;
|
|
|
|
string value = (*iter).second;
|
|
|
|
if(key == "Content-Type"){
|
|
|
|
ContentType = value;
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string(key + ":" + value).c_str());
|
|
|
|
}else if(key.substr(0,9) == "x-amz-acl"){
|
|
|
|
// not set value, but after set it.
|
|
|
|
}else if(key.substr(0,10) == "x-amz-meta"){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string(key + ":" + value).c_str());
|
|
|
|
}else if(key == "x-amz-copy-source"){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string(key + ":" + value).c_str());
|
|
|
|
}else if(!ow_sse_flg && key == "x-amz-server-side-encryption"){
|
|
|
|
// If ow_sse_flg is false, SSE inherit from meta.
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string(key + ":" + value).c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// "x-amz-acl", rrs, sse
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string("x-amz-acl:" + S3fsCurl::default_acl).c_str());
|
|
|
|
if(S3fsCurl::is_use_rrs){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class:REDUCED_REDUNDANCY");
|
|
|
|
}
|
|
|
|
if(ow_sse_flg && S3fsCurl::is_use_sse){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption:AES256");
|
|
|
|
}
|
|
|
|
if(!S3fsCurl::IsPublicBucket()){
|
|
|
|
requestHeaders = curl_slist_sort_insert(
|
|
|
|
requestHeaders,
|
|
|
|
string("Authorization: AWS " + AWSAccessKeyId + ":" +
|
|
|
|
CalcSignature("PUT", "", ContentType, date, resource)).c_str());
|
2013-03-30 13:37:14 +00:00
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
// setopt
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true); // HTTP PUT
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0); // Content-Length
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
|
|
|
|
FGPRINT(" copying... [path=%s]\n", tpath);
|
|
|
|
SYSLOGDBG("copy path=%s", tpath);
|
|
|
|
|
|
|
|
int result = RequestPerform();
|
|
|
|
delete bodydata;
|
|
|
|
bodydata = NULL;
|
|
|
|
|
|
|
|
return result;
|
2013-03-30 13:37:14 +00:00
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
int S3fsCurl::PutRequest(const char* tpath, headers_t& meta, int fd, bool ow_sse_flg)
|
2013-03-30 13:37:14 +00:00
|
|
|
{
|
2013-07-05 02:28:31 +00:00
|
|
|
struct stat st;
|
|
|
|
FILE* file = NULL;
|
2013-03-30 13:37:14 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
FGPRINT("S3fsCurl::PutRequest [tpath=%s]\n", SAFESTRPTR(tpath));
|
2013-03-30 13:37:14 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
if(!tpath){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if(-1 != fd){
|
|
|
|
if(-1 == fstat(fd, &st) || NULL == (file = fdopen(fd, "rb"))){
|
|
|
|
FGPRINT("S3fsCurl::PutRequest : Invalid file discriptor(errno=%d)\n", errno);
|
|
|
|
SYSLOGERR("Invalid file discriptor(errno=%d)", errno);
|
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
}else{
|
|
|
|
// This case is creating a zero byte object. (called from create_file_object())
|
|
|
|
FGPRINT("S3fsCurl::PutRequest : create zero byte file object.\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
MakeUrlResource(get_realpath(tpath).c_str(), resource, turl);
|
|
|
|
|
|
|
|
url = prepare_url(turl.c_str());
|
|
|
|
path = tpath;
|
|
|
|
requestHeaders = NULL;
|
|
|
|
responseHeaders.clear();
|
|
|
|
bodydata = new BodyData();
|
|
|
|
|
|
|
|
// Make request headers
|
|
|
|
string date = get_date();
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string("Date: " + date).c_str());
|
|
|
|
|
|
|
|
string strMD5;
|
|
|
|
if(-1 != fd && S3fsCurl::is_content_md5){
|
|
|
|
strMD5 = GetContentMD5(fd);
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string("Content-MD5: " + strMD5).c_str());
|
|
|
|
}
|
|
|
|
|
|
|
|
string ContentType;
|
|
|
|
for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
|
|
|
|
string key = (*iter).first;
|
|
|
|
string value = (*iter).second;
|
|
|
|
if(key == "Content-Type"){
|
|
|
|
ContentType = value;
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string(key + ":" + value).c_str());
|
|
|
|
}else if(key.substr(0,9) == "x-amz-acl"){
|
|
|
|
// not set value, but after set it.
|
|
|
|
}else if(key.substr(0,10) == "x-amz-meta"){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string(key + ":" + value).c_str());
|
|
|
|
}else if(!ow_sse_flg && key == "x-amz-server-side-encryption"){
|
|
|
|
// If ow_sse_flg is false, SSE inherit from meta.
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string(key + ":" + value).c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// "x-amz-acl", rrs, sse
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string("x-amz-acl:" + S3fsCurl::default_acl).c_str());
|
|
|
|
if(S3fsCurl::is_use_rrs){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class:REDUCED_REDUNDANCY");
|
|
|
|
}
|
|
|
|
if(ow_sse_flg && S3fsCurl::is_use_sse){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption:AES256");
|
|
|
|
}
|
|
|
|
if(!S3fsCurl::IsPublicBucket()){
|
|
|
|
requestHeaders = curl_slist_sort_insert(
|
|
|
|
requestHeaders,
|
|
|
|
string("Authorization: AWS " + AWSAccessKeyId + ":" +
|
|
|
|
CalcSignature("PUT", strMD5, ContentType, date, resource)).c_str());
|
|
|
|
}
|
|
|
|
|
|
|
|
// setopt
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true); // HTTP PUT
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
if(file){
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_INFILESIZE_LARGE, static_cast<curl_off_t>(st.st_size)); // Content-Length
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_INFILE, file);
|
|
|
|
}else{
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0); // Content-Length: 0
|
|
|
|
}
|
|
|
|
|
|
|
|
FGPRINT(" uploading... [path=%s][fd=%d][size=%zd]\n", tpath, fd, (-1 != fd ? st.st_size : 0));
|
|
|
|
SYSLOGDBG("upload path=%s", tpath);
|
|
|
|
|
|
|
|
int result = RequestPerform();
|
|
|
|
delete bodydata;
|
|
|
|
bodydata = NULL;
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
int S3fsCurl::GetObjectRequest(const char* tpath, int fd)
|
|
|
|
{
|
|
|
|
FILE* file;
|
|
|
|
int fd2;
|
|
|
|
FGPRINT("S3fsCurl::GetRequest [tpath=%s]\n", SAFESTRPTR(tpath));
|
|
|
|
|
|
|
|
if(!tpath){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
// duplicate fd
|
|
|
|
if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET) || NULL == (file = fdopen(fd2, "w+"))){
|
|
|
|
FGPRINT("S3fsCurl::GetRequest : Cloud not duplicate file discriptor(errno=%d)\n", errno);
|
|
|
|
SYSLOGERR("Cloud not duplicate file discriptor(errno=%d)", errno);
|
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
MakeUrlResource(get_realpath(tpath).c_str(), resource, turl);
|
|
|
|
|
|
|
|
url = prepare_url(turl.c_str());
|
|
|
|
path = tpath;
|
|
|
|
requestHeaders = NULL;
|
|
|
|
responseHeaders.clear();
|
|
|
|
|
|
|
|
string date = get_date();
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string("Date: " + date).c_str());
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type: ");
|
|
|
|
|
|
|
|
if(!S3fsCurl::IsPublicBucket()){
|
|
|
|
requestHeaders = curl_slist_sort_insert(
|
|
|
|
requestHeaders,
|
|
|
|
string("Authorization: AWS " + AWSAccessKeyId + ":" +
|
|
|
|
CalcSignature("GET", "", "", date, resource)).c_str());
|
|
|
|
}
|
|
|
|
|
|
|
|
// setopt
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_FILE, file);
|
|
|
|
|
|
|
|
FGPRINT(" downloading... [path=%s][fd=%d]\n", tpath, fd);
|
|
|
|
SYSLOGDBG("LOCAL FD");
|
|
|
|
|
|
|
|
int result = RequestPerform();
|
|
|
|
|
|
|
|
fflush(file);
|
|
|
|
fclose(file);
|
|
|
|
if(0 != lseek(fd, 0, SEEK_SET)){
|
|
|
|
FGPRINT("S3fsCurl::GetRequest : Cloud not seek file discriptor(errno=%d)\n", errno);
|
|
|
|
SYSLOGERR("Cloud not seek file discriptor(errno=%d)", errno);
|
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
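//
// Check that the bucket is reachable by sending a signed GET for the bucket
// root ("/"). Any HTTP error (e.g. 403/404) is surfaced through
// RequestPerform(), since CURLOPT_FAILONERROR is enabled for this request.
//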
int S3fsCurl::CheckBucket(void)
|
|
|
|
{
|
|
|
|
FGPRINT("S3fsCurl::CheckBucket\n");
|
|
|
|
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
MakeUrlResource("", resource, turl); // must be path = "".
|
|
|
|
|
|
|
|
url = turl; // don't use prepare_url() function.
|
|
|
|
path = "";
|
|
|
|
requestHeaders = NULL;
|
|
|
|
responseHeaders.clear();
|
|
|
|
bodydata = new BodyData();
|
|
|
|
|
|
|
|
string date = get_date();
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string("Date: " + date).c_str());
|
|
|
|
|
|
|
|
if(!S3fsCurl::IsPublicBucket()){
|
|
|
|
requestHeaders = curl_slist_sort_insert(
|
|
|
|
requestHeaders,
|
|
|
|
string("Authorization: AWS " + AWSAccessKeyId + ":" +
|
|
|
|
CalcSignature("GET", "", "", date, resource)).c_str());
|
|
|
|
}
|
|
|
|
|
|
|
|
// setopt
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_FAILONERROR, true);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
|
|
|
|
int result = RequestPerform();
|
|
|
|
delete bodydata;
|
|
|
|
bodydata = NULL;
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
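//
// List bucket contents ("GET Bucket") with an optional query string supplied
// by the caller, e.g. "prefix=dir/&delimiter=/&max-keys=1000".
//
// Example (illustrative layout) :
//   GET /?prefix=dir/&delimiter=/ HTTP/1.1
//   Host: BucketName.s3.amazonaws.com
//   Date: date
//   Authorization: AWS AWSAccessKeyId:Signature
//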
int S3fsCurl::ListBucketRequest(const char* tpath, const char* query)
|
|
|
|
{
|
|
|
|
FGPRINT("S3fsCurl::ListBucketRequest [tpath=%s]\n", SAFESTRPTR(tpath));
|
|
|
|
|
|
|
|
if(!tpath){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
MakeUrlResource("", resource, turl); // NOTICE: path is "".
|
|
|
|
if(query){
|
|
|
|
turl += "?";
|
|
|
|
turl += query;
|
|
|
|
}
|
|
|
|
|
|
|
|
url = prepare_url(turl.c_str());
|
|
|
|
path = tpath;
|
|
|
|
requestHeaders = NULL;
|
|
|
|
responseHeaders.clear();
|
|
|
|
bodydata = new BodyData();
|
|
|
|
|
|
|
|
string date = get_date();
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string("Date: " + date).c_str());
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type: ");
|
|
|
|
|
|
|
|
if(!S3fsCurl::IsPublicBucket()){
|
|
|
|
requestHeaders = curl_slist_sort_insert(
|
|
|
|
requestHeaders,
|
|
|
|
string("Authorization: AWS " + AWSAccessKeyId + ":" +
|
|
|
|
CalcSignature("GET", "", "", date, (resource + "/"))).c_str());
|
|
|
|
}
|
|
|
|
|
|
|
|
// setopt
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
|
|
|
|
return RequestPerform();
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Initialize multipart upload
|
|
|
|
//
|
|
|
|
// Example :
|
|
|
|
// POST /example-object?uploads HTTP/1.1
|
|
|
|
// Host: example-bucket.s3.amazonaws.com
|
|
|
|
// Date: Mon, 1 Nov 2010 20:34:56 GMT
|
|
|
|
// Authorization: AWS VGhpcyBtZXNzYWdlIHNpZ25lZCBieSBlbHZpbmc=
|
|
|
|
//
|
|
|
|
int S3fsCurl::PreMultipartPostRequest(const char* tpath, headers_t& meta, string& upload_id, bool ow_sse_flg)
|
|
|
|
{
|
|
|
|
FGPRINT("S3fsCurl::PreMultipartPostRequest [tpath=%s]\n", SAFESTRPTR(tpath));
|
|
|
|
|
|
|
|
if(!tpath){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
MakeUrlResource(get_realpath(tpath).c_str(), resource, turl);
|
|
|
|
|
|
|
|
turl += "?uploads";
|
|
|
|
resource += "?uploads";
|
|
|
|
url = prepare_url(turl.c_str());
|
|
|
|
path = tpath;
|
|
|
|
requestHeaders = NULL;
|
|
|
|
responseHeaders.clear();
|
|
|
|
bodydata = new BodyData();
|
|
|
|
|
|
|
|
string date = get_date();
|
|
|
|
string contype = S3fsCurl::LookupMimeType(string(tpath));
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string("Date: " + date).c_str());
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept: ");
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Length: ");
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string("Content-Type: " + contype).c_str());
|
|
|
|
|
|
|
|
for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
|
|
|
|
string key = (*iter).first;
|
|
|
|
string value = (*iter).second;
|
|
|
|
|
|
|
|
if(key.substr(0,9) == "x-amz-acl"){
|
|
|
|
// not set value, but after set it.
|
|
|
|
}else if(key.substr(0,10) == "x-amz-meta"){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string(key + ":" + value).c_str());
|
|
|
|
}else if(!ow_sse_flg && key == "x-amz-server-side-encryption"){
|
|
|
|
// If ow_sse_flg is false, SSE inherit from meta.
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string(key + ":" + value).c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// "x-amz-acl", rrs, sse
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string("x-amz-acl:" + S3fsCurl::default_acl).c_str());
|
|
|
|
if(S3fsCurl::is_use_rrs){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class:REDUCED_REDUNDANCY");
|
|
|
|
}
|
|
|
|
if(ow_sse_flg && S3fsCurl::is_use_sse){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption:AES256");
|
|
|
|
}
|
|
|
|
if(!S3fsCurl::IsPublicBucket()){
|
|
|
|
requestHeaders = curl_slist_sort_insert(
|
|
|
|
requestHeaders,
|
|
|
|
string("Authorization: AWS " + AWSAccessKeyId + ":" +
|
|
|
|
CalcSignature("POST", "", contype, date, resource)).c_str());
|
|
|
|
}
|
|
|
|
|
|
|
|
// setopt
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_POST, true); // POST
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, 0);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
|
|
|
|
// request
|
|
|
|
int result;
|
|
|
|
if(0 != (result = RequestPerform())){
|
|
|
|
delete bodydata;
|
|
|
|
bodydata = NULL;
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Parse XML body for UploadId
|
|
|
|
if(!S3fsCurl::GetUploadId(upload_id)){
|
|
|
|
delete bodydata;
|
|
|
|
bodydata = NULL;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
delete bodydata;
|
|
|
|
bodydata = NULL;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
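//
// Complete a multipart upload
//
// Example (illustrative layout; the XML body is built below from "parts") :
//   POST /ObjectName?uploadId=UploadId HTTP/1.1
//   Host: BucketName.s3.amazonaws.com
//   Date: date
//   Content-Length: Size
//   Authorization: AWS AWSAccessKeyId:Signature
//
//   <CompleteMultipartUpload>
//     <Part>
//       <PartNumber>1</PartNumber>
//       <ETag>"a54357aff0632cce46d942af68356b38"</ETag>
//     </Part>
//     ...
//   </CompleteMultipartUpload>
//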
int S3fsCurl::CompleteMultipartPostRequest(const char* tpath, string& upload_id, filepartList_t& parts)
|
|
|
|
{
|
|
|
|
FGPRINT("S3fsCurl::CompleteMultipartPostRequest [tpath=%s][parts=%zd]\n", SAFESTRPTR(tpath), parts.size());
|
|
|
|
|
|
|
|
if(!tpath){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
// make contents
|
|
|
|
string postContent;
|
|
|
|
postContent += "<CompleteMultipartUpload>\n";
|
|
|
|
for(int cnt = 0; cnt < (int)parts.size(); cnt++){
|
|
|
|
if(!parts[cnt].uploaded){
|
|
|
|
FGPRINT("S3fsCurl::CompleteMultipartPostRequest : %d file part is not uploaded.\n", cnt + 1);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
postContent += "<Part>\n";
|
|
|
|
postContent += " <PartNumber>" + IntToStr(cnt + 1) + "</PartNumber>\n";
|
|
|
|
postContent += " <ETag>\"" + parts[cnt].etag + "\"</ETag>\n";
|
|
|
|
postContent += "</Part>\n";
|
|
|
|
}
|
|
|
|
postContent += "</CompleteMultipartUpload>\n";
|
|
|
|
|
|
|
|
// set postdata
|
|
|
|
postdata = reinterpret_cast<const unsigned char*>(postContent.c_str());
|
|
|
|
postdata_remaining = postContent.size(); // without null
|
|
|
|
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
MakeUrlResource(get_realpath(tpath).c_str(), resource, turl);
|
|
|
|
|
|
|
|
turl += "?uploadId=" + upload_id;
|
|
|
|
resource += "?uploadId=" + upload_id;
|
|
|
|
url = prepare_url(turl.c_str());
|
|
|
|
path = tpath;
|
|
|
|
requestHeaders = NULL;
|
|
|
|
responseHeaders.clear();
|
|
|
|
bodydata = new BodyData();
|
|
|
|
|
|
|
|
string date = get_date();
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string("Date: " + date).c_str());
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept:");
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type:");
|
|
|
|
|
|
|
|
if(!S3fsCurl::IsPublicBucket()){
|
|
|
|
requestHeaders = curl_slist_sort_insert(
|
|
|
|
requestHeaders,
|
|
|
|
string("Authorization: AWS " + AWSAccessKeyId + ":" +
|
|
|
|
CalcSignature("POST", "", "", date, resource)).c_str());
|
|
|
|
}
|
|
|
|
|
|
|
|
// setopt
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_POST, true); // POST
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, (curl_off_t)postdata_remaining);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback);
|
|
|
|
|
|
|
|
// request
|
|
|
|
int result = RequestPerform();
|
|
|
|
delete bodydata;
|
|
|
|
bodydata = NULL;
|
|
|
|
postdata = NULL;
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
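//
// List in-progress multipart uploads for the bucket
//
// Example (illustrative layout) :
//   GET /?uploads HTTP/1.1
//   Host: BucketName.s3.amazonaws.com
//   Date: date
//   Authorization: AWS AWSAccessKeyId:Signature
//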
int S3fsCurl::MultipartListRequest(string& body)
|
|
|
|
{
|
|
|
|
FGPRINT("S3fsCurl::MultipartListRequest\n");
|
|
|
|
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
path = "/";
|
|
|
|
MakeUrlResource(get_realpath(path.c_str()).c_str(), resource, turl);
|
|
|
|
|
|
|
|
turl += "?uploads";
|
|
|
|
resource += "?uploads";
|
|
|
|
url = prepare_url(turl.c_str());
|
|
|
|
requestHeaders = NULL;
|
|
|
|
responseHeaders.clear();
|
|
|
|
bodydata = new BodyData();
|
|
|
|
|
|
|
|
string date = get_date();
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string("Date: " + date).c_str());
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept: ");
|
|
|
|
|
|
|
|
if(!S3fsCurl::IsPublicBucket()){
|
|
|
|
requestHeaders = curl_slist_sort_insert(
|
|
|
|
requestHeaders,
|
|
|
|
string("Authorization: AWS " + AWSAccessKeyId + ":" +
|
|
|
|
CalcSignature("GET", "", "", date, resource)).c_str());
|
|
|
|
}
|
|
|
|
|
|
|
|
// setopt
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
|
|
|
|
int result;
|
|
|
|
if(0 == (result = RequestPerform()) && 0 < bodydata->size()){
|
|
|
|
body = bodydata->str();
|
|
|
|
}else{
|
|
|
|
body = "";
|
|
|
|
}
|
|
|
|
delete bodydata;
|
|
|
|
bodydata = NULL;
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// PUT /ObjectName?partNumber=PartNumber&uploadId=UploadId HTTP/1.1
|
|
|
|
// Host: BucketName.s3.amazonaws.com
|
|
|
|
// Date: date
|
|
|
|
// Content-Length: Size
|
|
|
|
// Authorization: Signature
|
|
|
|
//
|
|
|
|
// PUT /my-movie.m2ts?partNumber=1&uploadId=VCVsb2FkIElEIGZvciBlbZZpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZR HTTP/1.1
|
|
|
|
// Host: example-bucket.s3.amazonaws.com
|
|
|
|
// Date: Mon, 1 Nov 2010 20:34:56 GMT
|
|
|
|
// Content-Length: 10485760
|
|
|
|
// Content-MD5: pUNXr/BjKK5G2UKvaRRrOA==
|
|
|
|
// Authorization: AWS VGhpcyBtZXNzYWdlIHNpZ25lZGGieSRlbHZpbmc=
|
|
|
|
//
|
|
|
|
int S3fsCurl::UploadMultipartPostRequest(const char* tpath, const char* part_path, int part_num, string& upload_id, string& ETag)
|
|
|
|
{
|
|
|
|
int part_fd;
|
|
|
|
FILE* part_file;
|
|
|
|
struct stat st;
|
|
|
|
string md5;
|
|
|
|
|
|
|
|
FGPRINT("S3fsCurl::UploadMultipartPostRequest [tpath=%s][fpath=%s][part=%d]\n", SAFESTRPTR(tpath), SAFESTRPTR(part_path), part_num);
|
|
|
|
|
|
|
|
// make md5 and file pointer
|
|
|
|
if(-1 == (part_fd = open(part_path, O_RDONLY))){
|
|
|
|
FGPRINT("S3fsCurl::UploadMultipartPostRequest : Could not open file(%s) - errorno(%d)\n", part_path, errno);
|
|
|
|
SYSLOGERR("Could not open file(%s) - errno(%d)", part_path, errno);
|
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
if(-1 == fstat(part_fd, &st)){
|
|
|
|
FGPRINT("S3fsCurl::UploadMultipartPostRequest: Invalid file(%s) discriptor(errno=%d)\n", part_path, errno);
|
|
|
|
SYSLOGERR("Invalid file(%s) discriptor(errno=%d)", part_path, errno);
|
|
|
|
close(part_fd);
|
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
md5 = md5sum(part_fd);
|
|
|
|
if(md5.empty()){
|
|
|
|
FGPRINT("S3fsCurl::UploadMultipartPostRequest: Could not make md5 for file(%s)\n", part_path);
|
|
|
|
SYSLOGERR("Could not make md5 for file(%s)", part_path);
|
|
|
|
close(part_fd);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if(NULL == (part_file = fdopen(part_fd, "rb"))){
|
|
|
|
FGPRINT("S3fsCurl::UploadMultipartPostRequest: Invalid file(%s) discriptor(errno=%d)\n", part_path, errno);
|
|
|
|
SYSLOGERR("Invalid file(%s) discriptor(errno=%d)", part_path, errno);
|
|
|
|
close(part_fd);
|
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
string urlargs = "?partNumber=" + IntToStr(part_num) + "&uploadId=" + upload_id;
|
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
MakeUrlResource(get_realpath(tpath).c_str(), resource, turl);
|
|
|
|
|
|
|
|
resource += urlargs;
|
|
|
|
turl += urlargs;
|
|
|
|
url = prepare_url(turl.c_str());
|
|
|
|
path = tpath;
|
|
|
|
requestHeaders = NULL;
|
|
|
|
responseHeaders.clear();
|
|
|
|
bodydata = new BodyData();
|
|
|
|
headdata = new BodyData();
|
|
|
|
|
|
|
|
string date = get_date();
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string("Date: " + date).c_str());
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept: ");
|
|
|
|
|
|
|
|
if(!S3fsCurl::IsPublicBucket()){
|
|
|
|
requestHeaders = curl_slist_sort_insert(
|
|
|
|
requestHeaders,
|
|
|
|
string("Authorization: AWS " + AWSAccessKeyId + ":" +
|
|
|
|
CalcSignature("PUT", "", "", date, resource)).c_str());
|
|
|
|
}
|
|
|
|
|
|
|
|
// setopt
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true); // HTTP PUT
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HEADERDATA, (void*)headdata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HEADERFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_INFILESIZE_LARGE, static_cast<curl_off_t>(st.st_size)); // Content-Length
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_INFILE, part_file);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
|
|
|
|
// request
|
|
|
|
int result = RequestPerform();
|
|
|
|
if(NULL != strstr(headdata->str(), md5.c_str())){
|
|
|
|
ETag = md5;
|
|
|
|
}
|
|
|
|
// closing
|
|
|
|
fclose(part_file);
|
|
|
|
delete bodydata;
|
|
|
|
bodydata = NULL;
|
|
|
|
delete headdata;
|
|
|
|
headdata = NULL;
|
|
|
|
|
|
|
|
if(0 != result){
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
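//
// Upload a part by copying a byte range from an existing object
// ("Upload Part - Copy").
//
// Example (illustrative layout; source and range come from meta[]) :
//   PUT /ObjectName?partNumber=PartNumber&uploadId=UploadId HTTP/1.1
//   Host: BucketName.s3.amazonaws.com
//   Date: date
//   x-amz-copy-source: /SourceBucket/SourceObject
//   x-amz-copy-source-range: bytes=0-10485759
//   Authorization: AWS AWSAccessKeyId:Signature
//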
int S3fsCurl::CopyMultipartPostRequest(const char* from, const char* to, int part_num, string& upload_id, headers_t& meta, string& ETag, bool ow_sse_flg)
|
|
|
|
{
|
|
|
|
FGPRINT("S3fsCurl::CopyMultipartPostRequest [from=%s][to=%s][part=%d]\n", SAFESTRPTR(from), SAFESTRPTR(to), part_num);
|
|
|
|
|
|
|
|
if(!from || !to){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if(!CreateCurlHandle(true)){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
string urlargs = "?partNumber=" + IntToStr(part_num) + "&uploadId=" + upload_id;
|
|
|
|
string resource;
|
|
|
|
string turl;
|
|
|
|
MakeUrlResource(get_realpath(to).c_str(), resource, turl);
|
|
|
|
|
|
|
|
resource += urlargs;
|
|
|
|
turl += urlargs;
|
|
|
|
url = prepare_url(turl.c_str());
|
|
|
|
path = to;
|
|
|
|
requestHeaders = NULL;
|
|
|
|
responseHeaders.clear();
|
|
|
|
bodydata = new BodyData();
|
|
|
|
headdata = new BodyData();
|
|
|
|
|
|
|
|
// Make request headers
|
|
|
|
string date = get_date();
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string("Date: " + date).c_str());
|
|
|
|
|
|
|
|
string ContentType;
|
|
|
|
for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
|
|
|
|
string key = (*iter).first;
|
|
|
|
string value = (*iter).second;
|
|
|
|
if(key == "Content-Type"){
|
|
|
|
ContentType = value;
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string(key + ":" + value).c_str());
|
|
|
|
}else if(key == "x-amz-copy-source"){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string(key + ":" + value).c_str());
|
|
|
|
}else if(key == "x-amz-copy-source-range"){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string(key + ":" + value).c_str());
|
|
|
|
}else if(key.substr(0,9) == "x-amz-acl"){
|
|
|
|
// not set value, but after set it.
|
|
|
|
}else if(!ow_sse_flg && key == "x-amz-server-side-encryption"){
|
|
|
|
// If ow_sse_flg is false, SSE inherit from meta.
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string(key + ":" + value).c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// "x-amz-acl", rrs, sse
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, string("x-amz-acl:" + S3fsCurl::default_acl).c_str());
|
|
|
|
if(S3fsCurl::is_use_rrs){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class:REDUCED_REDUNDANCY");
|
|
|
|
}
|
|
|
|
if(ow_sse_flg && S3fsCurl::is_use_sse){
|
|
|
|
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-server-side-encryption:AES256");
|
|
|
|
}
|
|
|
|
if(!S3fsCurl::IsPublicBucket()){
|
|
|
|
requestHeaders = curl_slist_sort_insert(
|
|
|
|
requestHeaders,
|
|
|
|
string("Authorization: AWS " + AWSAccessKeyId + ":" +
|
|
|
|
CalcSignature("PUT", "", ContentType, date, resource)).c_str());
|
|
|
|
}
|
|
|
|
|
|
|
|
// setopt
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_UPLOAD, true); // HTTP PUT
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)bodydata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HEADERDATA, (void*)headdata);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HEADERFUNCTION, WriteMemoryCallback);
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_INFILESIZE, 0); // Content-Length
|
|
|
|
curl_easy_setopt(hCurl, CURLOPT_HTTPHEADER, requestHeaders);
|
|
|
|
|
|
|
|
// request
|
|
|
|
FGPRINT(" copying... [from=%s][to=%s][part=%d]\n", from, to, part_num);
|
|
|
|
SYSLOGDBG("copy path from=%s, to=%s, part=%d", from, to, part_num);
|
|
|
|
|
|
|
|
int result = RequestPerform();
|
|
|
|
if(0 == result){
|
|
|
|
const char* start_etag= strstr(bodydata->str(), "ETag");
|
|
|
|
const char* end_etag = strstr(bodydata->str(), "/ETag>");
|
|
|
|
ETag.assign((start_etag + 11), (size_t)(end_etag - (start_etag + 11) - 7));
|
|
|
|
}
|
|
|
|
delete bodydata;
|
|
|
|
bodydata = NULL;
|
|
|
|
delete headdata;
|
|
|
|
headdata = NULL;
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
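//
// Rewrite a large object onto itself with a multipart copy so that its meta
// data can be updated: each part is produced by CopyMultipartPostRequest()
// with an "x-amz-copy-source-range: bytes=start-end" header covering at most
// MAX_MULTI_COPY_SOURCE_SIZE bytes.
//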
int S3fsCurl::MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool ow_sse_flg)
|
|
|
|
{
|
|
|
|
int result;
|
|
|
|
string upload_id;
|
|
|
|
off_t chunk;
|
|
|
|
off_t bytes_remaining;
|
|
|
|
filepart part;
|
|
|
|
filepartList_t list;
|
|
|
|
stringstream strrange;
|
|
|
|
|
|
|
|
FGPRINT("S3fsCurl::MultipartHeadRequest [tpath=%s]\n", SAFESTRPTR(tpath));
|
|
|
|
|
|
|
|
if(0 != (result = PreMultipartPostRequest(tpath, meta, upload_id, ow_sse_flg))){
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
DestroyCurlHandle();
|
|
|
|
|
|
|
|
part.uploaded = true;
|
|
|
|
part.partfile = "";
|
|
|
|
for(bytes_remaining = size, chunk = 0; 0 < bytes_remaining; bytes_remaining -= chunk){
|
|
|
|
chunk = bytes_remaining > MAX_MULTI_COPY_SOURCE_SIZE ? MAX_MULTI_COPY_SOURCE_SIZE : bytes_remaining;
|
|
|
|
|
|
|
|
strrange << "bytes=" << (size - bytes_remaining) << "-" << (size - bytes_remaining + chunk - 1);
|
|
|
|
meta["x-amz-copy-source-range"] = strrange.str();
|
|
|
|
strrange.clear(stringstream::goodbit);
|
|
|
|
|
|
|
|
if(0 != (result = CopyMultipartPostRequest(tpath, tpath, (list.size() + 1), upload_id, meta, part.etag, ow_sse_flg))){
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
DestroyCurlHandle();
|
|
|
|
list.push_back(part);
|
|
|
|
}
|
|
|
|
|
|
|
|
if(0 != (result = CompleteMultipartPostRequest(tpath, upload_id, list))){
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
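//
// Upload a large file with the multipart upload API:
//   1) PreMultipartPostRequest()      - obtain an UploadId
//   2) UploadMultipartPostRequest()   - PUT each chunk (at most MULTIPART_SIZE
//                                       bytes, staged in a temporary file)
//   3) CompleteMultipartPostRequest() - combine the uploaded parts
//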
int S3fsCurl::MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool ow_sse_flg)
|
|
|
|
{
|
|
|
|
int result;
|
|
|
|
string upload_id;
|
|
|
|
struct stat st;
|
|
|
|
int fd2;
|
|
|
|
FILE* file;
|
|
|
|
int partfd;
|
|
|
|
FILE* partfile;
|
|
|
|
filepart part;
|
|
|
|
filepartList_t list;
|
|
|
|
off_t remaining_bytes;
|
|
|
|
off_t chunk;
|
|
|
|
unsigned char* buf;
|
|
|
|
char tmpfile[256];
|
|
|
|
|
|
|
|
FGPRINT("S3fsCurl::MultipartUploadRequest [tpath=%s][fd=%d]\n", SAFESTRPTR(tpath), fd);
|
|
|
|
|
|
|
|
// duplicate fd
|
|
|
|
if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET) || NULL == (file = fdopen(fd2, "rb"))){
|
|
|
|
FGPRINT("S3fsCurl::MultipartUploadRequest : Cloud not duplicate file discriptor(errno=%d)\n", errno);
|
|
|
|
SYSLOGERR("Cloud not duplicate file discriptor(errno=%d)", errno);
|
2013-07-05 06:36:11 +00:00
|
|
|
if(-1 != fd2){
|
|
|
|
close(fd2);
|
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
if(-1 == fstat(fd2, &st)){
|
|
|
|
FGPRINT("S3fsCurl::MultipartUploadRequest: Invalid file discriptor(errno=%d)\n", errno);
|
|
|
|
SYSLOGERR("Invalid file discriptor(errno=%d)", errno);
|
2013-07-05 06:36:11 +00:00
|
|
|
fclose(file);
|
2013-07-05 02:28:31 +00:00
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
|
|
|
|
// make temporary buffer (maximum size + 4)
|
|
|
|
if(NULL == (buf = (unsigned char*)malloc(sizeof(unsigned char) * (MULTIPART_SIZE + 4)))){
|
|
|
|
SYSLOGCRIT("Could not allocate memory for buffer\n");
|
2013-07-05 06:36:11 +00:00
|
|
|
fclose(file);
|
2013-07-05 02:28:31 +00:00
|
|
|
S3FS_FUSE_EXIT();
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
if(0 != (result = PreMultipartPostRequest(tpath, meta, upload_id, ow_sse_flg))){
|
2013-07-05 06:36:11 +00:00
|
|
|
free(buf);
|
|
|
|
fclose(file);
|
2013-07-05 02:28:31 +00:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
DestroyCurlHandle();
|
|
|
|
|
|
|
|
// cycle through open fd, pulling off 10MB chunks at a time
|
|
|
|
for(remaining_bytes = st.st_size; 0 < remaining_bytes; remaining_bytes -= chunk){
|
|
|
|
off_t copy_total;
|
|
|
|
off_t copied;
|
|
|
|
chunk = remaining_bytes > MULTIPART_SIZE ? MULTIPART_SIZE : remaining_bytes;
|
|
|
|
|
|
|
|
// copy the file portion into the buffer
|
|
|
|
for(copy_total = 0; copy_total < chunk; copy_total += copied){
|
|
|
|
copied = fread(&buf[copy_total], sizeof(unsigned char), (chunk - copy_total), file);
|
|
|
|
if(copied != (chunk - copy_total)){
|
|
|
|
if(0 != ferror(file) || feof(file)){
|
|
|
|
FGPRINT("S3fsCurl::MultipartUploadRequest: read file error(%d)\n", ferror(file));
|
|
|
|
SYSLOGERR("read file error(%d)", ferror(file));
|
|
|
|
free(buf);
|
|
|
|
fclose(file);
|
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// create unique temporary file
|
|
|
|
strncpy(tmpfile, "/tmp/s3fs.XXXXXX", sizeof(tmpfile));
|
|
|
|
if(-1 == (partfd = mkstemp(tmpfile))){
|
|
|
|
FGPRINT("S3fsCurl::MultipartUploadRequest: Could not open tempolary file(%s) - errno(%d)\n", tmpfile, errno);
|
|
|
|
SYSLOGERR("Could not open tempolary file(%s) - errno(%d)", tmpfile, errno);
|
|
|
|
free(buf);
|
|
|
|
fclose(file);
|
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
if(NULL == (partfile = fdopen(partfd, "wb"))){
|
|
|
|
FGPRINT("S3fsCurl::MultipartUploadRequest: Could not open tempolary file(%s) - errno(%d)\n", tmpfile, errno);
|
|
|
|
SYSLOGERR("Could not open tempolary file(%s) - errno(%d)", tmpfile, errno);
|
|
|
|
free(buf);
|
|
|
|
fclose(file);
|
|
|
|
close(partfd);
|
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
|
|
|
|
// copy buffer to temporary file
|
|
|
|
for(copy_total = 0; copy_total < chunk; copy_total += copied){
|
|
|
|
copied = fwrite(&buf[copy_total], sizeof(unsigned char), (chunk - copy_total), partfile);
|
|
|
|
if(copied != (chunk - copy_total)){
|
|
|
|
if(0 != ferror(partfile)){
|
|
|
|
FGPRINT("S3fsCurl::MultipartUploadRequest: write file error(%d)\n", ferror(partfile));
|
|
|
|
SYSLOGERR("write file error(%d)", ferror(partfile));
|
|
|
|
free(buf);
|
|
|
|
fclose(file);
|
|
|
|
fclose(partfile);
|
|
|
|
remove(tmpfile);
|
|
|
|
return -EIO;
|
2013-03-30 13:37:14 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
fclose(partfile);
|
|
|
|
|
|
|
|
// upload part
|
|
|
|
if(0 != (result = UploadMultipartPostRequest(tpath, tmpfile, (list.size() + 1), upload_id, part.etag))){
|
|
|
|
FGPRINT("S3fsCurl::MultipartUploadRequest: failed uploading part(%d)\n", result);
|
|
|
|
SYSLOGERR("failed uploading part(%d)", result);
|
|
|
|
free(buf);
|
|
|
|
fclose(file);
|
|
|
|
remove(tmpfile);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
remove(tmpfile);
|
|
|
|
DestroyCurlHandle();
|
|
|
|
|
|
|
|
part.uploaded = true;
|
|
|
|
part.partfile = tmpfile;
|
|
|
|
list.push_back(part);
|
2013-03-30 13:37:14 +00:00
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
free(buf);
|
|
|
|
fclose(file);
|
|
|
|
|
|
|
|
if(0 != (result = CompleteMultipartPostRequest(tpath, upload_id, list))){
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
return 0;
|
2013-03-30 13:37:14 +00:00
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
int S3fsCurl::MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size)
|
2013-03-30 13:37:14 +00:00
|
|
|
{
|
2013-07-05 02:28:31 +00:00
|
|
|
int result;
|
|
|
|
string upload_id;
|
|
|
|
off_t chunk;
|
|
|
|
off_t bytes_remaining;
|
|
|
|
filepart part;
|
|
|
|
filepartList_t list;
|
|
|
|
stringstream strrange;
|
2013-03-30 13:37:14 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
FGPRINT("S3fsCurl::MultipartRenameRequest [from=%s][to=%s]\n", SAFESTRPTR(from), SAFESTRPTR(to));
|
|
|
|
|
|
|
|
string srcresource;
|
|
|
|
string srcurl;
|
|
|
|
MakeUrlResource(get_realpath(from).c_str(), srcresource, srcurl);
|
|
|
|
|
|
|
|
meta["Content-Type"] = S3fsCurl::LookupMimeType(string(to));
|
|
|
|
meta["x-amz-copy-source"] = srcresource;
|
|
|
|
|
|
|
|
if(0 != (result = PreMultipartPostRequest(to, meta, upload_id, false))){
|
2013-03-30 13:37:14 +00:00
|
|
|
return result;
|
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
DestroyCurlHandle();
|
|
|
|
|
|
|
|
part.uploaded = true;
|
|
|
|
part.partfile = "";
|
|
|
|
for(bytes_remaining = size, chunk = 0; 0 < bytes_remaining; bytes_remaining -= chunk){
|
|
|
|
chunk = bytes_remaining > MAX_MULTI_COPY_SOURCE_SIZE ? MAX_MULTI_COPY_SOURCE_SIZE : bytes_remaining;
|
|
|
|
|
|
|
|
strrange << "bytes=" << (size - bytes_remaining) << "-" << (size - bytes_remaining + chunk - 1);
|
|
|
|
meta["x-amz-copy-source-range"] = strrange.str();
|
|
|
|
strrange.clear(stringstream::goodbit);
|
|
|
|
|
|
|
|
if(0 != (result = CopyMultipartPostRequest(from, to, (list.size() + 1), upload_id, meta, part.etag, false))){
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
DestroyCurlHandle();
|
|
|
|
list.push_back(part);
|
2013-03-30 13:37:14 +00:00
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
|
|
|
|
if(0 != (result = CompleteMultipartPostRequest(to, upload_id, list))){
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
//-------------------------------------------------------------------
|
|
|
|
// Class S3fsMultiCurl
|
|
|
|
//-------------------------------------------------------------------
|
|
|
|
#define MAX_MULTI_HEADREQ 500 // default: max request count in readdir curl_multi.
|
|
|
|
|
|
|
|
//-------------------------------------------------------------------
|
|
|
|
// Class method for S3fsMultiCurl
|
|
|
|
//-------------------------------------------------------------------
|
|
|
|
int S3fsMultiCurl::max_multireq = MAX_MULTI_HEADREQ;
|
|
|
|
|
|
|
|
int S3fsMultiCurl::SetMaxMultiRequest(int max)
|
|
|
|
{
|
|
|
|
int old = S3fsMultiCurl::max_multireq;
|
|
|
|
S3fsMultiCurl::max_multireq= max;
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
|
|
|
//-------------------------------------------------------------------
|
|
|
|
// method for S3fsMultiCurl
|
|
|
|
//-------------------------------------------------------------------
|
|
|
|
S3fsMultiCurl::S3fsMultiCurl() : hMulti(NULL), SuccessCallback(NULL), RetryCallback(NULL)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
S3fsMultiCurl::~S3fsMultiCurl()
|
|
|
|
{
|
|
|
|
Clear();
|
|
|
|
}
|
|
|
|
|
|
|
|
bool S3fsMultiCurl::Clear(void)
|
|
|
|
{
|
|
|
|
if(hMulti){
|
|
|
|
curl_multi_cleanup(hMulti);
|
|
|
|
hMulti = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
s3fscurlmap_t::iterator iter;
|
|
|
|
for(iter = cMap_all.begin(); iter != cMap_all.end(); iter++){
|
|
|
|
S3fsCurl* s3fscurl = (*iter).second;
|
|
|
|
s3fscurl->DestroyCurlHandle();
|
|
|
|
delete s3fscurl;
|
|
|
|
}
|
|
|
|
cMap_all.clear();
|
|
|
|
|
|
|
|
for(iter = cMap_req.begin(); iter != cMap_req.end(); iter++){
|
|
|
|
S3fsCurl* s3fscurl = (*iter).second;
|
|
|
|
s3fscurl->DestroyCurlHandle();
|
|
|
|
delete s3fscurl;
|
|
|
|
}
|
|
|
|
cMap_req.clear();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
S3fsMultiSuccessCallback S3fsMultiCurl::SetSuccessCallback(S3fsMultiSuccessCallback function)
|
|
|
|
{
|
|
|
|
S3fsMultiSuccessCallback old = SuccessCallback;
|
|
|
|
SuccessCallback = function;
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
|
|
|
S3fsMultiRetryCallback S3fsMultiCurl::SetRetryCallback(S3fsMultiRetryCallback function)
|
|
|
|
{
|
|
|
|
S3fsMultiRetryCallback old = RetryCallback;
|
|
|
|
RetryCallback = function;
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool S3fsMultiCurl::SetS3fsCurlObject(S3fsCurl* s3fscurl)
|
|
|
|
{
|
|
|
|
if(!s3fscurl){
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if(cMap_all.end() != cMap_all.find(s3fscurl->hCurl)){
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
cMap_all[s3fscurl->hCurl] = s3fscurl;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
int S3fsMultiCurl::MultiPerform(void)
|
|
|
|
{
|
|
|
|
CURLMcode curlm_code;
|
|
|
|
int still_running;
|
|
|
|
|
|
|
|
if(!hMulti){
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Send multi request.
|
|
|
|
do{
|
|
|
|
// Start making requests and check running.
|
|
|
|
still_running = 0;
|
|
|
|
do {
|
|
|
|
curlm_code = curl_multi_perform(hMulti, &still_running);
|
|
|
|
} while(curlm_code == CURLM_CALL_MULTI_PERFORM);
|
|
|
|
|
|
|
|
if(curlm_code != CURLM_OK) {
|
|
|
|
FGPRINT("S3fsMultiCurl::MultiPerform: curl_multi_perform code: %d msg: %s\n", curlm_code, curl_multi_strerror(curlm_code));
|
|
|
|
SYSLOGERR("curl_multi_perform code: %d msg: %s", curlm_code, curl_multi_strerror(curlm_code));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set timer when still running
|
|
|
|
if(still_running) {
|
|
|
|
long milliseconds;
|
|
|
|
fd_set r_fd;
|
|
|
|
fd_set w_fd;
|
|
|
|
fd_set e_fd;
|
|
|
|
FD_ZERO(&r_fd);
|
|
|
|
FD_ZERO(&w_fd);
|
|
|
|
FD_ZERO(&e_fd);
|
|
|
|
|
|
|
|
if(CURLM_OK != (curlm_code = curl_multi_timeout(hMulti, &milliseconds))){
|
|
|
|
FGPRINT("S3fsMultiCurl::MultiPerform: curl_multi_timeout code: %d msg: %s\n", curlm_code, curl_multi_strerror(curlm_code));
|
|
|
|
SYSLOGERR("curl_multi_timeout code: %d msg: %s", curlm_code, curl_multi_strerror(curlm_code));
|
|
|
|
}
|
|
|
|
if(milliseconds < 0){
|
|
|
|
milliseconds = 50;
|
|
|
|
}
|
|
|
|
if(milliseconds > 0) {
|
|
|
|
int max_fd;
|
|
|
|
struct timeval timeout;
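// curl_multi_timeout() reported the wait in milliseconds; convert it to a
// struct timeval (whole seconds plus remaining microseconds) for select().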
|
|
|
|
timeout.tv_sec = 1000 * milliseconds / 1000000;
|
|
|
|
timeout.tv_usec = 1000 * milliseconds % 1000000;
|
|
|
|
|
|
|
|
if(CURLM_OK != (curlm_code = curl_multi_fdset(hMulti, &r_fd, &w_fd, &e_fd, &max_fd))){
|
|
|
|
FGPRINT("S3fsMultiCurl::MultiPerform: curl_multi_fdset code: %d msg: %s\n", curlm_code, curl_multi_strerror(curlm_code));
|
|
|
|
SYSLOGERR("curl_multi_fdset code: %d msg: %s", curlm_code, curl_multi_strerror(curlm_code));
|
|
|
|
return -EIO;
|
2013-03-30 13:37:14 +00:00
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
if(-1 == select(max_fd + 1, &r_fd, &w_fd, &e_fd, &timeout)){
|
|
|
|
FGPRINT("S3fsMultiCurl::MultiPerform: failed select - errno(%d)\n", errno);
|
|
|
|
SYSLOGERR("failed select - errno(%d)", errno);
|
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}while(still_running);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int S3fsMultiCurl::MultiRead(void)
|
|
|
|
{
|
|
|
|
CURLMsg* msg;
|
|
|
|
int remaining_messages;
|
|
|
|
CURL* hCurl = NULL;
|
|
|
|
S3fsCurl* s3fscurl = NULL;
|
|
|
|
S3fsCurl* retrycurl= NULL;
|
|
|
|
|
|
|
|
while(NULL != (msg = curl_multi_info_read(hMulti, &remaining_messages))){
|
|
|
|
if(CURLMSG_DONE != msg->msg){
|
|
|
|
FGPRINT("S3fsMultiCurl::MultiRead: curl_multi_info_read code: %d\n", msg->msg);
|
|
|
|
SYSLOGERR("curl_multi_info_read code: %d", msg->msg);
|
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
hCurl = msg->easy_handle;
|
|
|
|
s3fscurl = cMap_req[hCurl];
|
|
|
|
retrycurl= NULL;
|
|
|
|
|
|
|
|
if(CURLE_OK == msg->data.result && s3fscurl){
|
|
|
|
long responseCode;
|
|
|
|
if(s3fscurl->GetResponseCode(responseCode) && 400 > responseCode){
|
|
|
|
// add into stat cache
|
|
|
|
if(SuccessCallback && !SuccessCallback(s3fscurl)){
|
|
|
|
FGPRINT("S3fsMultiCurl::MultiRead: error from callback function(%s).\n", s3fscurl->base_path.c_str());
|
|
|
|
}
|
|
|
|
}else{
|
|
|
|
// This case is directory object("dir", "non dir object", "_$folder$", etc)
|
|
|
|
//FGPRINT("S3fsMultiCurl::MultiRead: failed a request(%s)\n", s3fscurl->base_path.c_str());
|
|
|
|
}
|
|
|
|
|
|
|
|
}else{
|
|
|
|
FGPRINT("S3fsMultiCurl::MultiRead: failed to read(remaining: %i code: %d msg: %s), so retry this.\n",
|
|
|
|
remaining_messages, msg->data.result, curl_easy_strerror(msg->data.result));
|
|
|
|
SYSLOGDBGERR("failed to read(remaining: %i code: %d msg: %s), so retry this.",
|
|
|
|
remaining_messages, msg->data.result, curl_easy_strerror(msg->data.result));
|
|
|
|
|
|
|
|
// For retry
|
|
|
|
if(RetryCallback){
|
|
|
|
retrycurl = RetryCallback(s3fscurl);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Cleanup this curl object and set retrying object(if there is).
|
|
|
|
curl_multi_remove_handle(hMulti, hCurl);
|
|
|
|
cMap_req.erase(hCurl);
|
|
|
|
if(s3fscurl && s3fscurl != retrycurl){
|
|
|
|
delete s3fscurl; // with destroy curl handle.
|
|
|
|
}
|
|
|
|
if(retrycurl){
|
|
|
|
cMap_all[retrycurl->hCurl] = retrycurl;
|
|
|
|
}
|
2013-03-30 13:37:14 +00:00
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2013-03-30 13:37:14 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
int S3fsMultiCurl::Request(void)
|
|
|
|
{
|
|
|
|
int result;
|
|
|
|
CURLMcode curlm_code;
|
|
|
|
|
|
|
|
FGPRINT("S3fsMultiCurl::Request[count=%ld]\n", cMap_all.size());
|
|
|
|
|
|
|
|
if(hMulti){
|
|
|
|
Clear();
|
2013-03-30 13:37:14 +00:00
|
|
|
}
|
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
// Make request list.
|
|
|
|
//
|
|
|
|
// Send multi request loop (with retry)
|
|
|
|
// (When many request is sends, sometimes gets "Couldn't connect to server")
|
|
|
|
//
|
|
|
|
while(0 < cMap_all.size()){
|
|
|
|
// populate the multi interface with an initial set of requests
|
|
|
|
if(NULL == (hMulti = curl_multi_init())){
|
|
|
|
Clear();
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
// set curl handle to multi handle
|
|
|
|
int cnt;
|
|
|
|
s3fscurlmap_t::iterator iter;
|
|
|
|
for(cnt = 0, iter = cMap_all.begin(); cnt < S3fsMultiCurl::max_multireq && iter != cMap_all.end(); cMap_all.erase(iter++), cnt++){
|
|
|
|
CURL* hCurl = (*iter).first;
|
|
|
|
S3fsCurl* s3fscurl = (*iter).second;
|
|
|
|
|
|
|
|
if(CURLM_OK != (curlm_code = curl_multi_add_handle(hMulti, hCurl))){
|
|
|
|
FGPRINT("S3fsMultiCurl::Request: curl_multi_add_handle code: %d msg: %s\n", curlm_code, curl_multi_strerror(curlm_code));
|
|
|
|
SYSLOGERR("curl_multi_add_handle code: %d msg: %s", curlm_code, curl_multi_strerror(curlm_code));
|
|
|
|
Clear();
|
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
cMap_req[hCurl] = s3fscurl;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Send multi request.
|
|
|
|
if(0 != (result = MultiPerform())){
|
|
|
|
Clear();
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read the result
|
|
|
|
if(0 != (result = MultiRead())){
|
|
|
|
Clear();
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
// cleanup
|
|
|
|
curl_multi_cleanup(hMulti);
|
2013-03-30 13:37:14 +00:00
|
|
|
}
|
2013-07-05 02:28:31 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2013-03-30 13:37:14 +00:00
|
|
|
|
2013-07-05 02:28:31 +00:00
|
|
|
//-------------------------------------------------------------------
|
|
|
|
// Utility functions
|
|
|
|
//-------------------------------------------------------------------
|
|
|
|
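//
// GetContentMD5 returns the base64 encoding of the raw 16 byte MD5 digest of
// the file referred to by fd, suitable for a "Content-MD5:" request header
// (e.g. "Content-MD5: pUNXr/BjKK5G2UKvaRRrOA==").
// md5sum() below returns the same digest as a hex string, used for comparing
// against ETag values.
//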
string GetContentMD5(int fd)
|
|
|
|
{
|
|
|
|
BIO* b64;
|
|
|
|
BIO* bmem;
|
|
|
|
BUF_MEM* bptr;
|
|
|
|
string Signature;
|
|
|
|
unsigned char* md5hex;
|
|
|
|
|
|
|
|
if(NULL == (md5hex = md5hexsum(fd))){
|
|
|
|
return string("");
|
|
|
|
}
|
|
|
|
|
|
|
|
b64 = BIO_new(BIO_f_base64());
|
|
|
|
bmem = BIO_new(BIO_s_mem());
|
|
|
|
b64 = BIO_push(b64, bmem);
|
|
|
|
|
|
|
|
BIO_write(b64, md5hex, MD5_DIGEST_LENGTH);
|
|
|
|
free(md5hex);
|
|
|
|
if(1 != BIO_flush(b64)){
|
|
|
|
BIO_free_all(b64);
|
|
|
|
return string("");
|
|
|
|
}
|
|
|
|
BIO_get_mem_ptr(b64, &bptr);
|
|
|
|
|
|
|
|
Signature.resize(bptr->length - 1);
|
|
|
|
memcpy(&Signature[0], bptr->data, bptr->length - 1);
|
|
|
|
|
|
|
|
BIO_free_all(b64);
|
|
|
|
|
|
|
|
return Signature;
|
|
|
|
}

unsigned char* md5hexsum(int fd)
{
  MD5_CTX c;
  char    buf[512];
  ssize_t bytes;
  unsigned char* result;

  // seek to top of file.
  if(-1 == lseek(fd, 0, SEEK_SET)){
    return NULL;
  }
  if(NULL == (result = (unsigned char*)malloc(MD5_DIGEST_LENGTH))){
    return NULL;
  }

  memset(buf, 0, 512);
  MD5_Init(&c);
  while((bytes = read(fd, buf, 512)) > 0) {
    MD5_Update(&c, buf, bytes);
    memset(buf, 0, 512);
  }
  MD5_Final(result, &c);

  if(-1 == lseek(fd, 0, SEEK_SET)){
    free(result);
    return NULL;
  }

  return result;
}

string md5sum(int fd)
{
  char md5[2 * MD5_DIGEST_LENGTH + 1];
  char hexbuf[3];
  unsigned char* md5hex;

  if(NULL == (md5hex = md5hexsum(fd))){
    return string("");
  }

  memset(md5, 0, 2 * MD5_DIGEST_LENGTH + 1);
  for(int i = 0; i < MD5_DIGEST_LENGTH; i++) {
    snprintf(hexbuf, 3, "%02x", md5hex[i]);
    strncat(md5, hexbuf, 2);
  }
  free(md5hex);

  return string(md5);
}
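
//
// Illustrative sketch (not s3fs code): typical use of md5sum() and GetContentMD5()
// on a local file descriptor. The path and output format are only examples; the
// hex form matches a simple (non-multipart) S3 ETag, the base64 form is what a
// Content-MD5 request header carries.
//
static void example_print_md5(const char* path)
{
  int fd = open(path, O_RDONLY);
  if(-1 == fd){
    return;
  }
  string hexdigest = md5sum(fd);          // 32 lower-case hex characters
  string b64digest = GetContentMD5(fd);   // base64 of the 16 raw digest bytes
  printf("md5(hex)=%s md5(base64)=%s\n", hexdigest.c_str(), b64digest.c_str());
  close(fd);
}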

//
// curl_slist_sort_insert
// This function is like curl_slist_append(), but it inserts the data in ascending
// sort order, because the AWS signature requires the headers to be sorted.
//
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data)
{
  struct curl_slist* curpos;
  struct curl_slist* lastpos;
  struct curl_slist* new_item;

  if(!data){
    return list;
  }
  if(NULL == (new_item = (struct curl_slist*)malloc(sizeof(struct curl_slist)))){
    return list;
  }
  if(NULL == (new_item->data = strdup(data))){
    free(new_item);
    return list;
  }
  new_item->next = NULL;

  for(lastpos = NULL, curpos = list; curpos; curpos = curpos->next){
    int result = strcmp(data, curpos->data);
    if(0 == result){
      // same data, so replace it.
      if(lastpos){
        lastpos->next = new_item;
      }else{
        list = new_item;
      }
      new_item->next = curpos->next;
      free(curpos->data);
      free(curpos);
      break;

    }else if(0 > result){
      // add data before curpos.
      if(lastpos){
        lastpos->next = new_item;
      }else{
        list = new_item;
      }
      new_item->next = curpos;
      break;
    }
    lastpos = curpos;
  }
  if(!curpos){
    // append to last pos
    if(lastpos){
      lastpos->next = new_item;
    }else{
      // the list was empty
      list = new_item;
    }
  }
  return list;
}
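
//
// Illustrative sketch (not s3fs code): building a header list with
// curl_slist_sort_insert() so that the headers come out in sorted order. The
// header values below are only examples; a Content-MD5 header built from
// GetContentMD5() would be inserted the same way.
//
static struct curl_slist* example_build_sorted_headers(void)
{
  struct curl_slist* headers = NULL;

  headers = curl_slist_sort_insert(headers, "x-amz-meta-uid:500");
  headers = curl_slist_sort_insert(headers, "x-amz-meta-mtime:1372988911");
  headers = curl_slist_sort_insert(headers, "x-amz-acl:private");
  // Inserting the same data again replaces the existing node instead of duplicating it.
  headers = curl_slist_sort_insert(headers, "x-amz-acl:private");

  // Resulting order: x-amz-acl, x-amz-meta-mtime, x-amz-meta-uid.
  // The caller passes the list to curl and releases it with curl_slist_free_all().
  return headers;
}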

// Builds the resource path and request URL from the global service_path, bucket and host values.
bool MakeUrlResource(const char* realpath, string& resourcepath, string& url)
{
  if(!realpath){
    return false;
  }
  resourcepath = urlEncode(service_path + bucket + realpath);
  url          = host + resourcepath;
  return true;
}
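
//
// Illustrative sketch (not s3fs code): how a request URL is assembled with
// MakeUrlResource(). The object path is only an example; the actual result
// depends on the global service_path, bucket and host settings.
//
static bool example_make_url(string& url)
{
  string resourcepath;
  // With bucket "mybucket" and the default host this yields something like
  //   resourcepath = "/mybucket/dir/file.txt"
  //   url          = "http://s3.amazonaws.com/mybucket/dir/file.txt"
  return MakeUrlResource("/dir/file.txt", resourcepath, url);
}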

/// END