2016-02-20 21:05:48 +00:00
|
|
|
package rest
|
|
|
|
|
|
|
|
import (
|
2017-06-03 15:39:57 +00:00
|
|
|
"context"
|
2016-02-20 21:05:48 +00:00
|
|
|
"encoding/json"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
Fix REST backend HTTP keepalive
This is subtle. A combination od fast client disk (read: SSD) with lots
of files and fast network connection to restic-server would suddenly
start getting lots of "dial tcp: connect: cannot assign requested
address" errors during backup stage. Further inspection revealed that
client machine was plagued with TCP sockets in TIME_WAIT state. When
ephemeral port range was finally exhausted, no more sockets could be
opened, so restic would freak out.
To understand the magnitude of this problem, with ~18k ports and default
timeout of 60 seconds, it means more than 300 HTTP connections per
seconds were created and teared down. Yeah, restic-server is that
fast. :)
As it turns out, this behavior was product of 2 subtle issues:
1) The body of HTTP response wasn't read completely with io.ReadFull()
at the end of the Load() function. This deactivated HTTP keepalive,
so already open connections were not reused, but closed instead, and
new ones opened for every new request. io.Copy(ioutil.Discard,
resp.Body) before resp.Body.Close() remedies this.
2) Even with the above fix, somehow having MaxIdleConnsPerHost at its
default value of 2 wasn't enough to stop reconnecting. It is hard to
understand why this would be so detrimental, it could even be some
subtle Go runtime bug. Anyhow, setting this value to match the
connection limit, as set by connLimit global variable, finally nails
this ugly bug.
I fixed several other places where the response body wasn't read in
full (or at all). For example, json.NewDecoder() is also known not to
read the whole body of response.
Unfortunately, this is not over yet. :( The check command is firing up
to 40 simultaneous connections to the restic-server. Then, once again,
MaxIdleConnsPerHost is too low to support keepalive, and sockets in the
TIME_WAIT state pile up. But, as this kind of concurrency absolutely
kill the poor disk on the server side, this is a completely different
bug then.
2016-11-09 21:37:20 +00:00
|
|
|
"io/ioutil"
|
2016-02-20 21:05:48 +00:00
|
|
|
"net/http"
|
|
|
|
"net/url"
|
|
|
|
"path"
|
2016-08-31 20:39:36 +00:00
|
|
|
"restic"
|
2016-02-21 15:35:25 +00:00
|
|
|
"strings"
|
2016-02-20 21:05:48 +00:00
|
|
|
|
2017-06-03 15:39:57 +00:00
|
|
|
"golang.org/x/net/context/ctxhttp"
|
|
|
|
|
2017-01-22 21:01:12 +00:00
|
|
|
"restic/debug"
|
2016-09-01 20:17:37 +00:00
|
|
|
"restic/errors"
|
2016-08-21 15:46:23 +00:00
|
|
|
|
2016-02-20 21:05:48 +00:00
|
|
|
"restic/backend"
|
|
|
|
)
|
|
|
|
|
2017-01-22 11:32:20 +00:00
|
|
|
// make sure the rest backend implements restic.Backend
// (compile-time interface satisfaction check)
var _ restic.Backend = &restBackend{}
|
|
|
|
|
2016-02-20 21:05:48 +00:00
|
|
|
// restBackend is a backend that stores data on a REST server.
type restBackend struct {
	url    *url.URL           // base URL of the REST server
	sem    *backend.Semaphore // limits the number of concurrent requests (see GetToken/ReleaseToken around each request)
	client *http.Client       // shared HTTP client, so connections can be reused across requests
	backend.Layout            // maps handles to file/dir names on the server
}
|
|
|
|
|
|
|
|
// Open opens the REST backend with the given config.
|
2016-08-31 20:51:35 +00:00
|
|
|
func Open(cfg Config) (restic.Backend, error) {
|
2017-06-03 15:39:57 +00:00
|
|
|
client := &http.Client{Transport: backend.Transport()}
|
2016-02-20 21:05:48 +00:00
|
|
|
|
2017-06-05 22:25:22 +00:00
|
|
|
sem, err := backend.NewSemaphore(cfg.Connections)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-04-11 19:47:57 +00:00
|
|
|
// use url without trailing slash for layout
|
|
|
|
url := cfg.URL.String()
|
|
|
|
if url[len(url)-1] == '/' {
|
|
|
|
url = url[:len(url)-1]
|
|
|
|
}
|
|
|
|
|
|
|
|
be := &restBackend{
|
2017-06-05 22:25:22 +00:00
|
|
|
url: cfg.URL,
|
|
|
|
client: client,
|
|
|
|
Layout: &backend.RESTLayout{URL: url, Join: path.Join},
|
|
|
|
sem: sem,
|
2017-04-11 19:47:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return be, nil
|
2016-02-20 21:05:48 +00:00
|
|
|
}
|
|
|
|
|
2017-03-16 20:50:26 +00:00
|
|
|
// Create creates a new REST on server configured in config.
|
|
|
|
func Create(cfg Config) (restic.Backend, error) {
|
|
|
|
be, err := Open(cfg)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-06-03 15:39:57 +00:00
|
|
|
_, err = be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile})
|
2017-03-16 20:50:26 +00:00
|
|
|
if err == nil {
|
|
|
|
return nil, errors.Fatal("config file already exists")
|
|
|
|
}
|
|
|
|
|
|
|
|
url := *cfg.URL
|
|
|
|
values := url.Query()
|
|
|
|
values.Set("create", "true")
|
|
|
|
url.RawQuery = values.Encode()
|
|
|
|
|
|
|
|
resp, err := http.Post(url.String(), "binary/octet-stream", strings.NewReader(""))
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if resp.StatusCode != http.StatusOK {
|
|
|
|
return nil, errors.Fatalf("server response unexpected: %v (%v)", resp.Status, resp.StatusCode)
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err = io.Copy(ioutil.Discard, resp.Body)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
err = resp.Body.Close()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return be, nil
|
|
|
|
}
|
|
|
|
|
2016-02-20 21:05:48 +00:00
|
|
|
// Location returns this backend's location (the server's URL).
|
|
|
|
func (b *restBackend) Location() string {
|
|
|
|
return b.url.String()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Save stores data in the backend at the handle.
|
2017-06-03 15:39:57 +00:00
|
|
|
func (b *restBackend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err error) {
|
2016-02-20 21:05:48 +00:00
|
|
|
if err := h.Valid(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-06-03 15:39:57 +00:00
|
|
|
ctx, cancel := context.WithCancel(ctx)
|
|
|
|
defer cancel()
|
|
|
|
|
2017-01-25 16:07:58 +00:00
|
|
|
// make sure that client.Post() cannot close the reader by wrapping it in
|
|
|
|
// backend.Closer, which has a noop method.
|
|
|
|
rd = backend.Closer{Reader: rd}
|
|
|
|
|
2017-06-05 22:25:22 +00:00
|
|
|
b.sem.GetToken()
|
2017-06-03 15:39:57 +00:00
|
|
|
resp, err := ctxhttp.Post(ctx, b.client, b.Filename(h), "binary/octet-stream", rd)
|
2017-06-05 22:25:22 +00:00
|
|
|
b.sem.ReleaseToken()
|
2016-02-20 21:05:48 +00:00
|
|
|
|
|
|
|
if resp != nil {
|
|
|
|
defer func() {
|
2017-06-03 15:39:57 +00:00
|
|
|
_, _ = io.Copy(ioutil.Discard, resp.Body)
|
2016-02-20 21:05:48 +00:00
|
|
|
e := resp.Body.Close()
|
|
|
|
|
|
|
|
if err == nil {
|
2016-08-29 19:54:50 +00:00
|
|
|
err = errors.Wrap(e, "Close")
|
2016-02-20 21:05:48 +00:00
|
|
|
}
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
|
|
|
|
if err != nil {
|
2016-08-29 19:54:50 +00:00
|
|
|
return errors.Wrap(err, "client.Post")
|
2016-02-20 21:05:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if resp.StatusCode != 200 {
|
2017-05-28 10:31:19 +00:00
|
|
|
return errors.Errorf("server response unexpected: %v (%v)", resp.Status, resp.StatusCode)
|
2016-02-20 21:05:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-01-23 17:11:10 +00:00
|
|
|
// Load returns a reader that yields the contents of the file at h at the
|
2017-01-22 21:01:12 +00:00
|
|
|
// given offset. If length is nonzero, only a portion of the file is
|
|
|
|
// returned. rd must be closed after use.
|
2017-06-03 15:39:57 +00:00
|
|
|
func (b *restBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
|
2017-01-23 17:11:10 +00:00
|
|
|
debug.Log("Load %v, length %v, offset %v", h, length, offset)
|
2017-01-22 21:01:12 +00:00
|
|
|
if err := h.Valid(); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if offset < 0 {
|
|
|
|
return nil, errors.New("offset is negative")
|
|
|
|
}
|
|
|
|
|
|
|
|
if length < 0 {
|
|
|
|
return nil, errors.Errorf("invalid length %d", length)
|
|
|
|
}
|
|
|
|
|
2017-04-11 19:47:57 +00:00
|
|
|
req, err := http.NewRequest("GET", b.Filename(h), nil)
|
2017-01-22 21:01:12 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "http.NewRequest")
|
|
|
|
}
|
|
|
|
|
|
|
|
byteRange := fmt.Sprintf("bytes=%d-", offset)
|
|
|
|
if length > 0 {
|
|
|
|
byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1)
|
|
|
|
}
|
|
|
|
req.Header.Add("Range", byteRange)
|
2017-01-23 17:11:10 +00:00
|
|
|
debug.Log("Load(%v) send range %v", h, byteRange)
|
2017-01-22 21:01:12 +00:00
|
|
|
|
2017-06-05 22:25:22 +00:00
|
|
|
b.sem.GetToken()
|
2017-06-03 15:39:57 +00:00
|
|
|
resp, err := ctxhttp.Do(ctx, b.client, req)
|
2017-06-05 22:25:22 +00:00
|
|
|
b.sem.ReleaseToken()
|
2017-01-22 21:01:12 +00:00
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
if resp != nil {
|
2017-06-03 15:39:57 +00:00
|
|
|
_, _ = io.Copy(ioutil.Discard, resp.Body)
|
|
|
|
_ = resp.Body.Close()
|
2017-01-22 21:01:12 +00:00
|
|
|
}
|
|
|
|
return nil, errors.Wrap(err, "client.Do")
|
|
|
|
}
|
|
|
|
|
|
|
|
if resp.StatusCode != 200 && resp.StatusCode != 206 {
|
2017-06-03 15:39:57 +00:00
|
|
|
_ = resp.Body.Close()
|
2017-05-28 10:31:19 +00:00
|
|
|
return nil, errors.Errorf("unexpected HTTP response (%v): %v", resp.StatusCode, resp.Status)
|
2017-01-22 21:01:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return resp.Body, nil
|
|
|
|
}
|
|
|
|
|
2016-02-20 21:05:48 +00:00
|
|
|
// Stat returns information about a blob.
|
2017-06-03 15:39:57 +00:00
|
|
|
func (b *restBackend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
|
2016-02-20 21:05:48 +00:00
|
|
|
if err := h.Valid(); err != nil {
|
2016-08-31 20:51:35 +00:00
|
|
|
return restic.FileInfo{}, err
|
2016-02-20 21:05:48 +00:00
|
|
|
}
|
|
|
|
|
2017-06-05 22:25:22 +00:00
|
|
|
b.sem.GetToken()
|
2017-06-03 15:39:57 +00:00
|
|
|
resp, err := ctxhttp.Head(ctx, b.client, b.Filename(h))
|
2017-06-05 22:25:22 +00:00
|
|
|
b.sem.ReleaseToken()
|
2016-02-20 21:05:48 +00:00
|
|
|
if err != nil {
|
2016-08-31 20:51:35 +00:00
|
|
|
return restic.FileInfo{}, errors.Wrap(err, "client.Head")
|
2016-02-20 21:05:48 +00:00
|
|
|
}
|
|
|
|
|
2017-06-03 15:39:57 +00:00
|
|
|
_, _ = io.Copy(ioutil.Discard, resp.Body)
|
2016-02-20 21:05:48 +00:00
|
|
|
if err = resp.Body.Close(); err != nil {
|
2016-08-31 20:51:35 +00:00
|
|
|
return restic.FileInfo{}, errors.Wrap(err, "Close")
|
2016-02-20 21:05:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if resp.StatusCode != 200 {
|
2017-05-28 10:31:19 +00:00
|
|
|
return restic.FileInfo{}, errors.Errorf("unexpected HTTP response (%v): %v", resp.StatusCode, resp.Status)
|
2016-02-20 21:05:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if resp.ContentLength < 0 {
|
2016-08-31 20:51:35 +00:00
|
|
|
return restic.FileInfo{}, errors.New("negative content length")
|
2016-02-20 21:05:48 +00:00
|
|
|
}
|
|
|
|
|
2016-08-31 20:51:35 +00:00
|
|
|
bi := restic.FileInfo{
|
2016-02-20 21:05:48 +00:00
|
|
|
Size: resp.ContentLength,
|
|
|
|
}
|
|
|
|
|
|
|
|
return bi, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test returns true if a blob of the given type and name exists in the backend.
|
2017-06-03 15:39:57 +00:00
|
|
|
func (b *restBackend) Test(ctx context.Context, h restic.Handle) (bool, error) {
|
|
|
|
_, err := b.Stat(ctx, h)
|
2016-02-20 21:05:48 +00:00
|
|
|
if err != nil {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove removes the blob with the given name and type.
|
2017-06-03 15:39:57 +00:00
|
|
|
func (b *restBackend) Remove(ctx context.Context, h restic.Handle) error {
|
2016-02-20 21:05:48 +00:00
|
|
|
if err := h.Valid(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-04-11 19:47:57 +00:00
|
|
|
req, err := http.NewRequest("DELETE", b.Filename(h), nil)
|
2016-02-20 21:05:48 +00:00
|
|
|
if err != nil {
|
2016-08-29 19:54:50 +00:00
|
|
|
return errors.Wrap(err, "http.NewRequest")
|
2016-02-20 21:05:48 +00:00
|
|
|
}
|
2017-06-05 22:25:22 +00:00
|
|
|
b.sem.GetToken()
|
2017-06-03 15:39:57 +00:00
|
|
|
resp, err := ctxhttp.Do(ctx, b.client, req)
|
2017-06-05 22:25:22 +00:00
|
|
|
b.sem.ReleaseToken()
|
2016-02-20 21:05:48 +00:00
|
|
|
|
|
|
|
if err != nil {
|
2016-08-29 19:54:50 +00:00
|
|
|
return errors.Wrap(err, "client.Do")
|
2016-02-20 21:05:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if resp.StatusCode != 200 {
|
2017-03-16 20:50:26 +00:00
|
|
|
return errors.Errorf("blob not removed, server response: %v (%v)", resp.Status, resp.StatusCode)
|
2016-02-20 21:05:48 +00:00
|
|
|
}
|
|
|
|
|
2017-06-03 15:39:57 +00:00
|
|
|
_, err = io.Copy(ioutil.Discard, resp.Body)
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "Copy")
|
|
|
|
}
|
|
|
|
|
|
|
|
return errors.Wrap(resp.Body.Close(), "Close")
|
2016-02-20 21:05:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// List returns a channel that yields all names of blobs of type t. A
|
|
|
|
// goroutine is started for this. If the channel done is closed, sending
|
|
|
|
// stops.
|
2017-06-03 15:39:57 +00:00
|
|
|
func (b *restBackend) List(ctx context.Context, t restic.FileType) <-chan string {
|
2016-02-20 21:05:48 +00:00
|
|
|
ch := make(chan string)
|
|
|
|
|
2017-04-11 19:47:57 +00:00
|
|
|
url := b.Dirname(restic.Handle{Type: t})
|
2016-02-21 15:35:25 +00:00
|
|
|
if !strings.HasSuffix(url, "/") {
|
|
|
|
url += "/"
|
|
|
|
}
|
|
|
|
|
2017-06-05 22:25:22 +00:00
|
|
|
b.sem.GetToken()
|
2017-06-03 15:39:57 +00:00
|
|
|
resp, err := ctxhttp.Get(ctx, b.client, url)
|
2017-06-05 22:25:22 +00:00
|
|
|
b.sem.ReleaseToken()
|
2016-02-20 21:05:48 +00:00
|
|
|
|
|
|
|
if resp != nil {
|
Fix REST backend HTTP keepalive
This is subtle. A combination od fast client disk (read: SSD) with lots
of files and fast network connection to restic-server would suddenly
start getting lots of "dial tcp: connect: cannot assign requested
address" errors during backup stage. Further inspection revealed that
client machine was plagued with TCP sockets in TIME_WAIT state. When
ephemeral port range was finally exhausted, no more sockets could be
opened, so restic would freak out.
To understand the magnitude of this problem, with ~18k ports and default
timeout of 60 seconds, it means more than 300 HTTP connections per
seconds were created and teared down. Yeah, restic-server is that
fast. :)
As it turns out, this behavior was product of 2 subtle issues:
1) The body of HTTP response wasn't read completely with io.ReadFull()
at the end of the Load() function. This deactivated HTTP keepalive,
so already open connections were not reused, but closed instead, and
new ones opened for every new request. io.Copy(ioutil.Discard,
resp.Body) before resp.Body.Close() remedies this.
2) Even with the above fix, somehow having MaxIdleConnsPerHost at its
default value of 2 wasn't enough to stop reconnecting. It is hard to
understand why this would be so detrimental, it could even be some
subtle Go runtime bug. Anyhow, setting this value to match the
connection limit, as set by connLimit global variable, finally nails
this ugly bug.
I fixed several other places where the response body wasn't read in
full (or at all). For example, json.NewDecoder() is also known not to
read the whole body of response.
Unfortunately, this is not over yet. :( The check command is firing up
to 40 simultaneous connections to the restic-server. Then, once again,
MaxIdleConnsPerHost is too low to support keepalive, and sockets in the
TIME_WAIT state pile up. But, as this kind of concurrency absolutely
kill the poor disk on the server side, this is a completely different
bug then.
2016-11-09 21:37:20 +00:00
|
|
|
defer func() {
|
2017-06-03 15:39:57 +00:00
|
|
|
_, _ = io.Copy(ioutil.Discard, resp.Body)
|
Fix REST backend HTTP keepalive
This is subtle. A combination od fast client disk (read: SSD) with lots
of files and fast network connection to restic-server would suddenly
start getting lots of "dial tcp: connect: cannot assign requested
address" errors during backup stage. Further inspection revealed that
client machine was plagued with TCP sockets in TIME_WAIT state. When
ephemeral port range was finally exhausted, no more sockets could be
opened, so restic would freak out.
To understand the magnitude of this problem, with ~18k ports and default
timeout of 60 seconds, it means more than 300 HTTP connections per
seconds were created and teared down. Yeah, restic-server is that
fast. :)
As it turns out, this behavior was product of 2 subtle issues:
1) The body of HTTP response wasn't read completely with io.ReadFull()
at the end of the Load() function. This deactivated HTTP keepalive,
so already open connections were not reused, but closed instead, and
new ones opened for every new request. io.Copy(ioutil.Discard,
resp.Body) before resp.Body.Close() remedies this.
2) Even with the above fix, somehow having MaxIdleConnsPerHost at its
default value of 2 wasn't enough to stop reconnecting. It is hard to
understand why this would be so detrimental, it could even be some
subtle Go runtime bug. Anyhow, setting this value to match the
connection limit, as set by connLimit global variable, finally nails
this ugly bug.
I fixed several other places where the response body wasn't read in
full (or at all). For example, json.NewDecoder() is also known not to
read the whole body of response.
Unfortunately, this is not over yet. :( The check command is firing up
to 40 simultaneous connections to the restic-server. Then, once again,
MaxIdleConnsPerHost is too low to support keepalive, and sockets in the
TIME_WAIT state pile up. But, as this kind of concurrency absolutely
kill the poor disk on the server side, this is a completely different
bug then.
2016-11-09 21:37:20 +00:00
|
|
|
e := resp.Body.Close()
|
|
|
|
|
|
|
|
if err == nil {
|
|
|
|
err = errors.Wrap(e, "Close")
|
|
|
|
}
|
|
|
|
}()
|
2016-02-20 21:05:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
close(ch)
|
|
|
|
return ch
|
|
|
|
}
|
|
|
|
|
|
|
|
dec := json.NewDecoder(resp.Body)
|
|
|
|
var list []string
|
|
|
|
if err = dec.Decode(&list); err != nil {
|
|
|
|
close(ch)
|
|
|
|
return ch
|
|
|
|
}
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
defer close(ch)
|
|
|
|
for _, m := range list {
|
|
|
|
select {
|
|
|
|
case ch <- m:
|
2017-06-03 15:39:57 +00:00
|
|
|
case <-ctx.Done():
|
2016-02-20 21:05:48 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
return ch
|
|
|
|
}
|
|
|
|
|
|
|
|
// Close closes all open files.
|
|
|
|
func (b *restBackend) Close() error {
|
|
|
|
// this does not need to do anything, all open files are closed within the
|
|
|
|
// same function.
|
|
|
|
return nil
|
|
|
|
}
|