package rest
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
"strings"

"golang.org/x/net/context/ctxhttp"

"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
)
// make sure the rest backend implements restic.Backend
var _ restic.Backend = &restBackend{}
type restBackend struct {
url *url.URL
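// sem limits the number of concurrent connections to the server (cfg.Connections)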
sem *backend.Semaphore
client *http.Client
backend.Layout
}
// Open opens the REST backend with the given config.
func Open(cfg Config, rt http.RoundTripper) (*restBackend, error) {
client := &http.Client{Transport: rt}
sem, err := backend.NewSemaphore(cfg.Connections)
if err != nil {
return nil, err
}
// use url without trailing slash for layout
url := cfg.URL.String()
if url[len(url)-1] == '/' {
url = url[:len(url)-1]
}
be := &restBackend{
url: cfg.URL,
client: client,
Layout: &backend.RESTLayout{URL: url, Join: path.Join},
sem: sem,
}
return be, nil
}
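// A minimal usage sketch, assuming cfg holds a parsed Config and using
// http.DefaultTransport as the round tripper:
//
//	be, err := Open(cfg, http.DefaultTransport)
//	if err != nil {
//		// handle error
//	}
//	defer be.Close()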
// Create creates a new repository on the server configured in cfg.
func Create(cfg Config, rt http.RoundTripper) (restic.Backend, error) {
be, err := Open(cfg, rt)
if err != nil {
return nil, err
}
_, err = be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile})
if err == nil {
return nil, errors.Fatal("config file already exists")
}
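// ask the server to create a new repository by POSTing to its URL with the
// query parameter create=true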
url := *cfg.URL
values := url.Query()
values.Set("create", "true")
url.RawQuery = values.Encode()
resp, err := be.client.Post(url.String(), "binary/octet-stream", strings.NewReader(""))
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
_ = resp.Body.Close()
return nil, errors.Fatalf("server response unexpected: %v (%v)", resp.Status, resp.StatusCode)
}
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
return nil, err
}
err = resp.Body.Close()
if err != nil {
return nil, err
}
return be, nil
}
// Location returns this backend's location (the server's URL).
func (b *restBackend) Location() string {
return b.url.String()
}
// Save stores data in the backend at the handle.
func (b *restBackend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err error) {
if err := h.Valid(); err != nil {
return err
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// wrap the reader so that client.Post() cannot close it
rd = ioutil.NopCloser(rd)
b.sem.GetToken()
resp, err := ctxhttp.Post(ctx, b.client, b.Filename(h), "binary/octet-stream", rd)
b.sem.ReleaseToken()
if resp != nil {
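// drain the response body before closing it, otherwise the underlying
// connection is not reused and HTTP keepalive is defeated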
defer func() {
_, _ = io.Copy(ioutil.Discard, resp.Body)
e := resp.Body.Close()
if err == nil {
err = errors.Wrap(e, "Close")
}
}()
}
if err != nil {
return errors.Wrap(err, "client.Post")
}
if resp.StatusCode != 200 {
return errors.Errorf("server response unexpected: %v (%v)", resp.Status, resp.StatusCode)
}
return nil
}
// ErrIsNotExist is returned whenever the requested file does not exist on the
// server.
type ErrIsNotExist struct {
restic.Handle
}
func (e ErrIsNotExist) Error() string {
return fmt.Sprintf("%v does not exist", e.Handle)
}
// IsNotExist returns true if the error was caused by a non-existing file.
func (b *restBackend) IsNotExist(err error) bool {
err = errors.Cause(err)
_, ok := err.(ErrIsNotExist)
return ok
}
// Load returns a reader that yields the contents of the file at h at the
// given offset. If length is nonzero, only a portion of the file is
// returned. The returned reader must be closed after use.
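// For example, Load(ctx, h, 100, 200) requests bytes 200-299, while a length
// of zero requests everything from offset to the end of the file.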
func (b *restBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
debug.Log("Load %v, length %v, offset %v", h, length, offset)
if err := h.Valid(); err != nil {
return nil, err
}
if offset < 0 {
return nil, errors.New("offset is negative")
}
if length < 0 {
return nil, errors.Errorf("invalid length %d", length)
}
req, err := http.NewRequest("GET", b.Filename(h), nil)
if err != nil {
return nil, errors.Wrap(err, "http.NewRequest")
}
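// request a partial response via an HTTP Range header; the end of the byte
// range is inclusive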
byteRange := fmt.Sprintf("bytes=%d-", offset)
if length > 0 {
byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1)
}
req.Header.Add("Range", byteRange)
debug.Log("Load(%v) send range %v", h, byteRange)
b.sem.GetToken()
resp, err := ctxhttp.Do(ctx, b.client, req)
b.sem.ReleaseToken()
if err != nil {
if resp != nil {
_, _ = io.Copy(ioutil.Discard, resp.Body)
_ = resp.Body.Close()
}
return nil, errors.Wrap(err, "client.Do")
}
if resp.StatusCode == http.StatusNotFound {
_ = resp.Body.Close()
return nil, ErrIsNotExist{h}
}
if resp.StatusCode != 200 && resp.StatusCode != 206 {
_ = resp.Body.Close()
return nil, errors.Errorf("unexpected HTTP response (%v): %v", resp.StatusCode, resp.Status)
}
return resp.Body, nil
}
// Stat returns information about a blob.
func (b *restBackend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
if err := h.Valid(); err != nil {
return restic.FileInfo{}, err
}
b.sem.GetToken()
resp, err := ctxhttp.Head(ctx, b.client, b.Filename(h))
b.sem.ReleaseToken()
if err != nil {
return restic.FileInfo{}, errors.Wrap(err, "client.Head")
}
_, _ = io.Copy(ioutil.Discard, resp.Body)
if err = resp.Body.Close(); err != nil {
return restic.FileInfo{}, errors.Wrap(err, "Close")
}
if resp.StatusCode == http.StatusNotFound {
return restic.FileInfo{}, ErrIsNotExist{h}
}
if resp.StatusCode != 200 {
return restic.FileInfo{}, errors.Errorf("unexpected HTTP response (%v): %v", resp.StatusCode, resp.Status)
}
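// a ContentLength of -1 means the server sent no Content-Length header; the
// size is required here, so treat an unknown length as an error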
if resp.ContentLength < 0 {
return restic.FileInfo{}, errors.New("negative content length")
}
bi := restic.FileInfo{
Size: resp.ContentLength,
}
return bi, nil
}
// Test returns true if a blob of the given type and name exists in the backend.
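// Note that any error from Stat, including ErrIsNotExist, is reported as the
// blob not existing.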
func (b *restBackend) Test(ctx context.Context, h restic.Handle) (bool, error) {
_, err := b.Stat(ctx, h)
if err != nil {
return false, nil
}
return true, nil
}
// Remove removes the blob with the given name and type.
func (b *restBackend) Remove(ctx context.Context, h restic.Handle) error {
if err := h.Valid(); err != nil {
return err
}
req, err := http.NewRequest("DELETE", b.Filename(h), nil)
if err != nil {
return errors.Wrap(err, "http.NewRequest")
}
b.sem.GetToken()
resp, err := ctxhttp.Do(ctx, b.client, req)
b.sem.ReleaseToken()
if err != nil {
return errors.Wrap(err, "client.Do")
}
if resp.StatusCode == http.StatusNotFound {
_ = resp.Body.Close()
return ErrIsNotExist{h}
}
if resp.StatusCode != 200 {
_ = resp.Body.Close()
return errors.Errorf("blob not removed, server response: %v (%v)", resp.Status, resp.StatusCode)
}
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
return errors.Wrap(err, "Copy")
}
return errors.Wrap(resp.Body.Close(), "Close")
}
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If ctx is cancelled, sending stops.
func (b *restBackend) List(ctx context.Context, t restic.FileType) <-chan string {
ch := make(chan string)
url := b.Dirname(restic.Handle{Type: t})
if !strings.HasSuffix(url, "/") {
url += "/"
}
b.sem.GetToken()
resp, err := ctxhttp.Get(ctx, b.client, url)
b.sem.ReleaseToken()
if resp != nil {
defer func() {
_, _ = io.Copy(ioutil.Discard, resp.Body)
e := resp.Body.Close()
if err == nil {
err = errors.Wrap(e, "Close")
}
}()
}
if err != nil {
close(ch)
return ch
}
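// the server is expected to answer the directory GET with a JSON array of
// blob names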
dec := json.NewDecoder(resp.Body)
var list []string
if err = dec.Decode(&list); err != nil {
close(ch)
return ch
}
go func() {
defer close(ch)
for _, m := range list {
select {
case ch <- m:
case <-ctx.Done():
return
}
}
}()
return ch
}
// Close closes all open files.
func (b *restBackend) Close() error {
// this does not need to do anything, all open files are closed within the
// same function.
return nil
}
// removeKeys removes all blobs of the given type t.
func (b *restBackend) removeKeys(ctx context.Context, t restic.FileType) error {
for key := range b.List(ctx, t) {
err := b.Remove(ctx, restic.Handle{Type: t, Name: key})
if err != nil {
return err
}
}
return nil
}
// Delete removes all data in the backend.
func (b *restBackend) Delete(ctx context.Context) error {
alltypes := []restic.FileType{
restic.DataFile,
restic.KeyFile,
restic.LockFile,
restic.SnapshotFile,
restic.IndexFile}
for _, t := range alltypes {
err := b.removeKeys(ctx, t)
if err != nil {
return err
}
}
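// the config file is not included in alltypes; remove it separately and
// treat a missing config as success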
err := b.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
if err != nil && b.IsNotExist(err) {
return nil
}
return err
}