
Merge pull request #3805 from greatroar/global

cmd/restic, limiter: Move config knowledge to internal packages
Commit 3e1de52e0a by MichaelEischer, 2022-07-02 21:56:35 +02:00 (committed by GitHub)
4 changed files with 29 additions and 38 deletions

cmd/restic/global.go

@@ -60,14 +60,11 @@ type GlobalOptions struct {
 	JSON          bool
 	CacheDir      string
 	NoCache       bool
-	CACerts       []string
-	InsecureTLS   bool
-	TLSClientCert string
 	CleanupCache  bool
 	Compression   repository.CompressionMode
-	LimitUploadKb   int
-	LimitDownloadKb int
+	backend.TransportOptions
+	limiter.Limits
 
 	ctx      context.Context
 	password string
@@ -117,13 +114,13 @@ func init() {
 	f.BoolVarP(&globalOptions.JSON, "json", "", false, "set output mode to JSON for commands that support it")
 	f.StringVar(&globalOptions.CacheDir, "cache-dir", "", "set the cache `directory`. (default: use system default cache directory)")
 	f.BoolVar(&globalOptions.NoCache, "no-cache", false, "do not use a local cache")
-	f.StringSliceVar(&globalOptions.CACerts, "cacert", nil, "`file` to load root certificates from (default: use system certificates)")
-	f.StringVar(&globalOptions.TLSClientCert, "tls-client-cert", "", "path to a `file` containing PEM encoded TLS client certificate and private key")
+	f.StringSliceVar(&globalOptions.RootCertFilenames, "cacert", nil, "`file` to load root certificates from (default: use system certificates)")
+	f.StringVar(&globalOptions.TLSClientCertKeyFilename, "tls-client-cert", "", "path to a `file` containing PEM encoded TLS client certificate and private key")
 	f.BoolVar(&globalOptions.InsecureTLS, "insecure-tls", false, "skip TLS certificate verification when connecting to the repo (insecure)")
 	f.BoolVar(&globalOptions.CleanupCache, "cleanup-cache", false, "auto remove old cache directories")
 	f.Var(&globalOptions.Compression, "compression", "compression mode (only available for repo format version 2), one of (auto|off|max)")
-	f.IntVar(&globalOptions.LimitUploadKb, "limit-upload", 0, "limits uploads to a maximum rate in KiB/s. (default: unlimited)")
-	f.IntVar(&globalOptions.LimitDownloadKb, "limit-download", 0, "limits downloads to a maximum rate in KiB/s. (default: unlimited)")
+	f.IntVar(&globalOptions.Limits.UploadKb, "limit-upload", 0, "limits uploads to a maximum rate in KiB/s. (default: unlimited)")
+	f.IntVar(&globalOptions.Limits.DownloadKb, "limit-download", 0, "limits downloads to a maximum rate in KiB/s. (default: unlimited)")
 	f.StringSliceVarP(&globalOptions.Options, "option", "o", []string{}, "set extended option (`key=value`, can be specified multiple times)")
 
 	// Use our "generate" command instead of the cobra provided "completion" command
 	cmdRoot.CompletionOptions.DisableDefaultCmd = true
@@ -681,18 +678,13 @@ func open(s string, gopts GlobalOptions, opts options.Options) (restic.Backend,
 		return nil, err
 	}
 
-	tropts := backend.TransportOptions{
-		RootCertFilenames:        globalOptions.CACerts,
-		TLSClientCertKeyFilename: globalOptions.TLSClientCert,
-		InsecureTLS:              globalOptions.InsecureTLS,
-	}
-	rt, err := backend.Transport(tropts)
+	rt, err := backend.Transport(globalOptions.TransportOptions)
 	if err != nil {
 		return nil, err
 	}
 
 	// wrap the transport so that the throughput via HTTP is limited
-	lim := limiter.NewStaticLimiter(gopts.LimitUploadKb, gopts.LimitDownloadKb)
+	lim := limiter.NewStaticLimiter(gopts.Limits)
 	rt = lim.Transport(rt)
 
 	switch loc.Scheme {
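For context, limiting HTTP throughput here means wrapping the transport so that bodies are read through a token bucket. Below is a minimal, self-contained sketch of that idea using the same github.com/juju/ratelimit package the limiter package builds on; limitedTransport and limitedBody are illustrative names, and this is not restic's actual staticLimiter implementation.

package main

import (
	"io"
	"net/http"

	"github.com/juju/ratelimit"
)

// limitedBody combines a rate-limited reader with the original body's Closer,
// so the wrapped response body still satisfies io.ReadCloser.
type limitedBody struct {
	io.Reader
	io.Closer
}

// limitedTransport throttles response bodies of the wrapped RoundTripper;
// each byte read from the body drains one token from the bucket.
type limitedTransport struct {
	next   http.RoundTripper
	bucket *ratelimit.Bucket
}

func (t limitedTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, err := t.next.RoundTrip(req)
	if err != nil || resp.Body == nil {
		return resp, err
	}
	resp.Body = limitedBody{
		Reader: ratelimit.Reader(resp.Body, t.bucket), // blocks once the budget is spent
		Closer: resp.Body,
	}
	return resp, nil
}

func main() {
	// Allow roughly 42 KiB/s of downloads through this client.
	bucket := ratelimit.NewBucketWithRate(42*1024, 42*1024)
	client := &http.Client{Transport: limitedTransport{next: http.DefaultTransport, bucket: bucket}}

	resp, err := client.Get("https://example.com/")
	if err != nil {
		return
	}
	defer resp.Body.Close()
	_, _ = io.Copy(io.Discard, resp.Body)
}

The real limiter also wraps the request side (see Upstream/UpstreamWriter in the tests below), so uploads are throttled as well.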
@@ -762,12 +754,7 @@ func create(s string, opts options.Options) (restic.Backend, error) {
 		return nil, err
 	}
 
-	tropts := backend.TransportOptions{
-		RootCertFilenames:        globalOptions.CACerts,
-		TLSClientCertKeyFilename: globalOptions.TLSClientCert,
-		InsecureTLS:              globalOptions.InsecureTLS,
-	}
-	rt, err := backend.Transport(tropts)
+	rt, err := backend.Transport(globalOptions.TransportOptions)
 	if err != nil {
 		return nil, err
 	}
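The cmd/restic side of this change leans on Go struct embedding: because backend.TransportOptions and limiter.Limits are now embedded in GlobalOptions, the pflag bindings write straight into the embedded fields (e.g. &globalOptions.Limits.UploadKb), and the sub-structs can later be handed to the internal packages without any repacking. A minimal sketch of the pattern, with simplified stand-in types (Limits, GlobalOptions and connect are illustrative, not restic's real definitions):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// Limits mirrors the idea of limiter.Limits: zero means unlimited.
type Limits struct {
	UploadKb   int
	DownloadKb int
}

// GlobalOptions embeds Limits, so flag bindings and later consumers
// both see the same struct without command-level copying.
type GlobalOptions struct {
	NoCache bool
	Limits
}

// connect stands in for code like backend.Transport or limiter.NewStaticLimiter
// that receives the embedded struct as-is.
func connect(l Limits) {
	fmt.Printf("upload cap: %d KiB/s, download cap: %d KiB/s\n", l.UploadKb, l.DownloadKb)
}

func main() {
	var opts GlobalOptions
	f := pflag.NewFlagSet("restic", pflag.ExitOnError)
	f.BoolVar(&opts.NoCache, "no-cache", false, "do not use a local cache")
	// Bind flags straight onto the embedded struct's fields.
	f.IntVar(&opts.Limits.UploadKb, "limit-upload", 0, "upload rate cap in KiB/s (0 = unlimited)")
	f.IntVar(&opts.Limits.DownloadKb, "limit-download", 0, "download rate cap in KiB/s (0 = unlimited)")
	_ = f.Parse([]string{"--limit-upload=256"})

	// Pass the embedded struct through without repacking individual fields.
	connect(opts.Limits)
}

Because the flags mutate the embedded structs in place, functions like open() and create() can simply pass globalOptions.TransportOptions and gopts.Limits along, which is exactly what the hunks above do.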

internal/limiter/limiter_backend_test.go

@@ -36,7 +36,7 @@ func TestLimitBackendSave(t *testing.T) {
 		}
 		return nil
 	}
 
-	limiter := NewStaticLimiter(42*1024, 42*1024)
+	limiter := NewStaticLimiter(Limits{42 * 1024, 42 * 1024})
 	limbe := LimitBackend(be, limiter)
 
 	rd := restic.NewByteReader(data, nil)
@@ -82,7 +82,7 @@ func TestLimitBackendLoad(t *testing.T) {
 		}
 		return newTracedReadCloser(src), nil
 	}
 
-	limiter := NewStaticLimiter(42*1024, 42*1024)
+	limiter := NewStaticLimiter(Limits{42 * 1024, 42 * 1024})
 	limbe := LimitBackend(be, limiter)
 
 	err := limbe.Load(context.TODO(), testHandle, 0, 0, func(rd io.Reader) error {

internal/limiter/static_limiter.go

@@ -12,20 +12,27 @@ type staticLimiter struct {
 	downstream *ratelimit.Bucket
 }
 
+// Limits represents static upload and download limits.
+// For both, zero means unlimited.
+type Limits struct {
+	UploadKb   int
+	DownloadKb int
+}
+
 // NewStaticLimiter constructs a Limiter with a fixed (static) upload and
 // download rate cap
-func NewStaticLimiter(uploadKb, downloadKb int) Limiter {
+func NewStaticLimiter(l Limits) Limiter {
 	var (
 		upstreamBucket   *ratelimit.Bucket
 		downstreamBucket *ratelimit.Bucket
 	)
 
-	if uploadKb > 0 {
-		upstreamBucket = ratelimit.NewBucketWithRate(toByteRate(uploadKb), int64(toByteRate(uploadKb)))
+	if l.UploadKb > 0 {
+		upstreamBucket = ratelimit.NewBucketWithRate(toByteRate(l.UploadKb), int64(toByteRate(l.UploadKb)))
 	}
 
-	if downloadKb > 0 {
-		downstreamBucket = ratelimit.NewBucketWithRate(toByteRate(downloadKb), int64(toByteRate(downloadKb)))
+	if l.DownloadKb > 0 {
+		downstreamBucket = ratelimit.NewBucketWithRate(toByteRate(l.DownloadKb), int64(toByteRate(l.DownloadKb)))
 	}
 
 	return staticLimiter{
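As the new doc comment says, a zero value in either field leaves that direction unlimited. A short usage sketch of the resulting API, written as if inside the restic module (the internal packages cannot be imported from outside); the function throttle and its parameters are hypothetical, but the limiter calls match the ones visible in this diff:

package example

import (
	"net/http"

	"github.com/restic/restic/internal/limiter"
	"github.com/restic/restic/internal/restic"
)

// throttle applies the same static limits to an HTTP transport and to a
// restic backend, the two ways the limiter is used after this change.
func throttle(rt http.RoundTripper, be restic.Backend) (http.RoundTripper, restic.Backend) {
	// UploadKb: 256 caps uploads at 256 KiB/s; DownloadKb: 0 leaves downloads unlimited.
	lim := limiter.NewStaticLimiter(limiter.Limits{UploadKb: 256, DownloadKb: 0})

	rt = lim.Transport(rt)             // wrap the HTTP transport
	be = limiter.LimitBackend(be, lim) // wrap backend Save/Load
	return rt, be
}

Sharing one limiter between both wrappers means the caps apply to their combined traffic.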

internal/limiter/static_limiter_test.go

@@ -15,22 +15,19 @@ func TestLimiterWrapping(t *testing.T) {
 	reader := bytes.NewReader([]byte{})
 	writer := new(bytes.Buffer)
 
-	for _, limits := range []struct {
-		upstream   int
-		downstream int
-	}{
+	for _, limits := range []Limits{
 		{0, 0},
 		{42, 0},
 		{0, 42},
 		{42, 42},
 	} {
-		limiter := NewStaticLimiter(limits.upstream*1024, limits.downstream*1024)
+		limiter := NewStaticLimiter(limits)
 
-		mustWrapUpstream := limits.upstream > 0
+		mustWrapUpstream := limits.UploadKb > 0
 		test.Equals(t, limiter.Upstream(reader) != reader, mustWrapUpstream)
 		test.Equals(t, limiter.UpstreamWriter(writer) != writer, mustWrapUpstream)
 
-		mustWrapDownstream := limits.downstream > 0
+		mustWrapDownstream := limits.DownloadKb > 0
 		test.Equals(t, limiter.Downstream(reader) != reader, mustWrapDownstream)
 		test.Equals(t, limiter.DownstreamWriter(writer) != writer, mustWrapDownstream)
 	}
@@ -51,7 +48,7 @@ func (r *tracedReadCloser) Close() error {
 }
 
 func TestRoundTripperReader(t *testing.T) {
-	limiter := NewStaticLimiter(42*1024, 42*1024)
+	limiter := NewStaticLimiter(Limits{42 * 1024, 42 * 1024})
 	data := make([]byte, 1234)
 	_, err := io.ReadFull(rand.Reader, data)
 	test.OK(t, err)
@@ -89,7 +86,7 @@ func TestRoundTripperReader(t *testing.T) {
 }
 
 func TestRoundTripperCornerCases(t *testing.T) {
-	limiter := NewStaticLimiter(42*1024, 42*1024)
+	limiter := NewStaticLimiter(Limits{42 * 1024, 42 * 1024})
 	rt := limiter.Transport(roundTripper(func(req *http.Request) (*http.Response, error) {
 		return &http.Response{}, nil