gofmt all files
Apparently the rules for comment formatting have changed with go 1.19.
This commit is contained in:
parent dbca93da28
commit f414db987d
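The change itself is mechanical: starting with Go 1.19, gofmt rewrites doc comments so that section headings are written as "# Heading", list items use "-" instead of "*", and code samples inside comments become tab-indented blocks. As a rough illustration only (a hypothetical file, not part of restic), a doc comment in the new style looks like this:

// Package example is a hypothetical package illustrating the doc comment
// style that gofmt produces since Go 1.19.
//
// # Overview
//
// Supported cache modes:
//   - default
//   - none
//
// Code samples inside a doc comment are indented with a tab:
//
//	mode := Pick("default")
package example

// Pick is a placeholder so the file compiles on its own.
func Pick(mode string) string { return mode }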
@@ -148,10 +148,10 @@ func parsePercentage(s string) (float64, error) {
// prepareCheckCache configures a special cache directory for check.
//
-// * if --with-cache is specified, the default cache is used
-// * if the user explicitly requested --no-cache, we don't use any cache
-// * if the user provides --cache-dir, we use a cache in a temporary sub-directory of the specified directory and the sub-directory is deleted after the check
-// * by default, we use a cache in a temporary directory that is deleted after the check
+// - if --with-cache is specified, the default cache is used
+// - if the user explicitly requested --no-cache, we don't use any cache
+// - if the user provides --cache-dir, we use a cache in a temporary sub-directory of the specified directory and the sub-directory is deleted after the check
+// - by default, we use a cache in a temporary directory that is deleted after the check
func prepareCheckCache(opts CheckOptions, gopts *GlobalOptions) (cleanup func()) {
	cleanup = func() {}
	if opts.WithCache {
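The list in that comment maps to a small decision: use the default cache, no cache at all, or a throwaway temporary directory that is removed afterwards. The sketch below is hypothetical (the helper name chooseCheckCache and its signature are invented here and differ from restic's prepareCheckCache); it only illustrates the described rules.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// chooseCheckCache illustrates the cache-selection rules from the doc comment
// above. It returns the cache directory to use ("" means no cache) and a
// cleanup function that removes any temporary directory it created.
func chooseCheckCache(withCache, noCache bool, cacheDir, defaultCache string) (string, func(), error) {
	cleanup := func() {}

	switch {
	case withCache:
		// --with-cache: use the default cache, nothing to clean up.
		return defaultCache, cleanup, nil
	case noCache:
		// --no-cache: do not use any cache.
		return "", cleanup, nil
	}

	// Otherwise create a temporary directory, below --cache-dir if one was
	// given (an empty dir makes MkdirTemp use the system temp directory),
	// and delete it again after the check.
	tmp, err := os.MkdirTemp(cacheDir, "restic-check-cache-")
	if err != nil {
		return "", cleanup, err
	}
	cleanup = func() { _ = os.RemoveAll(tmp) }
	return tmp, cleanup, nil
}

func main() {
	dir, cleanup, err := chooseCheckCache(false, false, "", filepath.Join(os.TempDir(), "restic"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer cleanup()
	fmt.Println("using cache directory:", dir)
}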
@@ -8,7 +8,7 @@ import (
	rtest "github.com/restic/restic/internal/test"
)

-//TestFillSecondaryGlobalOpts tests valid and invalid data on fillSecondaryGlobalOpts-function
+// TestFillSecondaryGlobalOpts tests valid and invalid data on fillSecondaryGlobalOpts-function
func TestFillSecondaryGlobalOpts(t *testing.T) {
	//secondaryRepoTestCase defines a struct for test cases
	type secondaryRepoTestCase struct {
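The function above walks a table of secondaryRepoTestCase values. As a generic illustration of that table-driven pattern (a toy add function, not restic's actual test cases):

package example

import "testing"

// add is a trivial function under test, used only for illustration.
func add(a, b int) int { return a + b }

// TestAdd shows the table-driven pattern: a slice of cases, one subtest each.
func TestAdd(t *testing.T) {
	cases := []struct {
		name string
		a, b int
		want int
	}{
		{name: "zero", a: 0, b: 0, want: 0},
		{name: "positive", a: 2, b: 3, want: 5},
		{name: "negative", a: -1, b: 1, want: 0},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := add(tc.a, tc.b); got != tc.want {
				t.Errorf("add(%d, %d) = %d, want %d", tc.a, tc.b, got, tc.want)
			}
		})
	}
}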
doc.go
@@ -1,6 +1,6 @@
// Package restic gives a (very brief) introduction to the structure of source code.
//
-// Overview
+// # Overview
//
// The packages are structured so that cmd/ contains the main package for the
// restic binary, and internal/ contains almost all code in library form. We've
@@ -28,10 +28,10 @@ import (
// Backend stores data in a GCS bucket.
//
// The service account used to access the bucket must have these permissions:
-// * storage.objects.create
-// * storage.objects.delete
-// * storage.objects.get
-// * storage.objects.list
+// - storage.objects.create
+// - storage.objects.delete
+// - storage.objects.get
+// - storage.objects.list
type Backend struct {
	gcsClient *storage.Client
	projectID string
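The four permissions correspond to the object operations a backend needs: create, get, list and delete. Below is a minimal, hypothetical sketch of those calls using the cloud.google.com/go/storage client; the bucket and object names are invented, and this is not restic's gs backend code.

package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // picks up the service account from the environment
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	bucket := client.Bucket("my-restic-bucket") // hypothetical bucket name

	// storage.objects.create: write an object.
	w := bucket.Object("keys/example").NewWriter(ctx)
	if _, err := w.Write([]byte("payload")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// storage.objects.get: read it back.
	r, err := bucket.Object("keys/example").NewReader(ctx)
	if err != nil {
		log.Fatal(err)
	}
	data, err := io.ReadAll(r)
	r.Close()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(data), "bytes read")

	// storage.objects.list: enumerate objects below a prefix.
	it := bucket.Objects(ctx, &storage.Query{Prefix: "keys/"})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(attrs.Name)
	}

	// storage.objects.delete: remove the object again.
	if err := bucket.Object("keys/example").Delete(ctx); err != nil {
		log.Fatal(err)
	}
}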
@@ -32,9 +32,9 @@ func init() {

// ParseConfig parses the string s and extracts the sftp config. The
// supported configuration formats are sftp://user@host[:port]/directory
-// and sftp:user@host:directory. The directory will be path Cleaned and can
-// be an absolute path if it starts with a '/' (e.g.
-// sftp://user@host//absolute and sftp:user@host:/absolute).
+// and sftp:user@host:directory. The directory will be path Cleaned and can
+// be an absolute path if it starts with a '/' (e.g.
+// sftp://user@host//absolute and sftp:user@host:/absolute).
func ParseConfig(s string) (interface{}, error) {
	var user, host, port, dir string
	switch {
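A rough sketch of how the two address forms described in that comment can be split into their parts, using net/url and strings; this is an illustration only, not restic's ParseConfig.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// splitSFTP is a hypothetical helper that extracts user, host, port and
// directory from the two address forms described above.
func splitSFTP(s string) (user, host, port, dir string, err error) {
	if strings.HasPrefix(s, "sftp://") {
		// Form 1: sftp://user@host[:port]/directory
		u, err := url.Parse(s)
		if err != nil {
			return "", "", "", "", err
		}
		if u.User != nil {
			user = u.User.Username()
		}
		return user, u.Hostname(), u.Port(), u.Path, nil
	}

	// Form 2: sftp:user@host:directory
	rest := strings.TrimPrefix(s, "sftp:")
	if at := strings.Index(rest, "@"); at >= 0 {
		user, rest = rest[:at], rest[at+1:]
	}
	host, dir, ok := strings.Cut(rest, ":")
	if !ok {
		return "", "", "", "", fmt.Errorf("invalid sftp address %q", s)
	}
	return user, host, "", dir, nil
}

func main() {
	fmt.Println(splitSFTP("sftp://backup@example.com:2222/srv/restic"))
	fmt.Println(splitSFTP("sftp:backup@example.com:/srv/restic"))
}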
@@ -1,6 +1,6 @@
// Package test contains a test suite with benchmarks for restic backends.
//
-// Overview
+// # Overview
//
// For the test suite to work a few functions need to be implemented to create
// new config, create a backend, open it and run cleanup tasks afterwards. The
@@ -10,30 +10,31 @@
// then the methods RunTests() and RunBenchmarks() can be used to run the
// individual tests and benchmarks as subtests/subbenchmarks.
//
-// Example
+// # Example
//
// Assuming a *Suite is returned by newTestSuite(), the tests and benchmarks
// can be run like this:
// func newTestSuite(t testing.TB) *test.Suite {
// return &test.Suite{
// Create: func(cfg interface{}) (restic.Backend, error) {
// [...]
// },
// [...]
// }
// }
//
// func TestSuiteBackendMem(t *testing.T) {
// newTestSuite(t).RunTests(t)
// }
// func newTestSuite(t testing.TB) *test.Suite {
// return &test.Suite{
// Create: func(cfg interface{}) (restic.Backend, error) {
// [...]
// },
// [...]
// }
// }
//
// func BenchmarkSuiteBackendMem(b *testing.B) {
// newTestSuite(b).RunBenchmarks(b)
// }
// func TestSuiteBackendMem(t *testing.T) {
// newTestSuite(t).RunTests(t)
// }
//
// func BenchmarkSuiteBackendMem(b *testing.B) {
// newTestSuite(b).RunBenchmarks(b)
// }
//
// The functions are run in alphabetical order.
//
-// Add new tests
+// # Add new tests
//
// A new test or benchmark can be added by implementing a method on *Suite
// with the name starting with "Test" and a single *testing.T parameter for
@@ -2,7 +2,7 @@
// the following the abstractions used for this package are listed. More
// information can be found in the restic design document.
//
-// File
+// # File
//
// A file is a named handle for some data saved in the backend. For the local
// backend, this corresponds to actual files saved to disk. Usually, the SHA256
@@ -11,18 +11,17 @@
// encrypted before being saved in a backend. This means that the name is the
// hash of the ciphertext.
//
-// Blob
+// # Blob
//
// A blob is a number of bytes that has a type (data or tree). Blobs are
// identified by an ID, which is the SHA256 hash of the blobs' contents. One or
// more blobs are bundled together in a Pack and then saved to the backend.
// Blobs are always encrypted before being bundled in a Pack.
//
-// Pack
+// # Pack
//
// A Pack is a File in the backend that contains one or more (encrypted) blobs,
// followed by a header at the end of the Pack. The header is encrypted and
// contains the ID, type, length and offset for each blob contained in the
// Pack.
-//
package repository
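As a rough model of the File/Blob/Pack layout described above, the per-blob information in a pack header could look like the hypothetical types below (not restic's actual repository code):

package main

import "fmt"

// ID is the SHA256 hash that names files and identifies blobs.
type ID [32]byte

// BlobType distinguishes data blobs from tree blobs.
type BlobType uint8

const (
	DataBlob BlobType = iota
	TreeBlob
)

// headerEntry sketches what the encrypted pack header stores for each blob:
// its ID, type, length and offset within the pack file.
type headerEntry struct {
	ID     ID
	Type   BlobType
	Length uint32
	Offset uint32
}

// packLayout sketches a pack: encrypted blobs back to back, header at the end.
type packLayout struct {
	Blobs  [][]byte      // encrypted blobs, stored back to back
	Header []headerEntry // one entry per blob, encrypted as a whole
}

func main() {
	p := packLayout{Blobs: [][]byte{[]byte("encrypted blob 1"), []byte("encrypted blob 2")}}
	offset := uint32(0)
	for i, b := range p.Blobs {
		p.Header = append(p.Header, headerEntry{Type: DataBlob, Length: uint32(len(b)), Offset: offset})
		offset += uint32(len(b))
		fmt.Printf("blob %d: offset=%d length=%d\n", i, p.Header[i].Offset, p.Header[i].Length)
	}
}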
@@ -217,9 +217,9 @@ func (sn *Snapshot) HasTags(l []string) bool {
}

// HasTagList returns true if either
-// - the snapshot satisfies at least one TagList, so there is a TagList in l
-// for which all tags are included in sn, or
-// - l is empty
+// - the snapshot satisfies at least one TagList, so there is a TagList in l
+// for which all tags are included in sn, or
+// - l is empty
func (sn *Snapshot) HasTagList(l []TagList) bool {
	debug.Log("testing snapshot with tags %v against list: %v", sn.Tags, l)
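The comment describes a simple rule: an empty list matches every snapshot, otherwise at least one TagList must be fully contained in the snapshot's tags. A standalone sketch of that rule with simplified types (not the restic implementation):

package main

import "fmt"

// TagList is a set of tags that must all be present, mirroring the type above.
type TagList []string

// hasTagList reports whether tags satisfies at least one TagList in l, or l is empty.
func hasTagList(tags []string, l []TagList) bool {
	if len(l) == 0 {
		return true
	}

	have := make(map[string]bool, len(tags))
	for _, t := range tags {
		have[t] = true
	}

	for _, list := range l {
		all := true
		for _, t := range list {
			if !have[t] {
				all = false
				break
			}
		}
		if all {
			return true
		}
	}
	return false
}

func main() {
	tags := []string{"home", "photos"}
	fmt.Println(hasTagList(tags, nil))                                  // true: empty list
	fmt.Println(hasTagList(tags, []TagList{{"home", "photos"}}))        // true: fully contained
	fmt.Println(hasTagList(tags, []TagList{{"work"}, {"home", "tmp"}})) // false
}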
@@ -8,10 +8,10 @@
// Here is high-level pseudo-code of how the Restorer attempts to achieve
// these goals:
//
-// while there are packs to process
-// choose a pack to process [1]
-// retrieve the pack from the backend [2]
-// write pack blobs to the files that need them [3]
+// while there are packs to process
+// choose a pack to process [1]
+// retrieve the pack from the backend [2]
+// write pack blobs to the files that need them [3]
//
// Retrieval of repository packs (step [2]) and writing target files (step [3])
// are performed concurrently on multiple goroutines.
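A minimal sketch of that pseudo-code with the concurrency the comment mentions: a few goroutines fetch packs while another writes blobs to target files. Names, channels and the fake payload are invented for illustration; the real Restorer is considerably more involved.

package main

import (
	"fmt"
	"sync"
)

type pack struct{ id string }

type blobWrite struct {
	file string
	data []byte
}

// restore walks the pseudo-code: choose a pack, retrieve it, write its blobs.
func restore(packs []pack) {
	toFetch := make(chan pack)
	writes := make(chan blobWrite)

	var fetchers sync.WaitGroup
	for i := 0; i < 2; i++ { // step [2]: retrieve packs concurrently
		fetchers.Add(1)
		go func() {
			defer fetchers.Done()
			for p := range toFetch {
				// Pretend the pack was downloaded and one blob extracted.
				writes <- blobWrite{file: p.id + ".bin", data: []byte("blob from " + p.id)}
			}
		}()
	}

	var writers sync.WaitGroup
	writers.Add(1)
	go func() { // step [3]: write blobs to the files that need them
		defer writers.Done()
		for w := range writes {
			fmt.Printf("write %d bytes to %s\n", len(w.data), w.file)
		}
	}()

	for _, p := range packs { // step [1]: choose a pack to process
		toFetch <- p
	}
	close(toFetch)
	fetchers.Wait()
	close(writes)
	writers.Wait()
}

func main() {
	restore([]pack{{id: "pack1"}, {id: "pack2"}, {id: "pack3"}})
}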
@@ -12,7 +12,7 @@ import (
// multiple files can be written to concurrently.
// multiple blobs can be concurrently written to the same file.
// TODO I am not 100% convinced this is necessary, i.e. it may be okay
-// to use multiple os.File to write to the same target file
+// to use multiple os.File to write to the same target file
type filesWriter struct {
	buckets []filesWriterBucket
}
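As a simplified illustration of writing to the same target file from several goroutines with a single *os.File, which is the alternative the TODO above mentions (hypothetical code, not restic's filesWriter):

package main

import (
	"log"
	"os"
	"sync"
)

func main() {
	f, err := os.OpenFile("target.bin", os.O_CREATE|os.O_RDWR, 0o644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Two goroutines write different blobs to different offsets of the same
	// file; WriteAt carries its own offset, so no shared file position is used.
	blobs := []struct {
		offset int64
		data   []byte
	}{
		{offset: 0, data: []byte("first blob ")},
		{offset: 11, data: []byte("second blob")},
	}

	var wg sync.WaitGroup
	for _, b := range blobs {
		wg.Add(1)
		go func(offset int64, data []byte) {
			defer wg.Done()
			if _, err := f.WriteAt(data, offset); err != nil {
				log.Println("write failed:", err)
			}
		}(b.offset, b.data)
	}
	wg.Wait()
}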