mirror of https://github.com/octoleo/restic.git synced 2024-11-24 05:37:37 +00:00

gofmt all files

Apparently the rules for comment formatting have changed with go 1.19.
Michael Eischer 2022-08-19 19:12:26 +02:00
parent dbca93da28
commit f414db987d
10 changed files with 42 additions and 42 deletions
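For reference, this is roughly the doc comment style that gofmt emits since Go 1.19 and that the hunks below adopt: "#" headings, "-" list markers, and tab-indented code blocks. The package name and identifiers here are made up purely for illustration.

// Package example illustrates the gofmt >= 1.19 doc comment conventions
// applied by this commit.
//
// # Headings
//
// A comment line starting with "# " is rendered as a heading by godoc.
//
// List items use "-" markers:
//   - first item
//   - second item
//
// Code blocks inside comments are indented with a tab:
//
//	func Example() {}
package example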

View File

@@ -148,10 +148,10 @@ func parsePercentage(s string) (float64, error) {
 // prepareCheckCache configures a special cache directory for check.
 //
-// * if --with-cache is specified, the default cache is used
-// * if the user explicitly requested --no-cache, we don't use any cache
-// * if the user provides --cache-dir, we use a cache in a temporary sub-directory of the specified directory and the sub-directory is deleted after the check
-// * by default, we use a cache in a temporary directory that is deleted after the check
+// - if --with-cache is specified, the default cache is used
+// - if the user explicitly requested --no-cache, we don't use any cache
+// - if the user provides --cache-dir, we use a cache in a temporary sub-directory of the specified directory and the sub-directory is deleted after the check
+// - by default, we use a cache in a temporary directory that is deleted after the check
 func prepareCheckCache(opts CheckOptions, gopts *GlobalOptions) (cleanup func()) {
 	cleanup = func() {}
 	if opts.WithCache {
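The comment above describes the temporary-cache behaviour. A minimal sketch of the create-then-clean-up pattern it implies follows; the function name newTempCache is hypothetical and this is not restic's actual implementation.

package main

import (
	"fmt"
	"os"
)

// newTempCache creates a throwaway cache directory and returns a cleanup
// function that removes it again after the check, mirroring the behaviour
// described above. Illustrative sketch only.
func newTempCache(baseDir string) (dir string, cleanup func(), err error) {
	dir, err = os.MkdirTemp(baseDir, "restic-check-cache-")
	if err != nil {
		return "", func() {}, err
	}
	return dir, func() { _ = os.RemoveAll(dir) }, nil
}

func main() {
	dir, cleanup, err := newTempCache("")
	if err != nil {
		panic(err)
	}
	defer cleanup()
	fmt.Println("using temporary cache in", dir)
}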

View File

@@ -8,7 +8,7 @@ import (
 	rtest "github.com/restic/restic/internal/test"
 )
 
-//TestFillSecondaryGlobalOpts tests valid and invalid data on fillSecondaryGlobalOpts-function
+// TestFillSecondaryGlobalOpts tests valid and invalid data on fillSecondaryGlobalOpts-function
 func TestFillSecondaryGlobalOpts(t *testing.T) {
 	//secondaryRepoTestCase defines a struct for test cases
 	type secondaryRepoTestCase struct {

doc.go
View File

@@ -1,6 +1,6 @@
 // Package restic gives a (very brief) introduction to the structure of source code.
 //
-// Overview
+// # Overview
 //
 // The packages are structured so that cmd/ contains the main package for the
 // restic binary, and internal/ contains almost all code in library form. We've

View File

@@ -28,10 +28,10 @@ import (
 // Backend stores data in a GCS bucket.
 //
 // The service account used to access the bucket must have these permissions:
-// * storage.objects.create
-// * storage.objects.delete
-// * storage.objects.get
-// * storage.objects.list
+// - storage.objects.create
+// - storage.objects.delete
+// - storage.objects.get
+// - storage.objects.list
 type Backend struct {
 	gcsClient *storage.Client
 	projectID string

View File

@@ -32,9 +32,9 @@ func init() {
 // ParseConfig parses the string s and extracts the sftp config. The
 // supported configuration formats are sftp://user@host[:port]/directory
 // and sftp:user@host:directory. The directory will be path Cleaned and can
 // be an absolute path if it starts with a '/' (e.g.
 // sftp://user@host//absolute and sftp:user@host:/absolute).
 func ParseConfig(s string) (interface{}, error) {
 	var user, host, port, dir string
 	switch {
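As an illustration of the two accepted forms documented above, a small sketch of calling ParseConfig is shown below. The repository paths are made up, and this would have to be compiled inside the restic module, since internal/ packages cannot be imported from elsewhere.

package main

import (
	"fmt"

	"github.com/restic/restic/internal/backend/sftp"
)

func main() {
	// Both configuration formats documented above should be accepted.
	for _, s := range []string{
		"sftp://user@host/srv/restic-repo",
		"sftp:user@host:/srv/restic-repo",
	} {
		cfg, err := sftp.ParseConfig(s)
		if err != nil {
			fmt.Println(s, "->", err)
			continue
		}
		fmt.Printf("%s -> %#v\n", s, cfg)
	}
}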

View File

@@ -1,6 +1,6 @@
 // Package test contains a test suite with benchmarks for restic backends.
 //
-// Overview
+// # Overview
 //
 // For the test suite to work a few functions need to be implemented to create
 // new config, create a backend, open it and run cleanup tasks afterwards. The
@@ -10,30 +10,31 @@
 // then the methods RunTests() and RunBenchmarks() can be used to run the
 // individual tests and benchmarks as subtests/subbenchmarks.
 //
-// Example
+// # Example
 //
 // Assuming a *Suite is returned by newTestSuite(), the tests and benchmarks
 // can be run like this:
 //
 //	func newTestSuite(t testing.TB) *test.Suite {
 //		return &test.Suite{
 //			Create: func(cfg interface{}) (restic.Backend, error) {
 //				[...]
 //			},
 //			[...]
 //		}
 //	}
 //
 //	func TestSuiteBackendMem(t *testing.T) {
 //		newTestSuite(t).RunTests(t)
 //	}
 //
 //	func BenchmarkSuiteBackendMem(b *testing.B) {
 //		newTestSuite(b).RunBenchmarks(b)
 //	}
 //
 // The functions are run in alphabetical order.
 //
-// Add new tests
+// # Add new tests
 //
 // A new test or benchmark can be added by implementing a method on *Suite
 // with the name starting with "Test" and a single *testing.T parameter for

View File

@@ -2,7 +2,7 @@
 // the following the abstractions used for this package are listed. More
 // information can be found in the restic design document.
 //
-// File
+// # File
 //
 // A file is a named handle for some data saved in the backend. For the local
 // backend, this corresponds to actual files saved to disk. Usually, the SHA256
@@ -11,18 +11,17 @@
 // encrypted before being saved in a backend. This means that the name is the
 // hash of the ciphertext.
 //
-// Blob
+// # Blob
 //
 // A blob is a number of bytes that has a type (data or tree). Blobs are
 // identified by an ID, which is the SHA256 hash of the blobs' contents. One or
 // more blobs are bundled together in a Pack and then saved to the backend.
 // Blobs are always encrypted before being bundled in a Pack.
 //
-// Pack
+// # Pack
 //
 // A Pack is a File in the backend that contains one or more (encrypted) blobs,
 // followed by a header at the end of the Pack. The header is encrypted and
 // contains the ID, type, length and offset for each blob contained in the
 // Pack.
-//
 package repository
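As a rough illustration of the Pack layout described above, a hypothetical header entry might carry the per-blob metadata the comment lists. The types blobType and headerEntry are made up for this sketch and are not restic's actual on-disk format.

package main

import "fmt"

// blobType distinguishes data blobs from tree blobs, as described above.
type blobType uint8

const (
	dataBlob blobType = iota
	treeBlob
)

// headerEntry is a hypothetical representation of one record in a pack
// header: the blob's ID, its type, and where it sits inside the pack.
type headerEntry struct {
	ID     [32]byte // SHA256 of the blob's contents
	Type   blobType
	Length uint32 // length of the encrypted blob in the pack
	Offset uint32 // offset of the blob within the pack file
}

func main() {
	e := headerEntry{Type: dataBlob, Length: 4096, Offset: 0}
	fmt.Printf("blob %x type=%d len=%d off=%d\n", e.ID[:4], e.Type, e.Length, e.Offset)
}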

View File

@@ -217,9 +217,9 @@ func (sn *Snapshot) HasTags(l []string) bool {
 }
 
 // HasTagList returns true if either
 // - the snapshot satisfies at least one TagList, so there is a TagList in l
 //   for which all tags are included in sn, or
 // - l is empty
 func (sn *Snapshot) HasTagList(l []TagList) bool {
 	debug.Log("testing snapshot with tags %v against list: %v", sn.Tags, l)

View File

@@ -8,10 +8,10 @@
 // Here is high-level pseudo-code of how the Restorer attempts to achieve
 // these goals:
 //
 //	while there are packs to process
 //		choose a pack to process [1]
 //		retrieve the pack from the backend [2]
 //		write pack blobs to the files that need them [3]
 //
 // Retrieval of repository packs (step [2]) and writing target files (step [3])
 // are performed concurrently on multiple goroutines.
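A small sketch of the concurrency shape this pseudo-code and comment suggest: workers pull pack IDs from a channel and perform steps [2] and [3]. The functions retrievePack and writeBlobs are hypothetical; this is not restic's actual restorer.

package main

import (
	"fmt"
	"sync"
)

func retrievePack(id string) []byte { return []byte("blobs of " + id) } // step [2], stubbed

func writeBlobs(id string, data []byte) { fmt.Printf("%s: wrote %d bytes\n", id, len(data)) } // step [3], stubbed

func main() {
	packs := make(chan string)
	var wg sync.WaitGroup

	// Several workers retrieve packs and write their blobs concurrently.
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for id := range packs {
				writeBlobs(id, retrievePack(id))
			}
		}()
	}

	// Step [1]: choose packs to process and hand them to the workers.
	for _, id := range []string{"pack-1", "pack-2", "pack-3"} {
		packs <- id
	}
	close(packs)
	wg.Wait()
}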

View File

@@ -12,7 +12,7 @@ import (
 // multiple files can be written to concurrently.
 // multiple blobs can be concurrently written to the same file.
 // TODO I am not 100% convinced this is necessary, i.e. it may be okay
 // to use multiple os.File to write to the same target file
 type filesWriter struct {
 	buckets []filesWriterBucket
 }
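The comment above is about writing several blobs concurrently into one target file. A small sketch of one way to do that with os.File.WriteAt (which is safe for concurrent use) follows; the offsets and data are made up and this is not restic's filesWriter.

package main

import (
	"os"
	"sync"
)

func main() {
	f, err := os.CreateTemp("", "blobs-")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	// Each blob gets a precomputed, non-overlapping offset in the file.
	blobs := map[int64][]byte{
		0:  []byte("first blob"),
		64: []byte("second blob"),
	}

	var wg sync.WaitGroup
	for off, data := range blobs {
		wg.Add(1)
		go func(off int64, data []byte) {
			defer wg.Done()
			if _, err := f.WriteAt(data, off); err != nil {
				panic(err)
			}
		}(off, data)
	}
	wg.Wait()
}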