package backup

import (
	"sort"
	"time"

	"github.com/restic/restic/internal/archiver"
	"github.com/restic/restic/internal/restic"
	"github.com/restic/restic/internal/ui"
	"github.com/restic/restic/internal/ui/termstatus"
)

// JSONProgress reports progress for the `backup` command in JSON.
type JSONProgress struct {
	*ui.Message

	term *termstatus.Terminal
	v    uint
}

// assert that JSONProgress implements the ProgressPrinter interface
var _ ProgressPrinter = &JSONProgress{}

// NewJSONProgress returns a new backup progress reporter.
func NewJSONProgress(term *termstatus.Terminal, verbosity uint) *JSONProgress {
	return &JSONProgress{
		Message: ui.NewMessage(term, verbosity),
		term:    term,
		v:       verbosity,
	}
}
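
// A minimal usage sketch, for illustration only: it assumes a running
// *termstatus.Terminal named term (terminal setup and command wiring are
// omitted) and a verbosity of 2, and it only uses the methods defined in
// this file.
//
//	printer := NewJSONProgress(term, 2)
//	printer.ReportTotal(time.Now(), archiver.ScanStats{Files: 3, Bytes: 4096})
//	printer.ScannerError("/tmp/example", errors.New("permission denied"))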

func (b *JSONProgress) print(status interface{}) {
	b.term.Print(ui.ToJSONString(status))
}

func (b *JSONProgress) error(status interface{}) {
	b.term.Error(ui.ToJSONString(status))
}

// Update updates the status lines.
func (b *JSONProgress) Update(total, processed Counter, errors uint, currentFiles map[string]struct{}, start time.Time, secs uint64) {
	status := statusUpdate{
		MessageType:      "status",
		SecondsElapsed:   uint64(time.Since(start) / time.Second),
		SecondsRemaining: secs,
		TotalFiles:       total.Files,
		FilesDone:        processed.Files,
		TotalBytes:       total.Bytes,
		BytesDone:        processed.Bytes,
		ErrorCount:       errors,
	}

	if total.Bytes > 0 {
		status.PercentDone = float64(processed.Bytes) / float64(total.Bytes)
	}

	for filename := range currentFiles {
		status.CurrentFiles = append(status.CurrentFiles, filename)
	}
	sort.Strings(status.CurrentFiles)

	b.print(status)
}

// ScannerError is the error callback function for the scanner. It prints
// the error and returns nil.
func (b *JSONProgress) ScannerError(item string, err error) error {
	b.error(errorUpdate{
		MessageType: "error",
		Error:       err,
		During:      "scan",
		Item:        item,
	})
	return nil
}

// Error is the error callback function for the archiver. It prints the error and returns nil.
func (b *JSONProgress) Error(item string, err error) error {
	b.error(errorUpdate{
		MessageType: "error",
		Error:       err,
		During:      "archival",
		Item:        item,
	})
	return nil
}

// CompleteItem is the status callback function for the archiver when a
// file/dir has been saved successfully.
func (b *JSONProgress) CompleteItem(messageType, item string, s archiver.ItemStats, d time.Duration) {
	if b.v < 2 {
		return
	}

	switch messageType {
	case "dir new":
		b.print(verboseUpdate{
			MessageType:        "verbose_status",
			Action:             "new",
			Item:               item,
			Duration:           d.Seconds(),
			DataSize:           s.DataSize,
			DataSizeInRepo:     s.DataSizeInRepo,
			MetadataSize:       s.TreeSize,
			MetadataSizeInRepo: s.TreeSizeInRepo,
		})
	case "dir unchanged":
		b.print(verboseUpdate{
			MessageType: "verbose_status",
			Action:      "unchanged",
			Item:        item,
		})
	case "dir modified":
		b.print(verboseUpdate{
			MessageType:        "verbose_status",
			Action:             "modified",
			Item:               item,
			Duration:           d.Seconds(),
			DataSize:           s.DataSize,
			DataSizeInRepo:     s.DataSizeInRepo,
			MetadataSize:       s.TreeSize,
			MetadataSizeInRepo: s.TreeSizeInRepo,
		})
	case "file new":
		b.print(verboseUpdate{
			MessageType:    "verbose_status",
			Action:         "new",
			Item:           item,
			Duration:       d.Seconds(),
			DataSize:       s.DataSize,
			DataSizeInRepo: s.DataSizeInRepo,
		})
	case "file unchanged":
		b.print(verboseUpdate{
			MessageType: "verbose_status",
			Action:      "unchanged",
			Item:        item,
		})
	case "file modified":
		b.print(verboseUpdate{
			MessageType:    "verbose_status",
			Action:         "modified",
			Item:           item,
			Duration:       d.Seconds(),
			DataSize:       s.DataSize,
			DataSizeInRepo: s.DataSizeInRepo,
		})
	}
}

// ReportTotal reports the results of the scanner; in verbose mode it prints
// a scan_finished message.
func (b *JSONProgress) ReportTotal(start time.Time, s archiver.ScanStats) {
	if b.v >= 2 {
		b.print(verboseUpdate{
			MessageType: "verbose_status",
			Action:      "scan_finished",
			Duration:    time.Since(start).Seconds(),
			DataSize:    s.Bytes,
			TotalFiles:  s.Files,
		})
	}
}
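
// For illustration only: with verbosity >= 2, a scan of 42 files totalling
// 1 MiB would produce a line shaped like the following (values invented;
// unused fields stay at their zero values because verboseUpdate does not
// use omitempty):
//
//	{"message_type":"verbose_status","action":"scan_finished","item":"","duration":0.123,"data_size":1048576,"data_size_in_repo":0,"metadata_size":0,"metadata_size_in_repo":0,"total_files":42}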

// Finish prints the summary message for the completed backup.
func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) {
	b.print(summaryOutput{
		MessageType:         "summary",
		FilesNew:            summary.Files.New,
		FilesChanged:        summary.Files.Changed,
		FilesUnmodified:     summary.Files.Unchanged,
		DirsNew:             summary.Dirs.New,
		DirsChanged:         summary.Dirs.Changed,
		DirsUnmodified:      summary.Dirs.Unchanged,
		DataBlobs:           summary.ItemStats.DataBlobs,
		TreeBlobs:           summary.ItemStats.TreeBlobs,
		DataAdded:           summary.ItemStats.DataSize + summary.ItemStats.TreeSize,
		DataAddedInRepo:     summary.ItemStats.DataSizeInRepo + summary.ItemStats.TreeSizeInRepo,
		TotalFilesProcessed: summary.Files.New + summary.Files.Changed + summary.Files.Unchanged,
		TotalBytesProcessed: summary.ProcessedBytes,
		TotalDuration:       time.Since(start).Seconds(),
		SnapshotID:          snapshotID.String(),
		DryRun:              dryRun,
	})
}

// Reset is a no-op for the JSON progress reporter.
func (b *JSONProgress) Reset() {
}

type statusUpdate struct {
	MessageType      string   `json:"message_type"` // "status"
	SecondsElapsed   uint64   `json:"seconds_elapsed,omitempty"`
	SecondsRemaining uint64   `json:"seconds_remaining,omitempty"`
	PercentDone      float64  `json:"percent_done"`
	TotalFiles       uint64   `json:"total_files,omitempty"`
	FilesDone        uint64   `json:"files_done,omitempty"`
	TotalBytes       uint64   `json:"total_bytes,omitempty"`
	BytesDone        uint64   `json:"bytes_done,omitempty"`
	ErrorCount       uint     `json:"error_count,omitempty"`
	CurrentFiles     []string `json:"current_files,omitempty"`
}
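
// An illustrative "status" line as emitted by Update (values are invented;
// fields tagged omitempty are dropped when zero, e.g. error_count here):
//
//	{"message_type":"status","seconds_elapsed":12,"seconds_remaining":12,"percent_done":0.5,"total_files":100,"files_done":50,"total_bytes":2097152,"bytes_done":1048576,"current_files":["/home/user/notes.txt"]}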

type errorUpdate struct {
	MessageType string `json:"message_type"` // "error"
	Error       error  `json:"error"`
	During      string `json:"during"`
	Item        string `json:"item"`
}
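
// Note that Error is marshalled by encoding/json like any other value: plain
// errors created with errors.New or fmt.Errorf carry no exported fields and
// therefore typically render as an empty object, for example (item invented):
//
//	{"message_type":"error","error":{},"during":"scan","item":"/tmp/example"}
//
// Error types with exported fields (such as *os.PathError) expose those
// fields instead.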

type verboseUpdate struct {
	MessageType        string  `json:"message_type"` // "verbose_status"
	Action             string  `json:"action"`
	Item               string  `json:"item"`
	Duration           float64 `json:"duration"` // in seconds
	DataSize           uint64  `json:"data_size"`
	DataSizeInRepo     uint64  `json:"data_size_in_repo"`
	MetadataSize       uint64  `json:"metadata_size"`
	MetadataSizeInRepo uint64  `json:"metadata_size_in_repo"`
	TotalFiles         uint    `json:"total_files"`
}
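
// An illustrative "verbose_status" line for a newly backed-up file, as
// printed by CompleteItem (values are invented; every field is emitted
// because none carries omitempty):
//
//	{"message_type":"verbose_status","action":"new","item":"/home/user/notes.txt","duration":0.004,"data_size":1048576,"data_size_in_repo":524288,"metadata_size":0,"metadata_size_in_repo":0,"total_files":0}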

type summaryOutput struct {
	MessageType         string  `json:"message_type"` // "summary"
	FilesNew            uint    `json:"files_new"`
	FilesChanged        uint    `json:"files_changed"`
	FilesUnmodified     uint    `json:"files_unmodified"`
	DirsNew             uint    `json:"dirs_new"`
	DirsChanged         uint    `json:"dirs_changed"`
	DirsUnmodified      uint    `json:"dirs_unmodified"`
	DataBlobs           int     `json:"data_blobs"`
	TreeBlobs           int     `json:"tree_blobs"`
	DataAdded           uint64  `json:"data_added"`
	DataAddedInRepo     uint64  `json:"data_added_in_repo"`
	TotalFilesProcessed uint    `json:"total_files_processed"`
	TotalBytesProcessed uint64  `json:"total_bytes_processed"`
	TotalDuration       float64 `json:"total_duration"` // in seconds
	SnapshotID          string  `json:"snapshot_id"`
	DryRun              bool    `json:"dry_run,omitempty"`
}
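
// An illustrative "summary" line as emitted by Finish (values are invented;
// snapshot_id is shortened here, the real value is the snapshot's full hex
// ID; dry_run is omitted unless true):
//
//	{"message_type":"summary","files_new":3,"files_changed":1,"files_unmodified":96,"dirs_new":0,"dirs_changed":2,"dirs_unmodified":10,"data_blobs":4,"tree_blobs":3,"data_added":262144,"data_added_in_repo":131072,"total_files_processed":100,"total_bytes_processed":2097152,"total_duration":1.57,"snapshot_id":"b0f1a2c3"}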