Merge pull request #5119 from MichaelEischer/backup-json-start-end-time
backup: include start and end time in json output
Commit 9c758313e3
changelog/unreleased/pull-5119 (new file, 6 lines)
@@ -0,0 +1,6 @@
+Enhancement: Include backup start and end in JSON output
+
+The JSON output of the backup command now also includes the `backup_start`
+and `backup_end` timestamps.
+
+https://github.com/restic/restic/pull/5119
|
@@ -226,6 +226,10 @@ Summary is the last output line in a successful backup.
 +---------------------------+---------------------------------------------------------+
 | ``total_bytes_processed`` | Total number of bytes processed                         |
 +---------------------------+---------------------------------------------------------+
+| ``backup_start``          | Time at which the backup was started                    |
++---------------------------+---------------------------------------------------------+
+| ``backup_end``            | Time at which the backup was completed                  |
++---------------------------+---------------------------------------------------------+
 | ``total_duration``        | Total time it took for the operation to complete        |
 +---------------------------+---------------------------------------------------------+
 | ``snapshot_id``           | ID of the new snapshot. Field is omitted if snapshot    |
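A note on the two new fields: the table does not state their format. They are Go time.Time values serialized by encoding/json, so they appear as RFC 3339 strings. A small standalone sketch (an illustrative field subset with made-up values, not restic code) shows the resulting shape:

// Illustrative subset of the summary message; not restic's actual struct.
package main

import (
    "encoding/json"
    "fmt"
    "time"
)

type summaryTimes struct {
    MessageType string    `json:"message_type"`
    BackupStart time.Time `json:"backup_start"`
    BackupEnd   time.Time `json:"backup_end"`
}

func main() {
    start := time.Date(2024, 11, 21, 20, 30, 0, 0, time.UTC)
    msg := summaryTimes{MessageType: "summary", BackupStart: start, BackupEnd: start.Add(42 * time.Second)}
    out, _ := json.Marshal(msg)
    fmt.Println(string(out))
    // prints: {"message_type":"summary","backup_start":"2024-11-21T20:30:00Z","backup_end":"2024-11-21T20:30:42Z"}
}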
@@ -49,6 +49,8 @@ type ChangeStats struct {
 }
 
 type Summary struct {
+    BackupStart    time.Time
+    BackupEnd      time.Time
     Files, Dirs    ChangeStats
     ProcessedBytes uint64
     ItemStats
@@ -811,7 +813,9 @@ func (arch *Archiver) stopWorkers() {
 
 // Snapshot saves several targets and returns a snapshot.
 func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*restic.Snapshot, restic.ID, *Summary, error) {
-    arch.summary = &Summary{}
+    arch.summary = &Summary{
+        BackupStart: opts.BackupStart,
+    }
 
     cleanTargets, err := resolveRelativeTargets(arch.FS, targets)
     if err != nil {
@@ -894,9 +898,10 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps
         sn.Parent = opts.ParentSnapshot.ID()
     }
     sn.Tree = &rootTreeID
+    arch.summary.BackupEnd = time.Now()
     sn.Summary = &restic.SnapshotSummary{
-        BackupStart: opts.BackupStart,
-        BackupEnd:   time.Now(),
+        BackupStart: arch.summary.BackupStart,
+        BackupEnd:   arch.summary.BackupEnd,
 
         FilesNew:     arch.summary.Files.New,
         FilesChanged: arch.summary.Files.Changed,
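Editor's sketch of the timestamp flow introduced above, using local stand-in types rather than the archiver's real API: the caller supplies BackupStart through the snapshot options, the archiver stamps BackupEnd just before the snapshot summary is built, and the duration is derived from the two stored values rather than from a separately tracked wall-clock start.

package main

import (
    "fmt"
    "time"
)

// Stand-ins for SnapshotOptions and Summary; field names follow the diff,
// everything else is illustrative.
type snapshotOptions struct {
    BackupStart time.Time // set by the caller when the backup begins
}

type summary struct {
    BackupStart time.Time
    BackupEnd   time.Time
}

func snapshot(opts snapshotOptions) *summary {
    s := &summary{BackupStart: opts.BackupStart}
    // ... scanning and uploading would happen here ...
    s.BackupEnd = time.Now() // stamped once the snapshot is finalized
    return s
}

func main() {
    sum := snapshot(snapshotOptions{BackupStart: time.Now()})
    fmt.Println("backup took", sum.BackupEnd.Sub(sum.BackupStart))
}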
@@ -1692,14 +1692,17 @@ func (f MockFile) Read(p []byte) (int, error) {
 }
 
 func checkSnapshotStats(t *testing.T, sn *restic.Snapshot, stat Summary) {
-    rtest.Equals(t, stat.Files.New, sn.Summary.FilesNew)
-    rtest.Equals(t, stat.Files.Changed, sn.Summary.FilesChanged)
-    rtest.Equals(t, stat.Files.Unchanged, sn.Summary.FilesUnmodified)
-    rtest.Equals(t, stat.Dirs.New, sn.Summary.DirsNew)
-    rtest.Equals(t, stat.Dirs.Changed, sn.Summary.DirsChanged)
-    rtest.Equals(t, stat.Dirs.Unchanged, sn.Summary.DirsUnmodified)
-    rtest.Equals(t, stat.ProcessedBytes, sn.Summary.TotalBytesProcessed)
-    rtest.Equals(t, stat.Files.New+stat.Files.Changed+stat.Files.Unchanged, sn.Summary.TotalFilesProcessed)
+    t.Helper()
+    rtest.Equals(t, stat.BackupStart, sn.Summary.BackupStart, "BackupStart")
+    // BackupEnd is set to time.Now() and can't be compared to a fixed value
+    rtest.Equals(t, stat.Files.New, sn.Summary.FilesNew, "FilesNew")
+    rtest.Equals(t, stat.Files.Changed, sn.Summary.FilesChanged, "FilesChanged")
+    rtest.Equals(t, stat.Files.Unchanged, sn.Summary.FilesUnmodified, "FilesUnmodified")
+    rtest.Equals(t, stat.Dirs.New, sn.Summary.DirsNew, "DirsNew")
+    rtest.Equals(t, stat.Dirs.Changed, sn.Summary.DirsChanged, "DirsChanged")
+    rtest.Equals(t, stat.Dirs.Unchanged, sn.Summary.DirsUnmodified, "DirsUnmodified")
+    rtest.Equals(t, stat.ProcessedBytes, sn.Summary.TotalBytesProcessed, "TotalBytesProcessed")
+    rtest.Equals(t, stat.Files.New+stat.Files.Changed+stat.Files.Unchanged, sn.Summary.TotalFilesProcessed, "TotalFilesProcessed")
     bothZeroOrNeither(t, uint64(stat.DataBlobs), uint64(sn.Summary.DataBlobs))
     bothZeroOrNeither(t, uint64(stat.TreeBlobs), uint64(sn.Summary.TreeBlobs))
     bothZeroOrNeither(t, uint64(stat.DataSize+stat.TreeSize), uint64(sn.Summary.DataAdded))
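The bothZeroOrNeither helper used above is not part of this diff; assuming it merely asserts that both counters are either zero or non-zero, a minimal hypothetical version (placed in the same _test.go package) could look like this; the real helper in the test file may differ:

package archiver

import "testing"

// Hypothetical sketch of bothZeroOrNeither, not the actual restic test helper.
func bothZeroOrNeither(t *testing.T, exp, act uint64) {
    t.Helper()
    if (exp == 0) != (act == 0) {
        t.Fatalf("expected both values to be zero or both non-zero, got %d and %d", exp, act)
    }
}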
@@ -162,7 +162,7 @@ func (b *JSONProgress) ReportTotal(start time.Time, s archiver.ScanStats) {
 }
 
 // Finish prints the finishing messages.
-func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) {
+func (b *JSONProgress) Finish(snapshotID restic.ID, summary *archiver.Summary, dryRun bool) {
     id := ""
     // empty if snapshot creation was skipped
     if !snapshotID.IsNull() {
@@ -182,7 +182,9 @@ func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *ar
         DataAddedPacked:     summary.ItemStats.DataSizeInRepo + summary.ItemStats.TreeSizeInRepo,
         TotalFilesProcessed: summary.Files.New + summary.Files.Changed + summary.Files.Unchanged,
         TotalBytesProcessed: summary.ProcessedBytes,
-        TotalDuration:       time.Since(start).Seconds(),
+        BackupStart:         summary.BackupStart,
+        BackupEnd:           summary.BackupEnd,
+        TotalDuration:       summary.BackupEnd.Sub(summary.BackupStart).Seconds(),
         SnapshotID:          id,
         DryRun:              dryRun,
     })
@@ -229,20 +231,22 @@ type verboseUpdate struct {
 }
 
 type summaryOutput struct {
-    MessageType         string  `json:"message_type"` // "summary"
-    FilesNew            uint    `json:"files_new"`
-    FilesChanged        uint    `json:"files_changed"`
-    FilesUnmodified     uint    `json:"files_unmodified"`
-    DirsNew             uint    `json:"dirs_new"`
-    DirsChanged         uint    `json:"dirs_changed"`
-    DirsUnmodified      uint    `json:"dirs_unmodified"`
-    DataBlobs           int     `json:"data_blobs"`
-    TreeBlobs           int     `json:"tree_blobs"`
-    DataAdded           uint64  `json:"data_added"`
-    DataAddedPacked     uint64  `json:"data_added_packed"`
-    TotalFilesProcessed uint    `json:"total_files_processed"`
-    TotalBytesProcessed uint64  `json:"total_bytes_processed"`
-    TotalDuration       float64 `json:"total_duration"` // in seconds
-    SnapshotID          string  `json:"snapshot_id,omitempty"`
-    DryRun              bool    `json:"dry_run,omitempty"`
+    MessageType         string    `json:"message_type"` // "summary"
+    FilesNew            uint      `json:"files_new"`
+    FilesChanged        uint      `json:"files_changed"`
+    FilesUnmodified     uint      `json:"files_unmodified"`
+    DirsNew             uint      `json:"dirs_new"`
+    DirsChanged         uint      `json:"dirs_changed"`
+    DirsUnmodified      uint      `json:"dirs_unmodified"`
+    DataBlobs           int       `json:"data_blobs"`
+    TreeBlobs           int       `json:"tree_blobs"`
+    DataAdded           uint64    `json:"data_added"`
+    DataAddedPacked     uint64    `json:"data_added_packed"`
+    TotalFilesProcessed uint      `json:"total_files_processed"`
+    TotalBytesProcessed uint64    `json:"total_bytes_processed"`
+    TotalDuration       float64   `json:"total_duration"` // in seconds
+    BackupStart         time.Time `json:"backup_start"`
+    BackupEnd           time.Time `json:"backup_end"`
+    SnapshotID          string    `json:"snapshot_id,omitempty"`
+    DryRun              bool      `json:"dry_run,omitempty"`
 }
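From the consumer side, the summary line is now self-describing: total_duration is computed as the difference of the two timestamps, so it should match a duration derived from them. A minimal decoding sketch (the sample line is fabricated and only the fields used here are declared):

package main

import (
    "encoding/json"
    "fmt"
    "log"
    "time"
)

// Only the fields needed for this example; names match the JSON tags above.
type backupSummary struct {
    MessageType   string    `json:"message_type"`
    BackupStart   time.Time `json:"backup_start"`
    BackupEnd     time.Time `json:"backup_end"`
    TotalDuration float64   `json:"total_duration"`
}

func main() {
    line := `{"message_type":"summary","backup_start":"2024-11-21T20:30:00Z","backup_end":"2024-11-21T20:30:42Z","total_duration":42}`

    var s backupSummary
    if err := json.Unmarshal([]byte(line), &s); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("reported: %.0fs, derived: %s\n", s.TotalDuration, s.BackupEnd.Sub(s.BackupStart))
}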
@@ -17,7 +17,7 @@ type ProgressPrinter interface {
     ScannerError(item string, err error) error
     CompleteItem(messageType string, item string, s archiver.ItemStats, d time.Duration)
     ReportTotal(start time.Time, s archiver.ScanStats)
-    Finish(snapshotID restic.ID, start time.Time, summary *archiver.Summary, dryRun bool)
+    Finish(snapshotID restic.ID, summary *archiver.Summary, dryRun bool)
     Reset()
 
     P(msg string, args ...interface{})
@@ -173,5 +173,5 @@ func (p *Progress) ReportTotal(item string, s archiver.ScanStats) {
 func (p *Progress) Finish(snapshotID restic.ID, summary *archiver.Summary, dryrun bool) {
     // wait for the status update goroutine to shut down
     p.Updater.Done()
-    p.printer.Finish(snapshotID, p.start, summary, dryrun)
+    p.printer.Finish(snapshotID, summary, dryrun)
 }
@@ -33,7 +33,7 @@ func (p *mockPrinter) CompleteItem(messageType string, _ string, _ archiver.Item
 }
 
 func (p *mockPrinter) ReportTotal(_ time.Time, _ archiver.ScanStats) {}
-func (p *mockPrinter) Finish(id restic.ID, _ time.Time, _ *archiver.Summary, _ bool) {
+func (p *mockPrinter) Finish(id restic.ID, _ *archiver.Summary, _ bool) {
     p.Lock()
     defer p.Unlock()
 
@@ -130,7 +130,7 @@ func (b *TextProgress) Reset() {
 }
 
 // Finish prints the finishing messages.
-func (b *TextProgress) Finish(id restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) {
+func (b *TextProgress) Finish(id restic.ID, summary *archiver.Summary, dryRun bool) {
     b.P("\n")
     b.P("Files: %5d new, %5d changed, %5d unmodified\n", summary.Files.New, summary.Files.Changed, summary.Files.Unchanged)
     b.P("Dirs: %5d new, %5d changed, %5d unmodified\n", summary.Dirs.New, summary.Dirs.Changed, summary.Dirs.Unchanged)
@@ -147,7 +147,7 @@ func (b *TextProgress) Finish(id restic.ID, start time.Time, summary *archiver.S
     b.P("processed %v files, %v in %s",
         summary.Files.New+summary.Files.Changed+summary.Files.Unchanged,
         ui.FormatBytes(summary.ProcessedBytes),
-        ui.FormatDuration(time.Since(start)),
+        ui.FormatDuration(summary.BackupEnd.Sub(summary.BackupStart)),
     )
 
     if !dryRun {