From 78dbc5ec5871a9995a25abe7c091fc1a9f104417 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Fri, 6 Nov 2020 03:41:02 +0300 Subject: [PATCH 01/16] vss: Add initial support for extended options --- cmd/restic/cmd_backup.go | 13 +++++++++++-- internal/fs/fs_local_vss.go | 30 +++++++++++++++++++++++++++++- 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index d3e5a8546..5329a928c 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -445,7 +445,16 @@ func findParentSnapshot(ctx context.Context, repo restic.ListerLoaderUnpacked, o } func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { - err := opts.Check(gopts, args) + var vsscfg fs.VSSConfig + var err error + + if runtime.GOOS == "windows" { + if vsscfg, err = fs.ParseVSSConfig(gopts.extended); err != nil { + return err + } + } + + err = opts.Check(gopts, args) if err != nil { return err } @@ -557,7 +566,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter } } - localVss := fs.NewLocalVss(errorHandler, messageHandler) + localVss := fs.NewLocalVss(errorHandler, messageHandler, vsscfg) defer localVss.DeleteSnapshots() targetFS = localVss } diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index aa3522aea..f68e2ff28 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -3,12 +3,40 @@ package fs import ( "os" "path/filepath" + "runtime" "strings" "sync" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/options" ) +// VSSConfig holds extended options of windows volume shadow copy service. +type VSSConfig struct { +} + +func init() { + if runtime.GOOS == "windows" { + options.Register("vss", VSSConfig{}) + } +} + +// NewVSSConfig returns a new VSSConfig with the default values filled in. +func NewVSSConfig() VSSConfig { + return VSSConfig{} +} + +// ParseVSSConfig parses a VSS extended options to VSSConfig struct. +func ParseVSSConfig(o options.Options) (VSSConfig, error) { + cfg := NewVSSConfig() + o = o.Extract("vss") + if err := o.Apply("vss", &cfg); err != nil { + return VSSConfig{}, err + } + + return cfg, nil +} + // ErrorHandler is used to report errors via callback type ErrorHandler func(item string, err error) error @@ -31,7 +59,7 @@ var _ FS = &LocalVss{} // NewLocalVss creates a new wrapper around the windows filesystem using volume // shadow copy service to access locked files. -func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler) *LocalVss { +func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler, cfg VSSConfig) *LocalVss { return &LocalVss{ FS: Local{}, snapshots: make(map[string]VssSnapshot), From 7470e5356e04424cb891bd6cdea9071a005f30dd Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Fri, 6 Nov 2020 03:41:02 +0300 Subject: [PATCH 02/16] vss: Add "timeout" option Changing multiple "callAsyncFunctionAndWait" with fixed timeout to calculated timeout based on deadline. 
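[Editorial note, not part of the patch: the deadline idea this commit describes can be sketched independently of the VSS plumbing. The snippet below is an illustrative, self-contained Go sketch under assumed names (`waitStep`, `runWithDeadline` are hypothetical, not restic or Windows API code): one deadline is fixed up front and every subsequent asynchronous wait only receives the time still remaining, instead of each call getting the same fixed timeout.]

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitStep stands in for one asynchronous VSS operation that accepts a timeout.
type waitStep func(timeout time.Duration) error

// runWithDeadline derives the remaining timeout for every step from one shared
// deadline, and fails fast once the deadline has passed.
func runWithDeadline(deadline time.Time, steps ...waitStep) error {
	for i, step := range steps {
		remaining := time.Until(deadline)
		if remaining <= 0 {
			return fmt.Errorf("step %d: deadline exceeded", i)
		}
		if err := step(remaining); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	// Overall budget for the whole sequence, e.g. the previous 120s default.
	deadline := time.Now().Add(120 * time.Second)

	step := func(timeout time.Duration) error {
		if timeout < time.Second {
			return errors.New("not enough time left")
		}
		time.Sleep(10 * time.Millisecond) // simulated work
		return nil
	}

	fmt.Println(runWithDeadline(deadline, step, step, step))
}
```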
--- internal/fs/fs_local_vss.go | 10 ++++++++-- internal/fs/vss.go | 4 +++- internal/fs/vss_windows.go | 37 +++++++++++++++++++++++++------------ 3 files changed, 36 insertions(+), 15 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index f68e2ff28..1f6001782 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -6,6 +6,7 @@ import ( "runtime" "strings" "sync" + "time" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/options" @@ -13,6 +14,7 @@ import ( // VSSConfig holds extended options of windows volume shadow copy service. type VSSConfig struct { + Timeout time.Duration `option:"timeout" help:"time that the VSS can spend creating snapshots before timing out"` } func init() { @@ -23,7 +25,9 @@ func init() { // NewVSSConfig returns a new VSSConfig with the default values filled in. func NewVSSConfig() VSSConfig { - return VSSConfig{} + return VSSConfig{ + Timeout: time.Second * 120, + } } // ParseVSSConfig parses a VSS extended options to VSSConfig struct. @@ -52,6 +56,7 @@ type LocalVss struct { mutex sync.RWMutex msgError ErrorHandler msgMessage MessageHandler + timeout time.Duration } // statically ensure that LocalVss implements FS. @@ -66,6 +71,7 @@ func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler, cfg VSSConfig failedSnapshots: make(map[string]struct{}), msgError: msgError, msgMessage: msgMessage, + timeout: cfg.Timeout, } } @@ -144,7 +150,7 @@ func (fs *LocalVss) snapshotPath(path string) string { vssVolume := volumeNameLower + string(filepath.Separator) fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume) - if snapshot, err := NewVssSnapshot(vssVolume, 120, fs.msgError); err != nil { + if snapshot, err := NewVssSnapshot(vssVolume, fs.timeout, fs.msgError); err != nil { _ = fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s", vssVolume, err)) fs.failedSnapshots[volumeNameLower] = struct{}{} diff --git a/internal/fs/vss.go b/internal/fs/vss.go index 5f0ea36d9..92143883d 100644 --- a/internal/fs/vss.go +++ b/internal/fs/vss.go @@ -4,6 +4,8 @@ package fs import ( + "time" + "github.com/restic/restic/internal/errors" ) @@ -34,7 +36,7 @@ func HasSufficientPrivilegesForVSS() error { // NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't // finish within the timeout an error is returned. func NewVssSnapshot( - _ string, _ uint, _ ErrorHandler) (VssSnapshot, error) { + _ string, _ time.Duration, _ ErrorHandler) (VssSnapshot, error) { return VssSnapshot{}, errors.New("VSS snapshots are only supported on windows") } diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index d75567d25..4e7f10385 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -9,6 +9,7 @@ import ( "runtime" "strings" "syscall" + "time" "unsafe" ole "github.com/go-ole/go-ole" @@ -617,8 +618,13 @@ func (vssAsync *IVSSAsync) QueryStatus() (HRESULT, uint32) { // WaitUntilAsyncFinished waits until either the async call is finished or // the given timeout is reached. 
-func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(millis uint32) error { - hresult := vssAsync.Wait(millis) +func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(timeout time.Duration) error { + const maxTimeout = 2147483647 * time.Millisecond + if timeout > maxTimeout { + timeout = maxTimeout + } + + hresult := vssAsync.Wait(uint32(timeout.Milliseconds())) err := newVssErrorIfResultNotOK("Wait() failed", hresult) if err != nil { vssAsync.Cancel() @@ -677,7 +683,7 @@ type VssSnapshot struct { snapshotProperties VssSnapshotProperties snapshotDeviceObject string mountPointInfo map[string]MountPoint - timeoutInMillis uint32 + timeout time.Duration } // GetSnapshotDeviceObject returns root path to access the snapshot files @@ -730,7 +736,7 @@ func HasSufficientPrivilegesForVSS() error { // NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't // finish within the timeout an error is returned. func NewVssSnapshot( - volume string, timeoutInSeconds uint, msgError ErrorHandler) (VssSnapshot, error) { + volume string, timeout time.Duration, msgError ErrorHandler) (VssSnapshot, error) { is64Bit, err := isRunningOn64BitWindows() if err != nil { @@ -744,7 +750,7 @@ func NewVssSnapshot( runtime.GOARCH)) } - timeoutInMillis := uint32(timeoutInSeconds * 1000) + deadline := time.Now().Add(timeout) oleIUnknown, err := initializeVssCOMInterface() if oleIUnknown != nil { @@ -796,7 +802,7 @@ func NewVssSnapshot( } err = callAsyncFunctionAndWait(iVssBackupComponents.GatherWriterMetadata, - "GatherWriterMetadata", timeoutInMillis) + "GatherWriterMetadata", deadline) if err != nil { iVssBackupComponents.Release() return VssSnapshot{}, err @@ -854,7 +860,7 @@ func NewVssSnapshot( } err = callAsyncFunctionAndWait(iVssBackupComponents.PrepareForBackup, "PrepareForBackup", - timeoutInMillis) + deadline) if err != nil { // After calling PrepareForBackup one needs to call AbortBackup() before releasing the VSS // instance for proper cleanup. @@ -865,7 +871,7 @@ func NewVssSnapshot( } err = callAsyncFunctionAndWait(iVssBackupComponents.DoSnapshotSet, "DoSnapshotSet", - timeoutInMillis) + deadline) if err != nil { iVssBackupComponents.AbortBackup() iVssBackupComponents.Release() @@ -901,7 +907,7 @@ func NewVssSnapshot( } return VssSnapshot{iVssBackupComponents, snapshotSetID, snapshotProperties, - snapshotProperties.GetSnapshotDeviceObject(), mountPointInfo, timeoutInMillis}, nil + snapshotProperties.GetSnapshotDeviceObject(), mountPointInfo, time.Until(deadline)}, nil } // Delete deletes the created snapshot. @@ -922,8 +928,10 @@ func (p *VssSnapshot) Delete() error { if p.iVssBackupComponents != nil { defer p.iVssBackupComponents.Release() + deadline := time.Now().Add(p.timeout) + err = callAsyncFunctionAndWait(p.iVssBackupComponents.BackupComplete, "BackupComplete", - p.timeoutInMillis) + deadline) if err != nil { return err } @@ -945,7 +953,7 @@ type asyncCallFunc func() (*IVSSAsync, error) // callAsyncFunctionAndWait calls an async functions and waits for it to either // finish or timeout. 
-func callAsyncFunctionAndWait(function asyncCallFunc, name string, timeoutInMillis uint32) error { +func callAsyncFunctionAndWait(function asyncCallFunc, name string, deadline time.Time) error { iVssAsync, err := function() if err != nil { return err @@ -955,7 +963,12 @@ func callAsyncFunctionAndWait(function asyncCallFunc, name string, timeoutInMill return newVssTextError(fmt.Sprintf("%s() returned nil", name)) } - err = iVssAsync.WaitUntilAsyncFinished(timeoutInMillis) + timeout := time.Until(deadline) + if timeout <= 0 { + return newVssTextError(fmt.Sprintf("%s() deadline exceeded", name)) + } + + err = iVssAsync.WaitUntilAsyncFinished(timeout) iVssAsync.Release() return err } From c4f67c00644ebb344b08e8038868674f7fee0981 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Fri, 6 Nov 2020 03:41:02 +0300 Subject: [PATCH 03/16] vss: Add volume filtering Add options to exclude all mountpoints and arbitrary volumes from snapshotting. --- internal/fs/fs_local_vss.go | 113 +++++++++++++++++++++++++++--------- internal/fs/vss.go | 8 ++- internal/fs/vss_windows.go | 78 +++++++++++++++++-------- 3 files changed, 148 insertions(+), 51 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 1f6001782..0e73092f2 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -14,7 +14,9 @@ import ( // VSSConfig holds extended options of windows volume shadow copy service. type VSSConfig struct { - Timeout time.Duration `option:"timeout" help:"time that the VSS can spend creating snapshots before timing out"` + ExcludeAllMountPoints bool `option:"excludeallmountpoints" help:"exclude mountpoints from snapshotting on all volumes"` + ExcludeVolumes string `option:"excludevolumes" help:"semicolon separated list of volumes to exclude from snapshotting (ex. 'c:\\;e:\\mnt;\\\\?\\Volume{...}')"` + Timeout time.Duration `option:"timeout" help:"time that the VSS can spend creating snapshot before timing out"` } func init() { @@ -47,31 +49,59 @@ type ErrorHandler func(item string, err error) error // MessageHandler is used to report errors/messages via callbacks. type MessageHandler func(msg string, args ...interface{}) +// VolumeFilter is used to filter volumes by it's mount point or GUID path. +type VolumeFilter func(volume string) bool + // LocalVss is a wrapper around the local file system which uses windows volume // shadow copy service (VSS) in a transparent way. type LocalVss struct { FS - snapshots map[string]VssSnapshot - failedSnapshots map[string]struct{} - mutex sync.RWMutex - msgError ErrorHandler - msgMessage MessageHandler - timeout time.Duration + snapshots map[string]VssSnapshot + failedSnapshots map[string]struct{} + mutex sync.RWMutex + msgError ErrorHandler + msgMessage MessageHandler + excludeAllMountPoints bool + excludeVolumes map[string]struct{} + timeout time.Duration } // statically ensure that LocalVss implements FS. var _ FS = &LocalVss{} +// parseMountPoints try to convert semicolon separated list of mount points +// to map of lowercased volume GUID pathes. Mountpoints already in volume +// GUID path format will be validated and normalized. 
+func parseMountPoints(list string, msgError ErrorHandler) (volumes map[string]struct{}) { + if list == "" { + return + } + for _, s := range strings.Split(list, ";") { + if v, err := GetVolumeNameForVolumeMountPoint(s); err != nil { + msgError(s, errors.Errorf("failed to parse vss.excludevolumes [%s]: %s", s, err)) + } else { + if volumes == nil { + volumes = make(map[string]struct{}) + } + volumes[strings.ToLower(v)] = struct{}{} + } + } + + return +} + // NewLocalVss creates a new wrapper around the windows filesystem using volume // shadow copy service to access locked files. func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler, cfg VSSConfig) *LocalVss { return &LocalVss{ - FS: Local{}, - snapshots: make(map[string]VssSnapshot), - failedSnapshots: make(map[string]struct{}), - msgError: msgError, - msgMessage: msgMessage, - timeout: cfg.Timeout, + FS: Local{}, + snapshots: make(map[string]VssSnapshot), + failedSnapshots: make(map[string]struct{}), + msgError: msgError, + msgMessage: msgMessage, + excludeAllMountPoints: cfg.ExcludeAllMountPoints, + excludeVolumes: parseMountPoints(cfg.ExcludeVolumes, msgError), + timeout: cfg.Timeout, } } @@ -112,6 +142,24 @@ func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) { return os.Lstat(fs.snapshotPath(name)) } +// isMountPointExcluded is true if given mountpoint excluded by user. +func (fs *LocalVss) isMountPointExcluded(mountPoint string) bool { + if fs.excludeVolumes == nil { + return false + } + + volume, err := GetVolumeNameForVolumeMountPoint(mountPoint) + if err != nil { + fs.msgError(mountPoint, errors.Errorf("failed to get volume from mount point [%s]: %s", mountPoint, err)) + + return false + } + + _, ok := fs.excludeVolumes[strings.ToLower(volume)] + + return ok +} + // snapshotPath returns the path inside a VSS snapshots if it already exists. // If the path is not yet available as a snapshot, a snapshot is created. 
// If creation of a snapshot fails the file's original path is returned as @@ -148,23 +196,36 @@ func (fs *LocalVss) snapshotPath(path string) string { if !snapshotExists && !snapshotFailed { vssVolume := volumeNameLower + string(filepath.Separator) - fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume) - if snapshot, err := NewVssSnapshot(vssVolume, fs.timeout, fs.msgError); err != nil { - _ = fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s", - vssVolume, err)) + if fs.isMountPointExcluded(vssVolume) { + fs.msgMessage("snapshots for [%s] excluded by user\n", vssVolume) fs.failedSnapshots[volumeNameLower] = struct{}{} } else { - fs.snapshots[volumeNameLower] = snapshot - fs.msgMessage("successfully created snapshot for [%s]\n", vssVolume) - if len(snapshot.mountPointInfo) > 0 { - fs.msgMessage("mountpoints in snapshot volume [%s]:\n", vssVolume) - for mp, mpInfo := range snapshot.mountPointInfo { - info := "" - if !mpInfo.IsSnapshotted() { - info = " (not snapshotted)" + fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume) + + var filter VolumeFilter + if !fs.excludeAllMountPoints { + filter = func(volume string) bool { + return !fs.isMountPointExcluded(volume) + } + } + + if snapshot, err := NewVssSnapshot(vssVolume, fs.timeout, filter, fs.msgError); err != nil { + fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s", + vssVolume, err)) + fs.failedSnapshots[volumeNameLower] = struct{}{} + } else { + fs.snapshots[volumeNameLower] = snapshot + fs.msgMessage("successfully created snapshot for [%s]\n", vssVolume) + if len(snapshot.mountPointInfo) > 0 { + fs.msgMessage("mountpoints in snapshot volume [%s]:\n", vssVolume) + for mp, mpInfo := range snapshot.mountPointInfo { + info := "" + if !mpInfo.IsSnapshotted() { + info = " (not snapshotted)" + } + fs.msgMessage(" - %s%s\n", mp, info) } - fs.msgMessage(" - %s%s\n", mp, info) } } } diff --git a/internal/fs/vss.go b/internal/fs/vss.go index 92143883d..838bdf79b 100644 --- a/internal/fs/vss.go +++ b/internal/fs/vss.go @@ -33,10 +33,16 @@ func HasSufficientPrivilegesForVSS() error { return errors.New("VSS snapshots are only supported on windows") } +// GetVolumeNameForVolumeMountPoint clear input parameter +// and calls the equivalent windows api. +func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { + return mountPoint, nil +} + // NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't // finish within the timeout an error is returned. func NewVssSnapshot( - _ string, _ time.Duration, _ ErrorHandler) (VssSnapshot, error) { + _ string, _ time.Duration, _ VolumeFilter, _ ErrorHandler) (VssSnapshot, error) { return VssSnapshot{}, errors.New("VSS snapshots are only supported on windows") } diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 4e7f10385..4ed289366 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -733,10 +733,33 @@ func HasSufficientPrivilegesForVSS() error { return err } +// GetVolumeNameForVolumeMountPoint clear input parameter +// and calls the equivalent windows api. 
+func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { + if mountPoint != "" && mountPoint[len(mountPoint)-1] != filepath.Separator { + mountPoint += string(filepath.Separator) + } + + mountPointPointer, err := syscall.UTF16PtrFromString(mountPoint) + if err != nil { + return mountPoint, err + } + + // A reasonable size for the buffer to accommodate the largest possible + // volume GUID path is 50 characters. + volumeNameBuffer := make([]uint16, 50) + if err := windows.GetVolumeNameForVolumeMountPoint( + mountPointPointer, &volumeNameBuffer[0], 50); err != nil { + return mountPoint, err + } + + return syscall.UTF16ToString(volumeNameBuffer), nil +} + // NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't // finish within the timeout an error is returned. func NewVssSnapshot( - volume string, timeout time.Duration, msgError ErrorHandler) (VssSnapshot, error) { + volume string, timeout time.Duration, filter VolumeFilter, msgError ErrorHandler) (VssSnapshot, error) { is64Bit, err := isRunningOn64BitWindows() if err != nil { @@ -828,35 +851,42 @@ func NewVssSnapshot( return VssSnapshot{}, err } - mountPoints, err := enumerateMountedFolders(volume) - if err != nil { - iVssBackupComponents.Release() - return VssSnapshot{}, newVssTextError(fmt.Sprintf( - "failed to enumerate mount points for volume %s: %s", volume, err)) - } - mountPointInfo := make(map[string]MountPoint) - for _, mountPoint := range mountPoints { - // ensure every mountpoint is available even without a valid - // snapshot because we need to consider this when backing up files - mountPointInfo[mountPoint] = MountPoint{isSnapshotted: false} - - if isSupported, err := iVssBackupComponents.IsVolumeSupported(mountPoint); err != nil { - continue - } else if !isSupported { - continue - } - - var mountPointSnapshotSetID ole.GUID - err := iVssBackupComponents.AddToSnapshotSet(mountPoint, &mountPointSnapshotSetID) + // if filter==nil just don't process mount points for this volume at all + if filter != nil { + mountPoints, err := enumerateMountedFolders(volume) if err != nil { iVssBackupComponents.Release() - return VssSnapshot{}, err + + return VssSnapshot{}, newVssTextError(fmt.Sprintf( + "failed to enumerate mount points for volume %s: %s", volume, err)) } - mountPointInfo[mountPoint] = MountPoint{isSnapshotted: true, - snapshotSetID: mountPointSnapshotSetID} + for _, mountPoint := range mountPoints { + // ensure every mountpoint is available even without a valid + // snapshot because we need to consider this when backing up files + mountPointInfo[mountPoint] = MountPoint{isSnapshotted: false} + + if !filter(mountPoint) { + continue + } else if isSupported, err := iVssBackupComponents.IsVolumeSupported(mountPoint); err != nil { + continue + } else if !isSupported { + continue + } + + var mountPointSnapshotSetID ole.GUID + err := iVssBackupComponents.AddToSnapshotSet(mountPoint, &mountPointSnapshotSetID) + if err != nil { + iVssBackupComponents.Release() + + return VssSnapshot{}, err + } + + mountPointInfo[mountPoint] = MountPoint{isSnapshotted: true, + snapshotSetID: mountPointSnapshotSetID} + } } err = callAsyncFunctionAndWait(iVssBackupComponents.PrepareForBackup, "PrepareForBackup", From 9182e6bab55c87703d11987c19b9fdb463aa6a74 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Fri, 6 Nov 2020 06:18:30 +0300 Subject: [PATCH 04/16] vss: Update docs and changelog --- changelog/unreleased/pull-3067 | 18 ++++++++++++++++++ doc/040_backup.rst | 18 
++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 changelog/unreleased/pull-3067 diff --git a/changelog/unreleased/pull-3067 b/changelog/unreleased/pull-3067 new file mode 100644 index 000000000..a56c045fa --- /dev/null +++ b/changelog/unreleased/pull-3067 @@ -0,0 +1,18 @@ +Enhancement: Add options to configure Windows Shadow Copy Service + +Restic always used 120 sec. timeout and unconditionally created VSS snapshots +for all volume mount points on disk. Now this behavior can be fine-tuned by +new options, like exclude user specific volumes and mount points or completely +disable auto snapshotting of volume mount points. + +For example: + + restic backup --use-fs-snapshot -o vss.timeout=5m -o vss.excludeallmountpoints=true + +changes timeout to five minutes and disable snapshotting of mount points on all volumes, and + + restic backup --use-fs-snapshot -o vss.excludevolumes="d:\;c:\mnt\;\\?\Volume{e2e0315d-9066-4f97-8343-eb5659b35762}" + +excludes drive `D:`, mount point `C:\MNT` and specific volume from VSS snapshotting. + +https://github.com/restic/restic/pull/3067 diff --git a/doc/040_backup.rst b/doc/040_backup.rst index d0bd4b2e2..d1bb39f96 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -56,6 +56,24 @@ snapshot for each volume that contains files to backup. Files are read from the VSS snapshot instead of the regular filesystem. This allows to backup files that are exclusively locked by another process during the backup. +You can use three additional options to change VSS behaviour: + + * ``-o vss.timeout`` specifies timeout for VSS snapshot creation, the default value is 120 seconds + * ``-o vss.excludeallmountpoints`` disable auto snapshotting of all volume mount points + * ``-o vss.excludevolumes`` allows excluding specific volumes or volume mount points from snapshotting + +E.g., 2.5 minutes timeout with mount points snapshotting disabled can be specified as + +.. code-block:: console + + -o vss.timeout=2m30s -o vss.excludeallmountpoints=true + +and excluding drive ``D:\``, mount point ``C:\mnt`` and volume ``\\?\Volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}\`` as + +.. code-block:: console + + -o vss.excludevolumes="d:;c:\MNT\;\\?\volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}" + By default VSS ignores Outlook OST files. This is not a restriction of restic but the default Windows VSS configuration. 
The files not to snapshot are configured in the Windows registry under the following key: From 9d3d915e2c9a349630ac9d9272962b3ef801db39 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Fri, 6 Nov 2020 22:36:48 +0300 Subject: [PATCH 05/16] vss: Add some tests --- internal/fs/fs_local_vss_test.go | 211 +++++++++++++++++++++++++++++++ 1 file changed, 211 insertions(+) create mode 100644 internal/fs/fs_local_vss_test.go diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go new file mode 100644 index 000000000..ce07fee3c --- /dev/null +++ b/internal/fs/fs_local_vss_test.go @@ -0,0 +1,211 @@ +// +build windows + +package fs + +import ( + "fmt" + "regexp" + "strings" + "testing" + "time" + + "github.com/restic/restic/internal/options" +) + +func matchStrings(ptrs []string, strs []string) bool { + if len(ptrs) != len(strs) { + return false + } + + for i, p := range ptrs { + matched, err := regexp.MatchString(p, strs[i]) + if err != nil { + panic(err) + } + if !matched { + return false + } + } + + return true +} + +func matchMap(strs []string, m map[string]struct{}) bool { + if len(strs) != len(m) { + return false + } + + for _, s := range strs { + if _, ok := m[s]; !ok { + return false + } + } + + return true +} + +func TestVSSConfig(t *testing.T) { + type config struct { + excludeAllMountPoints bool + timeout time.Duration + } + setTests := []struct { + input options.Options + output config + }{ + { + options.Options{ + "vss.timeout": "6h38m42s", + }, + config{ + timeout: 23922000000000, + }, + }, + { + options.Options{ + "vss.excludeallmountpoints": "t", + }, + config{ + excludeAllMountPoints: true, + timeout: 120000000000, + }, + }, + { + options.Options{ + "vss.excludeallmountpoints": "0", + "vss.excludevolumes": "", + "vss.timeout": "120s", + }, + config{ + timeout: 120000000000, + }, + }, + } + for i, test := range setTests { + t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) { + cfg, err := ParseVSSConfig(test.input) + if err != nil { + t.Fatal(err) + } + + errorHandler := func(item string, err error) error { + t.Fatalf("unexpected error (%v)", err) + + return nil + } + messageHandler := func(msg string, args ...interface{}) { + t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args)) + } + + dst := NewLocalVss(errorHandler, messageHandler, cfg) + + if dst.excludeAllMountPoints != test.output.excludeAllMountPoints || + dst.excludeVolumes != nil || dst.timeout != test.output.timeout { + t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.output, dst) + } + }) + } +} + +func TestParseMountPoints(t *testing.T) { + volumeMatch := regexp.MustCompile(`^\\\\\?\\Volume\{[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}\}\\$`) + + // It's not a good idea to test functions based on GetVolumeNameForVolumeMountPoint by calling + // GetVolumeNameForVolumeMountPoint itself, but we have restricted test environment: + // cannot manage volumes and can only be sure that the mount point C:\ exists + sysVolume, err := GetVolumeNameForVolumeMountPoint("C:") + if err != nil { + t.Fatal(err) + } + // We don't know a valid volume GUID path for C:\, but we'll at least check its format + if !volumeMatch.MatchString(sysVolume) { + t.Fatalf("invalid volume GUID path: %s", sysVolume) + } + sysVolumeMutated := strings.ToUpper(sysVolume[:len(sysVolume)-1]) + sysVolumeMatch := strings.ToLower(sysVolume) + + type check struct { + volume string + result bool + } + setTests := []struct { + input options.Options + output []string + checks []check + errors 
[]string + }{ + { + options.Options{ + "vss.excludevolumes": `c:;c:\;` + sysVolume + `;` + sysVolumeMutated, + }, + []string{ + sysVolumeMatch, + }, + []check{ + {`c:\`, true}, + {`c:`, true}, + {sysVolume, true}, + {sysVolumeMutated, true}, + }, + []string{}, + }, + { + options.Options{ + "vss.excludevolumes": `z:\nonexistent;c:;c:\windows\;\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, + }, + []string{ + sysVolumeMatch, + }, + []check{ + {`c:\windows\`, false}, + {`\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, false}, + {`c:`, true}, + {``, false}, + }, + []string{ + `failed to parse vss\.excludevolumes \[z:\\nonexistent\]:.*`, + `failed to parse vss\.excludevolumes \[c:\\windows\\\]:.*`, + `failed to parse vss\.excludevolumes \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`, + `failed to get volume from mount point \[c:\\windows\\\]:.*`, + `failed to get volume from mount point \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`, + `failed to get volume from mount point \[\]:.*`, + }, + }, + } + + for i, test := range setTests { + t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) { + cfg, err := ParseVSSConfig(test.input) + if err != nil { + t.Fatal(err) + } + + var log []string + errorHandler := func(item string, err error) error { + log = append(log, strings.TrimSpace(err.Error())) + + return nil + } + messageHandler := func(msg string, args ...interface{}) { + t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args)) + } + + dst := NewLocalVss(errorHandler, messageHandler, cfg) + + if !matchMap(test.output, dst.excludeVolumes) { + t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", + test.output, dst.excludeVolumes) + } + + for _, c := range test.checks { + if dst.isMountPointExcluded(c.volume) != c.result { + t.Fatalf(`wrong check: isMountPointExcluded("%s") != %v`, c.volume, c.result) + } + } + + if !matchStrings(test.errors, log) { + t.Fatalf("wrong log, want:\n %#v\ngot:\n %#v", test.errors, log) + } + }) + } +} From 88c509e3e9c301d72e0aabf9abd3c7c13344b090 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Tue, 10 Nov 2020 06:48:05 +0300 Subject: [PATCH 06/16] vss: Change `ErrorHandler` signature We don't need `error` here: the only existing implementation of `ErrorHandler` always call `Backup.Error` and all implementations of `Backup.Error` always return nil. --- cmd/restic/cmd_backup.go | 4 ++-- internal/fs/fs_local_vss.go | 6 +++--- internal/fs/fs_local_vss_test.go | 8 ++------ 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 5329a928c..19b96e9b0 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -556,8 +556,8 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter return err } - errorHandler := func(item string, err error) error { - return progressReporter.Error(item, err) + errorHandler := func(item string, err error) { + _ = progressReporter.Error(item, err) } messageHandler := func(msg string, args ...interface{}) { diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 0e73092f2..230e14a1f 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -43,8 +43,8 @@ func ParseVSSConfig(o options.Options) (VSSConfig, error) { return cfg, nil } -// ErrorHandler is used to report errors via callback -type ErrorHandler func(item string, err error) error +// ErrorHandler is used to report errors via callback. 
+type ErrorHandler func(item string, err error) // MessageHandler is used to report errors/messages via callbacks. type MessageHandler func(msg string, args ...interface{}) @@ -114,7 +114,7 @@ func (fs *LocalVss) DeleteSnapshots() { for volumeName, snapshot := range fs.snapshots { if err := snapshot.Delete(); err != nil { - _ = fs.msgError(volumeName, errors.Errorf("failed to delete VSS snapshot: %s", err)) + fs.msgError(volumeName, errors.Errorf("failed to delete VSS snapshot: %s", err)) activeSnapshots[volumeName] = snapshot } } diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index ce07fee3c..6beb35b98 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -88,10 +88,8 @@ func TestVSSConfig(t *testing.T) { t.Fatal(err) } - errorHandler := func(item string, err error) error { + errorHandler := func(item string, err error) { t.Fatalf("unexpected error (%v)", err) - - return nil } messageHandler := func(msg string, args ...interface{}) { t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args)) @@ -181,10 +179,8 @@ func TestParseMountPoints(t *testing.T) { } var log []string - errorHandler := func(item string, err error) error { + errorHandler := func(item string, err error) { log = append(log, strings.TrimSpace(err.Error())) - - return nil } messageHandler := func(msg string, args ...interface{}) { t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args)) From 3bac1f0135f6c238e1ac90bfd14cb50eb83521c3 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Sun, 31 Jan 2021 00:34:41 +0300 Subject: [PATCH 07/16] vss: Fix issues reported by linters --- internal/fs/fs_local_vss.go | 4 +--- internal/fs/vss_windows.go | 43 +++++++++++++++++++++++-------------- 2 files changed, 28 insertions(+), 19 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 230e14a1f..5f55dcfd1 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -165,7 +165,6 @@ func (fs *LocalVss) isMountPointExcluded(mountPoint string) bool { // If creation of a snapshot fails the file's original path is returned as // a fallback. func (fs *LocalVss) snapshotPath(path string) string { - fixPath := fixpath(path) if strings.HasPrefix(fixPath, `\\?\UNC\`) { @@ -268,9 +267,8 @@ func (fs *LocalVss) snapshotPath(path string) string { snapshotPath = fs.Join(snapshot.GetSnapshotDeviceObject(), strings.TrimPrefix(fixPath, volumeName)) if snapshotPath == snapshot.GetSnapshotDeviceObject() { - snapshotPath = snapshotPath + string(filepath.Separator) + snapshotPath += string(filepath.Separator) } - } else { // no snapshot is available for the requested path: // -> try to backup without a snapshot diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 4ed289366..424548a74 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -21,6 +21,7 @@ import ( type HRESULT uint // HRESULT constant values necessary for using VSS api. +//nolint:golint const ( S_OK HRESULT = 0x00000000 E_ACCESSDENIED HRESULT = 0x80070005 @@ -256,6 +257,7 @@ type IVssBackupComponents struct { } // IVssBackupComponentsVTable is the vtable for IVssBackupComponents. 
+// nolint:structcheck type IVssBackupComponentsVTable struct { ole.IUnknownVtbl getWriterComponentsCount uintptr @@ -415,7 +417,7 @@ func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, idSnapshot panic(err) } - var result uintptr = 0 + var result uintptr if runtime.GOARCH == "386" { id := (*[4]uintptr)(unsafe.Pointer(ole.IID_NULL)) @@ -479,9 +481,9 @@ func (vss *IVssBackupComponents) DoSnapshotSet() (*IVSSAsync, error) { // DeleteSnapshots calls the equivalent VSS api. func (vss *IVssBackupComponents) DeleteSnapshots(snapshotID ole.GUID) (int32, ole.GUID, error) { - var deletedSnapshots int32 = 0 + var deletedSnapshots int32 var nondeletedSnapshotID ole.GUID - var result uintptr = 0 + var result uintptr if runtime.GOARCH == "386" { id := (*[4]uintptr)(unsafe.Pointer(&snapshotID)) @@ -505,7 +507,7 @@ func (vss *IVssBackupComponents) DeleteSnapshots(snapshotID ole.GUID) (int32, ol // GetSnapshotProperties calls the equivalent VSS api. func (vss *IVssBackupComponents) GetSnapshotProperties(snapshotID ole.GUID, properties *VssSnapshotProperties) error { - var result uintptr = 0 + var result uintptr if runtime.GOARCH == "386" { id := (*[4]uintptr)(unsafe.Pointer(&snapshotID)) @@ -528,8 +530,8 @@ func vssFreeSnapshotProperties(properties *VssSnapshotProperties) error { if err != nil { return err } - - proc.Call(uintptr(unsafe.Pointer(properties))) + // this function always succeeds and returns no value + _, _, _ = proc.Call(uintptr(unsafe.Pointer(properties))) return nil } @@ -544,6 +546,7 @@ func (vss *IVssBackupComponents) BackupComplete() (*IVSSAsync, error) { } // VssSnapshotProperties defines the properties of a VSS snapshot as part of the VSS api. +// nolint:structcheck type VssSnapshotProperties struct { snapshotID ole.GUID snapshotSetID ole.GUID @@ -700,7 +703,12 @@ func initializeVssCOMInterface() (*ole.IUnknown, error) { } // ensure COM is initialized before use - ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) + if err = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil { + // CoInitializeEx returns 1 if COM is already initialized + if oleErr, ok := err.(*ole.OleError); !ok || oleErr.Code() != 1 { + return nil, err + } + } var oleIUnknown *ole.IUnknown result, _, _ := vssInstance.Call(uintptr(unsafe.Pointer(&oleIUnknown))) @@ -761,7 +769,6 @@ func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { func NewVssSnapshot( volume string, timeout time.Duration, filter VolumeFilter, msgError ErrorHandler) (VssSnapshot, error) { is64Bit, err := isRunningOn64BitWindows() - if err != nil { return VssSnapshot{}, newVssTextError(fmt.Sprintf( "Failed to detect windows architecture: %s", err.Error())) @@ -884,8 +891,10 @@ func NewVssSnapshot( return VssSnapshot{}, err } - mountPointInfo[mountPoint] = MountPoint{isSnapshotted: true, - snapshotSetID: mountPointSnapshotSetID} + mountPointInfo[mountPoint] = MountPoint{ + isSnapshotted: true, + snapshotSetID: mountPointSnapshotSetID, + } } } @@ -903,7 +912,7 @@ func NewVssSnapshot( err = callAsyncFunctionAndWait(iVssBackupComponents.DoSnapshotSet, "DoSnapshotSet", deadline) if err != nil { - iVssBackupComponents.AbortBackup() + _ = iVssBackupComponents.AbortBackup() iVssBackupComponents.Release() return VssSnapshot{}, err } @@ -911,13 +920,12 @@ func NewVssSnapshot( var snapshotProperties VssSnapshotProperties err = iVssBackupComponents.GetSnapshotProperties(snapshotSetID, &snapshotProperties) if err != nil { - iVssBackupComponents.AbortBackup() + _ = iVssBackupComponents.AbortBackup() 
iVssBackupComponents.Release() return VssSnapshot{}, err } for mountPoint, info := range mountPointInfo { - if !info.isSnapshotted { continue } @@ -936,8 +944,10 @@ func NewVssSnapshot( mountPointInfo[mountPoint] = info } - return VssSnapshot{iVssBackupComponents, snapshotSetID, snapshotProperties, - snapshotProperties.GetSnapshotDeviceObject(), mountPointInfo, time.Until(deadline)}, nil + return VssSnapshot{ + iVssBackupComponents, snapshotSetID, snapshotProperties, + snapshotProperties.GetSnapshotDeviceObject(), mountPointInfo, time.Until(deadline), + }, nil } // Delete deletes the created snapshot. @@ -968,7 +978,7 @@ func (p *VssSnapshot) Delete() error { if _, _, e := p.iVssBackupComponents.DeleteSnapshots(p.snapshotID); e != nil { err = newVssTextError(fmt.Sprintf("Failed to delete snapshot: %s", e.Error())) - p.iVssBackupComponents.AbortBackup() + _ = p.iVssBackupComponents.AbortBackup() if err != nil { return err } @@ -1079,6 +1089,7 @@ func enumerateMountedFolders(volume string) ([]string, error) { return mountedFolders, nil } + // nolint:errcheck defer windows.FindVolumeMountPointClose(handle) volumeMountPoint := syscall.UTF16ToString(volumeMountPointBuffer) From bb0f93ef3d3ac6dd8b86928571adf2583dd443b7 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Mon, 22 Mar 2021 23:31:19 +0300 Subject: [PATCH 08/16] vss: Add "provider" option --- internal/fs/fs_local_vss.go | 5 +- internal/fs/vss.go | 2 +- internal/fs/vss_windows.go | 168 +++++++++++++++++++++++++++++++++--- 3 files changed, 160 insertions(+), 15 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 5f55dcfd1..de30bcedb 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -17,6 +17,7 @@ type VSSConfig struct { ExcludeAllMountPoints bool `option:"excludeallmountpoints" help:"exclude mountpoints from snapshotting on all volumes"` ExcludeVolumes string `option:"excludevolumes" help:"semicolon separated list of volumes to exclude from snapshotting (ex. 'c:\\;e:\\mnt;\\\\?\\Volume{...}')"` Timeout time.Duration `option:"timeout" help:"time that the VSS can spend creating snapshot before timing out"` + Provider string `option:"provider" help:"VSS provider identifier which will be used for snapshotting"` } func init() { @@ -64,6 +65,7 @@ type LocalVss struct { excludeAllMountPoints bool excludeVolumes map[string]struct{} timeout time.Duration + provider string } // statically ensure that LocalVss implements FS. @@ -102,6 +104,7 @@ func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler, cfg VSSConfig excludeAllMountPoints: cfg.ExcludeAllMountPoints, excludeVolumes: parseMountPoints(cfg.ExcludeVolumes, msgError), timeout: cfg.Timeout, + provider: cfg.Provider, } } @@ -209,7 +212,7 @@ func (fs *LocalVss) snapshotPath(path string) string { } } - if snapshot, err := NewVssSnapshot(vssVolume, fs.timeout, filter, fs.msgError); err != nil { + if snapshot, err := NewVssSnapshot(fs.provider, vssVolume, fs.timeout, filter, fs.msgError); err != nil { fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s", vssVolume, err)) fs.failedSnapshots[volumeNameLower] = struct{}{} diff --git a/internal/fs/vss.go b/internal/fs/vss.go index 838bdf79b..a54475480 100644 --- a/internal/fs/vss.go +++ b/internal/fs/vss.go @@ -41,7 +41,7 @@ func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { // NewVssSnapshot creates a new vss snapshot. 
If creating the snapshots doesn't // finish within the timeout an error is returned. -func NewVssSnapshot( +func NewVssSnapshot(_ string, _ string, _ time.Duration, _ VolumeFilter, _ ErrorHandler) (VssSnapshot, error) { return VssSnapshot{}, errors.New("VSS snapshots are only supported on windows") } diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 424548a74..18aea419d 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -367,7 +367,7 @@ func (vss *IVssBackupComponents) convertToVSSAsync( } // IsVolumeSupported calls the equivalent VSS api. -func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, error) { +func (vss *IVssBackupComponents) IsVolumeSupported(providerID *ole.GUID, volumeName string) (bool, error) { volumeNamePointer, err := syscall.UTF16PtrFromString(volumeName) if err != nil { panic(err) @@ -377,7 +377,7 @@ func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, err var result uintptr if runtime.GOARCH == "386" { - id := (*[4]uintptr)(unsafe.Pointer(ole.IID_NULL)) + id := (*[4]uintptr)(unsafe.Pointer(providerID)) result, _, _ = syscall.Syscall9(vss.getVTable().isVolumeSupported, 7, uintptr(unsafe.Pointer(vss)), id[0], id[1], id[2], id[3], @@ -385,7 +385,7 @@ func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, err 0) } else { result, _, _ = syscall.Syscall6(vss.getVTable().isVolumeSupported, 4, - uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(ole.IID_NULL)), + uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(providerID)), uintptr(unsafe.Pointer(volumeNamePointer)), uintptr(unsafe.Pointer(&isSupportedRaw)), 0, 0) } @@ -411,7 +411,7 @@ func (vss *IVssBackupComponents) StartSnapshotSet() (ole.GUID, error) { } // AddToSnapshotSet calls the equivalent VSS api. -func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, idSnapshot *ole.GUID) error { +func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, providerID *ole.GUID, idSnapshot *ole.GUID) error { volumeNamePointer, err := syscall.UTF16PtrFromString(volumeName) if err != nil { panic(err) @@ -420,15 +420,15 @@ func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, idSnapshot var result uintptr if runtime.GOARCH == "386" { - id := (*[4]uintptr)(unsafe.Pointer(ole.IID_NULL)) + id := (*[4]uintptr)(unsafe.Pointer(providerID)) result, _, _ = syscall.Syscall9(vss.getVTable().addToSnapshotSet, 7, - uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)), id[0], id[1], - id[2], id[3], uintptr(unsafe.Pointer(idSnapshot)), 0, 0) + uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)), + id[0], id[1], id[2], id[3], uintptr(unsafe.Pointer(idSnapshot)), 0, 0) } else { result, _, _ = syscall.Syscall6(vss.getVTable().addToSnapshotSet, 4, uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)), - uintptr(unsafe.Pointer(ole.IID_NULL)), uintptr(unsafe.Pointer(idSnapshot)), 0, 0) + uintptr(unsafe.Pointer(providerID)), uintptr(unsafe.Pointer(idSnapshot)), 0, 0) } return newVssErrorIfResultNotOK("AddToSnapshotSet() failed", HRESULT(result)) @@ -535,6 +535,13 @@ func vssFreeSnapshotProperties(properties *VssSnapshotProperties) error { return nil } +func vssFreeProviderProperties(p *VssProviderProperties) { + ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerName))) + p.providerName = nil + ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerVersion))) + p.providerName = nil +} + // BackupComplete calls the equivalent VSS api. 
func (vss *IVssBackupComponents) BackupComplete() (*IVSSAsync, error) { var oleIUnknown *ole.IUnknown @@ -563,6 +570,17 @@ type VssSnapshotProperties struct { status uint } +// VssProviderProperties defines the properties of a VSS provider as part of the VSS api. +// nolint:structcheck +type VssProviderProperties struct { + providerID ole.GUID + providerName *uint16 + providerType uint32 + providerVersion *uint16 + providerVersionID ole.GUID + classID ole.GUID +} + // GetSnapshotDeviceObject returns root path to access the snapshot files // and folders. func (p *VssSnapshotProperties) GetSnapshotDeviceObject() string { @@ -660,6 +678,75 @@ func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(timeout time.Duration) error { return nil } +// UIID_IVSS_ADMIN defines the GUID of IVSSAdmin. +var ( + UIID_IVSS_ADMIN = ole.NewGUID("{77ED5996-2F63-11d3-8A39-00C04F72D8E3}") + CLSID_VSS_COORDINATOR = ole.NewGUID("{E579AB5F-1CC4-44b4-BED9-DE0991FF0623}") +) + +// IVSSAdmin VSS api interface. +type IVSSAdmin struct { + ole.IUnknown +} + +// IVSSAdminVTable is the vtable for IVSSAdmin. +// nolint:structcheck +type IVSSAdminVTable struct { + ole.IUnknownVtbl + registerProvider uintptr + unregisterProvider uintptr + queryProviders uintptr + abortAllSnapshotsInProgress uintptr +} + +// getVTable returns the vtable for IVSSAdmin. +func (vssAdmin *IVSSAdmin) getVTable() *IVSSAdminVTable { + return (*IVSSAdminVTable)(unsafe.Pointer(vssAdmin.RawVTable)) +} + +// QueryProviders calls the equivalent VSS api. +func (vssAdmin *IVSSAdmin) QueryProviders() (*IVssEnumObject, error) { + var enum *IVssEnumObject + + result, _, _ := syscall.Syscall(vssAdmin.getVTable().queryProviders, 2, + uintptr(unsafe.Pointer(vssAdmin)), uintptr(unsafe.Pointer(&enum)), 0) + + return enum, newVssErrorIfResultNotOK("QueryProviders() failed", HRESULT(result)) +} + +// IVssEnumObject VSS api interface. +type IVssEnumObject struct { + ole.IUnknown +} + +// IVssEnumObjectVTable is the vtable for IVssEnumObject. +// nolint:structcheck +type IVssEnumObjectVTable struct { + ole.IUnknownVtbl + next uintptr + skip uintptr + reset uintptr + clone uintptr +} + +// getVTable returns the vtable for IVssEnumObject. +func (vssEnum *IVssEnumObject) getVTable() *IVssEnumObjectVTable { + return (*IVssEnumObjectVTable)(unsafe.Pointer(vssEnum.RawVTable)) +} + +// Next calls the equivalent VSS api. +func (vssEnum *IVssEnumObject) Next(count uint, props unsafe.Pointer) (uint, error) { + var fetched uint32 + result, _, _ := syscall.Syscall6(vssEnum.getVTable().next, 4, + uintptr(unsafe.Pointer(vssEnum)), uintptr(count), uintptr(props), + uintptr(unsafe.Pointer(&fetched)), 0, 0) + if result == 1 { + return uint(fetched), nil + } + + return uint(fetched), newVssErrorIfResultNotOK("Next() failed", HRESULT(result)) +} + // MountPoint wraps all information of a snapshot of a mountpoint on a volume. type MountPoint struct { isSnapshotted bool @@ -766,7 +853,7 @@ func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { // NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't // finish within the timeout an error is returned. 
-func NewVssSnapshot( +func NewVssSnapshot(provider string, volume string, timeout time.Duration, filter VolumeFilter, msgError ErrorHandler) (VssSnapshot, error) { is64Bit, err := isRunningOn64BitWindows() if err != nil { @@ -814,6 +901,12 @@ func NewVssSnapshot( iVssBackupComponents := (*IVssBackupComponents)(unsafe.Pointer(comInterface)) + providerID, err := getProviderID(provider) + if err != nil { + iVssBackupComponents.Release() + return VssSnapshot{}, err + } + if err := iVssBackupComponents.InitializeForBackup(); err != nil { iVssBackupComponents.Release() return VssSnapshot{}, err @@ -838,7 +931,7 @@ func NewVssSnapshot( return VssSnapshot{}, err } - if isSupported, err := iVssBackupComponents.IsVolumeSupported(volume); err != nil { + if isSupported, err := iVssBackupComponents.IsVolumeSupported(providerID, volume); err != nil { iVssBackupComponents.Release() return VssSnapshot{}, err } else if !isSupported { @@ -853,7 +946,7 @@ func NewVssSnapshot( return VssSnapshot{}, err } - if err := iVssBackupComponents.AddToSnapshotSet(volume, &snapshotSetID); err != nil { + if err := iVssBackupComponents.AddToSnapshotSet(volume, providerID, &snapshotSetID); err != nil { iVssBackupComponents.Release() return VssSnapshot{}, err } @@ -877,14 +970,14 @@ func NewVssSnapshot( if !filter(mountPoint) { continue - } else if isSupported, err := iVssBackupComponents.IsVolumeSupported(mountPoint); err != nil { + } else if isSupported, err := iVssBackupComponents.IsVolumeSupported(providerID, mountPoint); err != nil { continue } else if !isSupported { continue } var mountPointSnapshotSetID ole.GUID - err := iVssBackupComponents.AddToSnapshotSet(mountPoint, &mountPointSnapshotSetID) + err := iVssBackupComponents.AddToSnapshotSet(mountPoint, providerID, &mountPointSnapshotSetID) if err != nil { iVssBackupComponents.Release() @@ -988,6 +1081,55 @@ func (p *VssSnapshot) Delete() error { return nil } +func getProviderID(provider string) (*ole.GUID, error) { + comInterface, err := ole.CreateInstance(CLSID_VSS_COORDINATOR, UIID_IVSS_ADMIN) + if err != nil { + return nil, err + } + defer comInterface.Release() + + vssAdmin := (*IVSSAdmin)(unsafe.Pointer(comInterface)) + + providerLower := strings.ToLower(provider) + switch providerLower { + case "": + return ole.IID_NULL, nil + case "ms": + return ole.NewGUID("{b5946137-7b9f-4925-af80-51abd60b20d5}"), nil + } + + enum, err := vssAdmin.QueryProviders() + if err != nil { + return nil, err + } + defer enum.Release() + + id := ole.NewGUID(provider) + + var props struct { + objectType uint32 + provider VssProviderProperties + } + for { + count, err := enum.Next(1, unsafe.Pointer(&props)) + if err != nil { + return nil, err + } + + if count < 1 { + return nil, errors.Errorf(`invalid VSS provider "%s"`, provider) + } + + name := ole.UTF16PtrToString(props.provider.providerName) + vssFreeProviderProperties(&props.provider) + + if id != nil && *id == props.provider.providerID || + id == nil && providerLower == strings.ToLower(name) { + return &props.provider.providerID, nil + } + } +} + // asyncCallFunc is the callback type for callAsyncFunctionAndWait. 
type asyncCallFunc func() (*IVSSAsync, error) From 739d3243d9f8e17b0f80440aaec9bacdcb046745 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Tue, 23 Mar 2021 07:41:45 +0300 Subject: [PATCH 09/16] vss: Update docs and changelog --- changelog/unreleased/pull-3067 | 4 ++++ doc/040_backup.rst | 17 ++++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/changelog/unreleased/pull-3067 b/changelog/unreleased/pull-3067 index a56c045fa..52f04c5e0 100644 --- a/changelog/unreleased/pull-3067 +++ b/changelog/unreleased/pull-3067 @@ -15,4 +15,8 @@ changes timeout to five minutes and disable snapshotting of mount points on all excludes drive `D:`, mount point `C:\MNT` and specific volume from VSS snapshotting. + restic backup --use-fs-snapshot -o vss.provider={b5946137-7b9f-4925-af80-51abd60b20d5} + +uses 'Microsoft Software Shadow Copy provider 1.0' instead of the default provider. + https://github.com/restic/restic/pull/3067 diff --git a/doc/040_backup.rst b/doc/040_backup.rst index d1bb39f96..7360bb0e0 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -56,11 +56,12 @@ snapshot for each volume that contains files to backup. Files are read from the VSS snapshot instead of the regular filesystem. This allows to backup files that are exclusively locked by another process during the backup. -You can use three additional options to change VSS behaviour: +You can use additional options to change VSS behaviour: * ``-o vss.timeout`` specifies timeout for VSS snapshot creation, the default value is 120 seconds * ``-o vss.excludeallmountpoints`` disable auto snapshotting of all volume mount points * ``-o vss.excludevolumes`` allows excluding specific volumes or volume mount points from snapshotting + * ``-o vss.provider`` specifies VSS provider used for snapshotting E.g., 2.5 minutes timeout with mount points snapshotting disabled can be specified as @@ -74,6 +75,20 @@ and excluding drive ``D:\``, mount point ``C:\mnt`` and volume ``\\?\Volume{04ce -o vss.excludevolumes="d:;c:\MNT\;\\?\volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}" +VSS provider can be specified by GUID + +.. code-block:: console + + -o vss.provider={3f900f90-00e9-440e-873a-96ca5eb079e5} + +or by name + +.. code-block:: console + + -o vss.provider="Hyper-V IC Software Shadow Copy Provider" + +Also ``MS`` can be used as alias for ``Microsoft Software Shadow Copy provider 1.0``. + By default VSS ignores Outlook OST files. This is not a restriction of restic but the default Windows VSS configuration. 
The files not to snapshot are configured in the Windows registry under the following key: From 0a8f9c5d9cef798902a49e63da0a51be0e14d095 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Fri, 30 Apr 2021 18:01:40 +0300 Subject: [PATCH 10/16] vss: Add tests for "provider" option --- internal/fs/fs_local_vss_test.go | 88 ++++++++++++++++++++++++++++++-- 1 file changed, 84 insertions(+), 4 deletions(-) diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index 6beb35b98..cff881151 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + ole "github.com/go-ole/go-ole" "github.com/restic/restic/internal/options" ) @@ -18,6 +19,9 @@ func matchStrings(ptrs []string, strs []string) bool { } for i, p := range ptrs { + if p == "" { + return false + } matched, err := regexp.MatchString(p, strs[i]) if err != nil { panic(err) @@ -48,6 +52,7 @@ func TestVSSConfig(t *testing.T) { type config struct { excludeAllMountPoints bool timeout time.Duration + provider string } setTests := []struct { input options.Options @@ -55,19 +60,23 @@ func TestVSSConfig(t *testing.T) { }{ { options.Options{ - "vss.timeout": "6h38m42s", + "vss.timeout": "6h38m42s", + "vss.provider": "Ms", }, config{ - timeout: 23922000000000, + timeout: 23922000000000, + provider: "Ms", }, }, { options.Options{ "vss.excludeallmountpoints": "t", + "vss.provider": "{b5946137-7b9f-4925-af80-51abd60b20d5}", }, config{ excludeAllMountPoints: true, timeout: 120000000000, + provider: "{b5946137-7b9f-4925-af80-51abd60b20d5}", }, }, { @@ -75,9 +84,11 @@ func TestVSSConfig(t *testing.T) { "vss.excludeallmountpoints": "0", "vss.excludevolumes": "", "vss.timeout": "120s", + "vss.provider": "Microsoft Software Shadow Copy provider 1.0", }, config{ - timeout: 120000000000, + timeout: 120000000000, + provider: "Microsoft Software Shadow Copy provider 1.0", }, }, } @@ -98,7 +109,8 @@ func TestVSSConfig(t *testing.T) { dst := NewLocalVss(errorHandler, messageHandler, cfg) if dst.excludeAllMountPoints != test.output.excludeAllMountPoints || - dst.excludeVolumes != nil || dst.timeout != test.output.timeout { + dst.excludeVolumes != nil || dst.timeout != test.output.timeout || + dst.provider != test.output.provider { t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.output, dst) } }) @@ -205,3 +217,71 @@ func TestParseMountPoints(t *testing.T) { }) } } + +func TestParseProvider(t *testing.T) { + msProvider := ole.NewGUID("{b5946137-7b9f-4925-af80-51abd60b20d5}") + setTests := []struct { + provider string + id *ole.GUID + result string + }{ + { + "", + ole.IID_NULL, + "", + }, + { + "mS", + msProvider, + "", + }, + { + "{B5946137-7b9f-4925-Af80-51abD60b20d5}", + msProvider, + "", + }, + { + "Microsoft Software Shadow Copy provider 1.0", + msProvider, + "", + }, + { + "{04560982-3d7d-4bbc-84f7-0712f833a28f}", + nil, + `invalid VSS provider "{04560982-3d7d-4bbc-84f7-0712f833a28f}"`, + }, + { + "non-existent provider", + nil, + `invalid VSS provider "non-existent provider"`, + }, + } + + _ = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) + + for i, test := range setTests { + t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) { + id, err := getProviderID(test.provider) + + if err != nil && id != nil { + t.Fatalf("err!=nil but id=%v", id) + } + + if test.result != "" || err != nil { + var result string + if err != nil { + result = err.Error() + } + matched, err := regexp.MatchString(test.result, result) + if err != nil { + 
panic(err) + } + if !matched || test.result == "" { + t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.result, result) + } + } else if !ole.IsEqualGUID(id, test.id) { + t.Fatalf("wrong id, want:\n %s\ngot:\n %s", test.id.String(), id.String()) + } + }) + } +} From 5703e5a6526fda6ab7856876bedaf07cffcce752 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Mon, 29 Apr 2024 01:18:46 +0300 Subject: [PATCH 11/16] Fix texts and comments --- changelog/unreleased/pull-3067 | 12 ++++++------ doc/040_backup.rst | 12 ++++++------ internal/fs/fs_local_vss_test.go | 4 +++- internal/fs/vss.go | 2 +- internal/fs/vss_windows.go | 2 +- 5 files changed, 17 insertions(+), 15 deletions(-) diff --git a/changelog/unreleased/pull-3067 b/changelog/unreleased/pull-3067 index 52f04c5e0..855c7f2be 100644 --- a/changelog/unreleased/pull-3067 +++ b/changelog/unreleased/pull-3067 @@ -2,21 +2,21 @@ Enhancement: Add options to configure Windows Shadow Copy Service Restic always used 120 sec. timeout and unconditionally created VSS snapshots for all volume mount points on disk. Now this behavior can be fine-tuned by -new options, like exclude user specific volumes and mount points or completely +new options, like exclude specific volumes and mount points or completely disable auto snapshotting of volume mount points. For example: - restic backup --use-fs-snapshot -o vss.timeout=5m -o vss.excludeallmountpoints=true - + restic backup --use-fs-snapshot -o vss.timeout=5m -o vss.exclude-all-mount-points=true + changes timeout to five minutes and disable snapshotting of mount points on all volumes, and - restic backup --use-fs-snapshot -o vss.excludevolumes="d:\;c:\mnt\;\\?\Volume{e2e0315d-9066-4f97-8343-eb5659b35762}" + restic backup --use-fs-snapshot -o vss.exclude-volumes="d:\;c:\mnt\;\\?\Volume{e2e0315d-9066-4f97-8343-eb5659b35762}" -excludes drive `D:`, mount point `C:\MNT` and specific volume from VSS snapshotting. +excludes drive `d:`, mount point `c:\mnt` and specific volume from VSS snapshotting. restic backup --use-fs-snapshot -o vss.provider={b5946137-7b9f-4925-af80-51abd60b20d5} - + uses 'Microsoft Software Shadow Copy provider 1.0' instead of the default provider. https://github.com/restic/restic/pull/3067 diff --git a/doc/040_backup.rst b/doc/040_backup.rst index 7360bb0e0..50de954ef 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -59,21 +59,21 @@ exclusively locked by another process during the backup. You can use additional options to change VSS behaviour: * ``-o vss.timeout`` specifies timeout for VSS snapshot creation, the default value is 120 seconds - * ``-o vss.excludeallmountpoints`` disable auto snapshotting of all volume mount points - * ``-o vss.excludevolumes`` allows excluding specific volumes or volume mount points from snapshotting + * ``-o vss.exclude-all-mount-points`` disable auto snapshotting of all volume mount points + * ``-o vss.exclude-volumes`` allows excluding specific volumes or volume mount points from snapshotting * ``-o vss.provider`` specifies VSS provider used for snapshotting -E.g., 2.5 minutes timeout with mount points snapshotting disabled can be specified as +For example a 2.5 minutes timeout with snapshotting of mount points disabled can be specified as .. 
code-block:: console - -o vss.timeout=2m30s -o vss.excludeallmountpoints=true + -o vss.timeout=2m30s -o vss.exclude-all-mount-points=true -and excluding drive ``D:\``, mount point ``C:\mnt`` and volume ``\\?\Volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}\`` as +and excluding drive ``d:\``, mount point ``c:\mnt`` and volume ``\\?\Volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}\`` as .. code-block:: console - -o vss.excludevolumes="d:;c:\MNT\;\\?\volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}" + -o vss.exclude-volumes="d:;c:\mnt\;\\?\volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}" VSS provider can be specified by GUID diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index cff881151..23e86b911 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -127,10 +127,12 @@ func TestParseMountPoints(t *testing.T) { if err != nil { t.Fatal(err) } - // We don't know a valid volume GUID path for C:\, but we'll at least check its format + // We don't know a valid volume GUID path for c:\, but we'll at least check its format if !volumeMatch.MatchString(sysVolume) { t.Fatalf("invalid volume GUID path: %s", sysVolume) } + // Changing the case and removing trailing backslash allows tests + // the equality of different ways of writing a volume name sysVolumeMutated := strings.ToUpper(sysVolume[:len(sysVolume)-1]) sysVolumeMatch := strings.ToLower(sysVolume) diff --git a/internal/fs/vss.go b/internal/fs/vss.go index a54475480..8bfffab71 100644 --- a/internal/fs/vss.go +++ b/internal/fs/vss.go @@ -33,7 +33,7 @@ func HasSufficientPrivilegesForVSS() error { return errors.New("VSS snapshots are only supported on windows") } -// GetVolumeNameForVolumeMountPoint clear input parameter +// GetVolumeNameForVolumeMountPoint add trailing backslash to input parameter // and calls the equivalent windows api. func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { return mountPoint, nil diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 18aea419d..91c60c4ba 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -828,7 +828,7 @@ func HasSufficientPrivilegesForVSS() error { return err } -// GetVolumeNameForVolumeMountPoint clear input parameter +// GetVolumeNameForVolumeMountPoint add trailing backslash to input parameter // and calls the equivalent windows api. func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { if mountPoint != "" && mountPoint[len(mountPoint)-1] != filepath.Separator { From 24330c19a8be55fc4d4f89d9f4b912b066e0fa32 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Mon, 29 Apr 2024 01:21:33 +0300 Subject: [PATCH 12/16] Use kebab case in option names --- internal/fs/fs_local_vss.go | 6 +++--- internal/fs/fs_local_vss_test.go | 22 +++++++++++----------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index de30bcedb..0f983d136 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -14,8 +14,8 @@ import ( // VSSConfig holds extended options of windows volume shadow copy service. type VSSConfig struct { - ExcludeAllMountPoints bool `option:"excludeallmountpoints" help:"exclude mountpoints from snapshotting on all volumes"` - ExcludeVolumes string `option:"excludevolumes" help:"semicolon separated list of volumes to exclude from snapshotting (ex. 
'c:\\;e:\\mnt;\\\\?\\Volume{...}')"` + ExcludeAllMountPoints bool `option:"exclude-all-mount-points" help:"exclude mountpoints from snapshotting on all volumes"` + ExcludeVolumes string `option:"exclude-volumes" help:"semicolon separated list of volumes to exclude from snapshotting (ex. 'c:\\;e:\\mnt;\\\\?\\Volume{...}')"` Timeout time.Duration `option:"timeout" help:"time that the VSS can spend creating snapshot before timing out"` Provider string `option:"provider" help:"VSS provider identifier which will be used for snapshotting"` } @@ -80,7 +80,7 @@ func parseMountPoints(list string, msgError ErrorHandler) (volumes map[string]st } for _, s := range strings.Split(list, ";") { if v, err := GetVolumeNameForVolumeMountPoint(s); err != nil { - msgError(s, errors.Errorf("failed to parse vss.excludevolumes [%s]: %s", s, err)) + msgError(s, errors.Errorf("failed to parse vss.exclude-volumes [%s]: %s", s, err)) } else { if volumes == nil { volumes = make(map[string]struct{}) diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index 23e86b911..9e11b6c6e 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -70,8 +70,8 @@ func TestVSSConfig(t *testing.T) { }, { options.Options{ - "vss.excludeallmountpoints": "t", - "vss.provider": "{b5946137-7b9f-4925-af80-51abd60b20d5}", + "vss.exclude-all-mount-points": "t", + "vss.provider": "{b5946137-7b9f-4925-af80-51abd60b20d5}", }, config{ excludeAllMountPoints: true, @@ -81,10 +81,10 @@ func TestVSSConfig(t *testing.T) { }, { options.Options{ - "vss.excludeallmountpoints": "0", - "vss.excludevolumes": "", - "vss.timeout": "120s", - "vss.provider": "Microsoft Software Shadow Copy provider 1.0", + "vss.exclude-all-mount-points": "0", + "vss.exclude-volumes": "", + "vss.timeout": "120s", + "vss.provider": "Microsoft Software Shadow Copy provider 1.0", }, config{ timeout: 120000000000, @@ -148,7 +148,7 @@ func TestParseMountPoints(t *testing.T) { }{ { options.Options{ - "vss.excludevolumes": `c:;c:\;` + sysVolume + `;` + sysVolumeMutated, + "vss.exclude-volumes": `c:;c:\;` + sysVolume + `;` + sysVolumeMutated, }, []string{ sysVolumeMatch, @@ -163,7 +163,7 @@ func TestParseMountPoints(t *testing.T) { }, { options.Options{ - "vss.excludevolumes": `z:\nonexistent;c:;c:\windows\;\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, + "vss.exclude-volumes": `z:\nonexistent;c:;c:\windows\;\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, }, []string{ sysVolumeMatch, @@ -175,9 +175,9 @@ func TestParseMountPoints(t *testing.T) { {``, false}, }, []string{ - `failed to parse vss\.excludevolumes \[z:\\nonexistent\]:.*`, - `failed to parse vss\.excludevolumes \[c:\\windows\\\]:.*`, - `failed to parse vss\.excludevolumes \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`, + `failed to parse vss\.exclude-volumes \[z:\\nonexistent\]:.*`, + `failed to parse vss\.exclude-volumes \[c:\\windows\\\]:.*`, + `failed to parse vss\.exclude-volumes \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`, `failed to get volume from mount point \[c:\\windows\\\]:.*`, `failed to get volume from mount point \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`, `failed to get volume from mount point \[\]:.*`, From 90b168eb6cde4fe1afd6aad68185d6abcca3b806 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Mon, 29 Apr 2024 01:23:50 +0300 Subject: [PATCH 13/16] isMountPointExcluded to isMountPointIncluded --- internal/fs/fs_local_vss.go | 22 
++++++++++------------ internal/fs/fs_local_vss_test.go | 20 ++++++++++---------- 2 files changed, 20 insertions(+), 22 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 0f983d136..48ab165f1 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -145,22 +145,20 @@ func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) { return os.Lstat(fs.snapshotPath(name)) } -// isMountPointExcluded is true if given mountpoint excluded by user. -func (fs *LocalVss) isMountPointExcluded(mountPoint string) bool { +// isMountPointIncluded is true if given mountpoint included by user. +func (fs *LocalVss) isMountPointIncluded(mountPoint string) bool { if fs.excludeVolumes == nil { - return false + return true } volume, err := GetVolumeNameForVolumeMountPoint(mountPoint) if err != nil { fs.msgError(mountPoint, errors.Errorf("failed to get volume from mount point [%s]: %s", mountPoint, err)) - - return false + return true } _, ok := fs.excludeVolumes[strings.ToLower(volume)] - - return ok + return !ok } // snapshotPath returns the path inside a VSS snapshots if it already exists. @@ -199,20 +197,20 @@ func (fs *LocalVss) snapshotPath(path string) string { if !snapshotExists && !snapshotFailed { vssVolume := volumeNameLower + string(filepath.Separator) - if fs.isMountPointExcluded(vssVolume) { + if !fs.isMountPointIncluded(vssVolume) { fs.msgMessage("snapshots for [%s] excluded by user\n", vssVolume) fs.failedSnapshots[volumeNameLower] = struct{}{} } else { fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume) - var filter VolumeFilter + var includeVolume VolumeFilter if !fs.excludeAllMountPoints { - filter = func(volume string) bool { - return !fs.isMountPointExcluded(volume) + includeVolume = func(volume string) bool { + return fs.isMountPointIncluded(volume) } } - if snapshot, err := NewVssSnapshot(fs.provider, vssVolume, fs.timeout, filter, fs.msgError); err != nil { + if snapshot, err := NewVssSnapshot(fs.provider, vssVolume, fs.timeout, includeVolume, fs.msgError); err != nil { fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s", vssVolume, err)) fs.failedSnapshots[volumeNameLower] = struct{}{} diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index 9e11b6c6e..c25ce4535 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -154,10 +154,10 @@ func TestParseMountPoints(t *testing.T) { sysVolumeMatch, }, []check{ - {`c:\`, true}, - {`c:`, true}, - {sysVolume, true}, - {sysVolumeMutated, true}, + {`c:\`, false}, + {`c:`, false}, + {sysVolume, false}, + {sysVolumeMutated, false}, }, []string{}, }, @@ -169,10 +169,10 @@ func TestParseMountPoints(t *testing.T) { sysVolumeMatch, }, []check{ - {`c:\windows\`, false}, - {`\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, false}, - {`c:`, true}, - {``, false}, + {`c:\windows\`, true}, + {`\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, true}, + {`c:`, false}, + {``, true}, }, []string{ `failed to parse vss\.exclude-volumes \[z:\\nonexistent\]:.*`, @@ -208,8 +208,8 @@ func TestParseMountPoints(t *testing.T) { } for _, c := range test.checks { - if dst.isMountPointExcluded(c.volume) != c.result { - t.Fatalf(`wrong check: isMountPointExcluded("%s") != %v`, c.volume, c.result) + if dst.isMountPointIncluded(c.volume) != c.result { + t.Fatalf(`wrong check: isMountPointIncluded("%s") != %v`, c.volume, c.result) } } From 7ee889bb0d0dcf0292745975454ed53d94cdb0a9 Mon Sep 17 00:00:00 2001 From: DRON-666 
<64691982+DRON-666@users.noreply.github.com> Date: Mon, 29 Apr 2024 01:25:25 +0300 Subject: [PATCH 14/16] Use S_FALSE and MaxInt --- internal/fs/vss_windows.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 91c60c4ba..e8c5dc561 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -5,6 +5,7 @@ package fs import ( "fmt" + "math" "path/filepath" "runtime" "strings" @@ -24,6 +25,7 @@ type HRESULT uint //nolint:golint const ( S_OK HRESULT = 0x00000000 + S_FALSE HRESULT = 0x00000001 E_ACCESSDENIED HRESULT = 0x80070005 E_OUTOFMEMORY HRESULT = 0x8007000E E_INVALIDARG HRESULT = 0x80070057 @@ -640,7 +642,7 @@ func (vssAsync *IVSSAsync) QueryStatus() (HRESULT, uint32) { // WaitUntilAsyncFinished waits until either the async call is finished or // the given timeout is reached. func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(timeout time.Duration) error { - const maxTimeout = 2147483647 * time.Millisecond + const maxTimeout = math.MaxInt32 * time.Millisecond if timeout > maxTimeout { timeout = maxTimeout } @@ -740,7 +742,7 @@ func (vssEnum *IVssEnumObject) Next(count uint, props unsafe.Pointer) (uint, err result, _, _ := syscall.Syscall6(vssEnum.getVTable().next, 4, uintptr(unsafe.Pointer(vssEnum)), uintptr(count), uintptr(props), uintptr(unsafe.Pointer(&fetched)), 0, 0) - if result == 1 { + if HRESULT(result) == S_FALSE { return uint(fetched), nil } @@ -791,8 +793,8 @@ func initializeVssCOMInterface() (*ole.IUnknown, error) { // ensure COM is initialized before use if err = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil { - // CoInitializeEx returns 1 if COM is already initialized - if oleErr, ok := err.(*ole.OleError); !ok || oleErr.Code() != 1 { + // CoInitializeEx returns S_FALSE if COM is already initialized + if oleErr, ok := err.(*ole.OleError); !ok || HRESULT(oleErr.Code()) != S_FALSE { return nil, err } } From 125dba23c5e8e453eb7a5d784ac262cacca4f3c9 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Mon, 29 Apr 2024 01:27:34 +0300 Subject: [PATCH 15/16] Rearange code --- internal/fs/fs_local_vss_test.go | 6 +----- internal/fs/vss_windows.go | 30 +++++++++++++++--------------- 2 files changed, 16 insertions(+), 20 deletions(-) diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index c25ce4535..60262c873 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -274,11 +274,7 @@ func TestParseProvider(t *testing.T) { if err != nil { result = err.Error() } - matched, err := regexp.MatchString(test.result, result) - if err != nil { - panic(err) - } - if !matched || test.result == "" { + if test.result != result || test.result == "" { t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.result, result) } } else if !ole.IsEqualGUID(id, test.id) { diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index e8c5dc561..0b51b00f3 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -537,13 +537,6 @@ func vssFreeSnapshotProperties(properties *VssSnapshotProperties) error { return nil } -func vssFreeProviderProperties(p *VssProviderProperties) { - ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerName))) - p.providerName = nil - ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerVersion))) - p.providerName = nil -} - // BackupComplete calls the equivalent VSS api. 
func (vss *IVssBackupComponents) BackupComplete() (*IVSSAsync, error) { var oleIUnknown *ole.IUnknown @@ -583,6 +576,13 @@ type VssProviderProperties struct { classID ole.GUID } +func vssFreeProviderProperties(p *VssProviderProperties) { + ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerName))) + p.providerName = nil + ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerVersion))) + p.providerVersion = nil +} + // GetSnapshotDeviceObject returns root path to access the snapshot files // and folders. func (p *VssSnapshotProperties) GetSnapshotDeviceObject() string { @@ -1084,14 +1084,6 @@ func (p *VssSnapshot) Delete() error { } func getProviderID(provider string) (*ole.GUID, error) { - comInterface, err := ole.CreateInstance(CLSID_VSS_COORDINATOR, UIID_IVSS_ADMIN) - if err != nil { - return nil, err - } - defer comInterface.Release() - - vssAdmin := (*IVSSAdmin)(unsafe.Pointer(comInterface)) - providerLower := strings.ToLower(provider) switch providerLower { case "": @@ -1100,6 +1092,14 @@ func getProviderID(provider string) (*ole.GUID, error) { return ole.NewGUID("{b5946137-7b9f-4925-af80-51abd60b20d5}"), nil } + comInterface, err := ole.CreateInstance(CLSID_VSS_COORDINATOR, UIID_IVSS_ADMIN) + if err != nil { + return nil, err + } + defer comInterface.Release() + + vssAdmin := (*IVSSAdmin)(unsafe.Pointer(comInterface)) + enum, err := vssAdmin.QueryProviders() if err != nil { return nil, err From ccd35565ee10a12a7698f088aa37df1035fc230d Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Mon, 29 Apr 2024 01:48:22 +0300 Subject: [PATCH 16/16] s/sec./seconds --- changelog/unreleased/pull-3067 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog/unreleased/pull-3067 b/changelog/unreleased/pull-3067 index 855c7f2be..fbdcfd7e5 100644 --- a/changelog/unreleased/pull-3067 +++ b/changelog/unreleased/pull-3067 @@ -1,6 +1,6 @@ Enhancement: Add options to configure Windows Shadow Copy Service -Restic always used 120 sec. timeout and unconditionally created VSS snapshots +Restic always used 120 seconds timeout and unconditionally created VSS snapshots for all volume mount points on disk. Now this behavior can be fine-tuned by new options, like exclude specific volumes and mount points or completely disable auto snapshotting of volume mount points.
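
Taken together, the options introduced in this series surface a small configuration struct that is filled from repeated `-o vss.<name>=<value>` flags. The sketch below is a minimal, self-contained illustration of how the kebab-case keys map onto the tagged `VSSConfig` fields and the 120-second default; the struct tags are copied from internal/fs/fs_local_vss.go as changed above, while the tiny reflection-based `apply` helper and the `main` scaffolding are illustrative stand-ins only and not the actual restic/internal/options implementation.

    package main

    import (
        "fmt"
        "reflect"
        "strconv"
        "time"
    )

    // VSSConfig mirrors the final shape of internal/fs.VSSConfig in this
    // series; the struct tags copy the kebab-case option names verbatim.
    type VSSConfig struct {
        ExcludeAllMountPoints bool          `option:"exclude-all-mount-points"`
        ExcludeVolumes        string        `option:"exclude-volumes"`
        Timeout               time.Duration `option:"timeout"`
        Provider              string        `option:"provider"`
    }

    // apply copies "vss.<name>" keys onto the tagged fields of cfg. It is a
    // deliberately small stand-in for the options package, handling only the
    // three field types VSSConfig actually uses.
    func apply(opts map[string]string, cfg *VSSConfig) error {
        v := reflect.ValueOf(cfg).Elem()
        t := v.Type()
        for i := 0; i < t.NumField(); i++ {
            raw, ok := opts["vss."+t.Field(i).Tag.Get("option")]
            if !ok {
                continue
            }
            f := v.Field(i)
            switch f.Interface().(type) {
            case bool:
                b, err := strconv.ParseBool(raw)
                if err != nil {
                    return err
                }
                f.SetBool(b)
            case time.Duration:
                d, err := time.ParseDuration(raw)
                if err != nil {
                    return err
                }
                f.Set(reflect.ValueOf(d))
            case string:
                f.SetString(raw)
            }
        }
        return nil
    }

    func main() {
        // Default from NewVSSConfig: 120 seconds.
        cfg := VSSConfig{Timeout: 120 * time.Second}

        // Values as they would arrive from:
        //   restic backup --use-fs-snapshot \
        //     -o vss.timeout=2m30s -o vss.exclude-all-mount-points=true
        err := apply(map[string]string{
            "vss.timeout":                  "2m30s",
            "vss.exclude-all-mount-points": "true",
        }, &cfg)
        if err != nil {
            panic(err)
        }

        // Timeout is now 2m30s, mount-point snapshotting is disabled, and the
        // empty Provider string keeps the default VSS provider.
        fmt.Printf("%+v\n", cfg)
    }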