Mirror of https://github.com/octoleo/restic.git, synced 2024-11-25 06:07:44 +00:00
Merge pull request #3593 from DarkKirb/parallelize-restic-copy
Parallelize blob upload/download for restic copy
Merged as commit 2c3e5d943d
changelog/unreleased/pull-3593 (new file, 9 lines added)
@@ -0,0 +1,9 @@
+Enhancement: Improve restic copy performance by parallelizing IO
+
+Restic copy previously only used a single thread for copying blobs between
+repositories, which resulted in limited performance when copying small blobs
+to/from a high latency backend (i.e. any remote backend, especially b2).
+Copying will now use 8 parallel threads to increase the throughput of the copy
+operation.
+
+https://github.com/restic/restic/pull/3593
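For illustration, here is a minimal, self-contained sketch of the pattern the change uses: a producer goroutine streams blob IDs into a channel, numCopyWorkers worker goroutines drain it, and an errgroup ties their lifetimes and errors together. This is not the PR's code; copyOne, copyAll, and the string IDs are hypothetical stand-ins for restic's per-blob LoadBlob/SaveBlob work and restic.ID values.

// Sketch of the producer/worker pattern: one goroutine feeds blob IDs into a
// channel, numCopyWorkers goroutines drain it, errgroup collects the errors.
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

const numCopyWorkers = 8

// copyOne stands in for the per-blob load-from-source / save-to-destination step.
func copyOne(ctx context.Context, id string) error {
	fmt.Println("copied blob", id)
	return nil
}

func copyAll(ctx context.Context, ids []string) error {
	wg, ctx := errgroup.WithContext(ctx)
	idChan := make(chan string)

	// Producer: send IDs, but give up as soon as the group's context is cancelled.
	wg.Go(func() error {
		defer close(idChan)
		for _, id := range ids {
			select {
			case idChan <- id:
			case <-ctx.Done():
				return ctx.Err()
			}
		}
		return nil
	})

	// Workers: copy blobs concurrently; the first error cancels the whole group.
	for i := 0; i < numCopyWorkers; i++ {
		wg.Go(func() error {
			for id := range idChan {
				if err := copyOne(ctx, id); err != nil {
					return err
				}
			}
			return nil
		})
	}
	return wg.Wait()
}

func main() {
	_ = copyAll(context.Background(), []string{"aa11", "bb22", "cc33"})
}

An unbuffered channel is enough here because the workers, not the channel, provide the parallelism; the producer simply blocks until some worker is free to take the next ID.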
cmd/restic/cmd_copy.go
@@ -176,9 +176,12 @@ func similarSnapshots(sna *restic.Snapshot, snb *restic.Snapshot) bool {
 	return true
 }
 
+const numCopyWorkers = 8
+
 func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Repository,
 	visitedTrees restic.IDSet, rootTreeID restic.ID) error {
 
+	idChan := make(chan restic.ID)
 	wg, ctx := errgroup.WithContext(ctx)
 
 	treeStream := restic.StreamTrees(ctx, wg, srcRepo, restic.IDs{rootTreeID}, func(treeID restic.ID) bool {
@@ -188,9 +191,9 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep
 	}, nil)
 
 	wg.Go(func() error {
+		defer close(idChan)
 		// reused buffer
 		var buf []byte
-
 		for tree := range treeStream {
 			if tree.Error != nil {
 				return fmt.Errorf("LoadTree(%v) returned error %v", tree.ID.Str(), tree.Error)
@@ -211,12 +214,26 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep
 				}
 			}
 
-			// TODO: parallelize blob down/upload
-
 			for _, entry := range tree.Nodes {
 				// Recursion into directories is handled by StreamTrees
 				// Copy the blobs for this file.
 				for _, blobID := range entry.Content {
+					select {
+					case idChan <- blobID:
+					case <-ctx.Done():
+						return ctx.Err()
+					}
+				}
+			}
+		}
+		return nil
+	})
+
+	for i := 0; i < numCopyWorkers; i++ {
+		wg.Go(func() error {
+			// reused buffer
+			var buf []byte
+			for blobID := range idChan {
 				// Do we already have this data blob?
 				if dstRepo.Index().Has(restic.BlobHandle{ID: blobID, Type: restic.DataBlob}) {
 					continue
@@ -233,10 +250,8 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep
 					return fmt.Errorf("SaveBlob(%v) returned error %v", blobID, err)
 				}
 			}
-			}
-		}
-
 			return nil
 		})
+	}
 	return wg.Wait()
 }
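One detail worth noting in the hunks above: defer close(idChan) is what lets each worker's range over idChan terminate once the tree walk finishes, and the select on ctx.Done() keeps the producer from blocking forever on a channel nobody reads once a worker has failed and the errgroup has cancelled the shared context. The toy program below (an illustration with a simulated failure, not restic code) demonstrates that second point.

// Sketch: a failing consumer cancels the errgroup context, which unblocks the
// producer instead of leaving it stuck on a send that will never complete.
package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	wg, ctx := errgroup.WithContext(context.Background())
	ch := make(chan int)

	// Producer: without the ctx.Done() case, this send would block forever
	// once the worker below has failed and stopped reading.
	wg.Go(func() error {
		defer close(ch)
		for i := 0; i < 1000; i++ {
			select {
			case ch <- i:
			case <-ctx.Done():
				return ctx.Err()
			}
		}
		return nil
	})

	// Worker: fails early; the errgroup then cancels ctx for the producer.
	wg.Go(func() error {
		for i := range ch {
			if i == 3 {
				return errors.New("simulated SaveBlob failure")
			}
		}
		return nil
	})

	fmt.Println("copy finished with:", wg.Wait())
}

errgroup's Wait returns the first non-nil error, so the caller sees the simulated SaveBlob failure rather than the producer's context.Canceled.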