fix(protocol): allow encrypted-to-encrypted connections again

Encrypted-to-encrypted connections (i.e., ones where both sides set a
password) used to work but were broken in the 1.28.0 release. The
culprit is the 5342bec1b refactor, which slightly changed how the
request was constructed, resulting in a bad block hash field.

Co-authored-by: Simon Frei <freisim93@gmail.com>
This commit is contained in:
parent 65d0ca8aa9
commit 8ccb7f1924
@@ -212,38 +212,44 @@ func (e encryptedConnection) Request(ctx context.Context, req *Request) ([]byte,
 	if !ok {
 		return e.conn.Request(ctx, req)
 	}
+	fileKey := e.keyGen.FileKey(req.Name, folderKey)
 
 	// Encrypt / adjust the request parameters.
 
-	origSize := req.Size
-	origName := req.Name
-	if req.Size < minPaddedSize {
+	encSize := req.Size
+	if encSize < minPaddedSize {
 		// Make a request for minPaddedSize data instead of the smaller
 		// block. We'll chop of the extra data later.
-		req.Size = minPaddedSize
+		encSize = minPaddedSize
 	}
+	encSize += blockOverhead
 	encName := encryptName(req.Name, folderKey)
 	encOffset := req.Offset + int64(req.BlockNo*blockOverhead)
-	encSize := req.Size + blockOverhead
+	encHash := encryptBlockHash(req.Hash, req.Offset, fileKey)
 
 	// Perform that request, getting back an encrypted block.
 
-	req.Name = encName
-	req.Offset = encOffset
-	req.Size = encSize
-	bs, err := e.conn.Request(ctx, req)
+	encReq := &Request{
+		ID:      req.ID,
+		Folder:  req.Folder,
+		Name:    encName,
+		Offset:  encOffset,
+		Size:    encSize,
+		Hash:    encHash,
+		BlockNo: req.BlockNo,
+	}
+	bs, err := e.conn.Request(ctx, encReq)
 	if err != nil {
 		return nil, err
 	}
 
 	// Return the decrypted block (or an error if it fails decryption)
 
-	fileKey := e.keyGen.FileKey(origName, folderKey)
 	bs, err = DecryptBytes(bs, fileKey)
 	if err != nil {
 		return nil, err
 	}
-	return bs[:origSize], nil
+	return bs[:req.Size], nil
 }
 
 func (e encryptedConnection) DownloadProgress(ctx context.Context, dp *DownloadProgress) {
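The crux of the fix is visible in the hunk above: the request sent to the encrypted peer now carries a deterministically encrypted block hash (encHash), and the translated parameters go into a fresh encReq instead of overwriting req, so req.Name and req.Size stay available for deriving fileKey and for trimming the padded response. Reading the removed lines, the 1.28.0 flow was, in essence:

	// Pre-fix flow (the removed lines above): the caller's request is
	// mutated in place and its Hash field is never translated.
	req.Name = encName
	req.Offset = encOffset
	req.Size = encSize
	// req.Hash still holds the plaintext block hash here, so a peer that
	// only knows the encrypted data presumably cannot match it - the "bad
	// block hash field" from the commit message.
	bs, err := e.conn.Request(ctx, req)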
@@ -327,15 +333,7 @@ func encryptFileInfo(keyGen *KeyGenerator, fi FileInfo, folderKey *[keySize]byte
 			b.Size = minPaddedSize
 		}
 		size := b.Size + blockOverhead
-
-		// The offset goes into the encrypted block hash as additional data,
-		// essentially mixing in with the nonce. This means a block hash
-		// remains stable for the same data at the same offset, but doesn't
-		// reveal the existence of identical data blocks at other offsets.
-		var additional [8]byte
-		binary.BigEndian.PutUint64(additional[:], uint64(b.Offset))
-		hash := encryptDeterministic(b.Hash, fileKey, additional[:])
-
+		hash := encryptBlockHash(b.Hash, b.Offset, fileKey)
 		blocks[i] = BlockInfo{
 			Hash:   hash,
 			Offset: offset,
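This hunk and the previous one are two sides of the same exchange: encryptFileInfo computes the block hashes that an encrypted peer ends up storing, and Request (above) computes the hash it asks that peer for. Both now go through the same encryptBlockHash helper, so the two values agree by construction. Illustrative only, using the helper introduced in this diff:

	// The hash the requester sends and the hash stored in the encrypted
	// FileInfo must match byte for byte; determinism guarantees this when
	// both sides feed in the same plaintext hash, offset and file key.
	sent := encryptBlockHash(b.Hash, b.Offset, fileKey)   // requester side
	stored := encryptBlockHash(b.Hash, b.Offset, fileKey) // index side
	_ = bytes.Equal(sent, stored) // true for identical inputs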
@@ -374,6 +372,16 @@ func encryptFileInfo(keyGen *KeyGenerator, fi FileInfo, folderKey *[keySize]byte
 	return enc
 }
 
+func encryptBlockHash(hash []byte, offset int64, fileKey *[keySize]byte) []byte {
+	// The offset goes into the encrypted block hash as additional data,
+	// essentially mixing in with the nonce. This means a block hash
+	// remains stable for the same data at the same offset, but doesn't
+	// reveal the existence of identical data blocks at other offsets.
+	var additional [8]byte
+	binary.BigEndian.PutUint64(additional[:], uint64(offset))
+	return encryptDeterministic(hash, fileKey, additional[:])
+}
+
 func decryptFileInfos(keyGen *KeyGenerator, files []FileInfo, folderKey *[keySize]byte) error {
 	for i, fi := range files {
 		decFI, err := DecryptFileInfo(keyGen, fi, folderKey)
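A quick way to convince yourself of the property the comment above describes is a small test next to the new helper. This is a sketch only, not part of the commit: it assumes it lives in the same package as the code above and fabricates a file key directly instead of going through the KeyGenerator.

package protocol

import (
	"bytes"
	"crypto/sha256"
	"testing"
)

func TestEncryptBlockHashOffsetMixing(t *testing.T) {
	// A fixed, made-up file key; the real one comes from the key generator.
	fileKey := new([keySize]byte)
	copy(fileKey[:], bytes.Repeat([]byte{0x42}, keySize))

	hash := sha256.Sum256([]byte("block data"))

	a := encryptBlockHash(hash[:], 0, fileKey)
	b := encryptBlockHash(hash[:], 0, fileKey)
	c := encryptBlockHash(hash[:], 128<<10, fileKey)

	if !bytes.Equal(a, b) {
		t.Error("same data at the same offset should give the same encrypted hash")
	}
	if bytes.Equal(a, c) {
		t.Error("same data at another offset should give a different encrypted hash")
	}
}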