2014-09-11 18:43:12 +00:00
|
|
|
package chunker
|
|
|
|
|
|
|
|
import (
|
2015-04-05 20:24:26 +00:00
|
|
|
"errors"
|
2015-02-08 21:54:45 +00:00
|
|
|
"hash"
|
2014-09-11 18:43:12 +00:00
|
|
|
"io"
|
2015-04-05 20:46:11 +00:00
|
|
|
"sync"
|
2014-09-11 18:43:12 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// KiB and MiB are convenience byte-size multipliers.
	KiB = 1024
	MiB = 1024 * KiB

	// windowSize is the size of the sliding window in bytes.
	windowSize = 64

	// aim to create chunks of 20 bits or about 1MiB on average.
	averageBits = 20

	// MinSize is the minimal size of a chunk.
	MinSize = 512 * KiB
	// MaxSize is the maximal size of a chunk.
	MaxSize = 8 * MiB

	// splitmask selects the lowest averageBits bits of the digest; a chunk
	// boundary is cut when all of them are zero.
	splitmask = (1 << averageBits) - 1

	// chunkerBufSize is the size of the read buffers handed out by bufPool.
	chunkerBufSize = 512 * KiB
)
|
|
|
|
|
2015-05-04 22:45:29 +00:00
|
|
|
// bufPool recycles the read buffers used by Chunkers; Next hands a buffer
// back to the pool once the underlying reader is exhausted.
var bufPool = sync.Pool{
	New: func() interface{} { return make([]byte, chunkerBufSize) },
}
|
|
|
|
|
2015-04-05 20:46:11 +00:00
|
|
|
// tables holds the precomputed lookup tables for one polynomial
// (see fillTables for how both tables are constructed).
type tables struct {
	// out[b] is the hash of byte b followed by windowSize-1 zero bytes;
	// XORing it into the digest cancels the byte sliding out of the window.
	out [256]Pol
	// mod[b] reduces the digest modulo the polynomial when its top byte is b.
	mod [256]Pol
}
|
|
|
|
|
|
|
|
// cache holds precomputed tables per polynomial; entries are read-only once
// created. The embedded mutex guards the map itself.
var cache struct {
	entries map[Pol]*tables
	sync.Mutex
}
|
|
|
|
|
|
|
|
func init() {
|
|
|
|
cache.entries = make(map[Pol]*tables)
|
|
|
|
}
|
|
|
|
|
2015-05-02 14:22:43 +00:00
|
|
|
// Chunk is one content-dependent chunk of bytes whose end was cut when the
// Rabin Fingerprint had the value stored in Cut.
type Chunk struct {
	// Start is the offset of the chunk within the original data stream.
	Start uint
	// Length is the number of bytes in the chunk.
	Length uint
	// Cut is the value of the Rabin Fingerprint at the cut point.
	Cut uint64
	// Digest is the hash of the chunk data; nil if no hash is in use.
	Digest []byte
}
|
|
|
|
|
|
|
|
// Reader returns an io.Reader that yields exactly the bytes of this chunk
// from the underlying io.ReaderAt r.
func (c Chunk) Reader(r io.ReaderAt) io.Reader {
	return io.NewSectionReader(r, int64(c.Start), int64(c.Length))
}
|
|
|
|
|
2015-05-02 15:01:31 +00:00
|
|
|
// Chunker splits content with Rabin Fingerprints.
type Chunker struct {
	pol      Pol     // polynomial defining the fingerprint
	polShift uint    // deg(pol) - 8: shift extracting the digest's top byte
	tables   *tables // precomputed out/mod tables for pol; nil if pol == 0

	rd     io.Reader // source of the data to be chunked
	closed bool      // set once rd has reported io.EOF

	window [windowSize]byte // ring buffer over the last windowSize bytes
	wpos   int              // next write position in window

	buf  []byte // read buffer, obtained from bufPool
	bpos uint   // current read position within buf
	bmax uint   // number of valid bytes in buf

	start uint // stream offset at which the current chunk begins
	count uint // bytes accumulated in the current chunk so far
	pos   uint // absolute position within the input stream

	pre uint // wait for this many bytes before start calculating a new chunk

	digest uint64    // current Rabin Fingerprint of the window
	h      hash.Hash // optional hash fed with all chunk data
}
|
|
|
|
|
2015-05-04 21:25:07 +00:00
|
|
|
// New returns a new Chunker based on polynomial pol that reads from rd. If h
// is non-nil, all chunk data is additionally written to h so that each Chunk
// returned by Next carries a digest. The read buffer is taken from bufPool
// and is returned to the pool once rd is exhausted.
func New(rd io.Reader, pol Pol, h hash.Hash) *Chunker {
	c := &Chunker{
		buf: bufPool.Get().([]byte),
		h:   h,
		pol: pol,
		rd:  rd,
	}

	c.reset()

	return c
}
|
|
|
|
|
2015-05-04 19:07:21 +00:00
|
|
|
func (c *Chunker) reset() {
|
|
|
|
c.polShift = uint(c.pol.Deg() - 8)
|
2015-04-24 23:39:32 +00:00
|
|
|
c.fillTables()
|
2015-02-09 22:37:33 +00:00
|
|
|
|
2015-05-02 14:19:16 +00:00
|
|
|
for i := 0; i < windowSize; i++ {
|
2014-09-11 18:43:12 +00:00
|
|
|
c.window[i] = 0
|
|
|
|
}
|
2015-05-04 19:07:21 +00:00
|
|
|
|
2014-12-03 22:20:45 +00:00
|
|
|
c.closed = false
|
2014-09-11 18:43:12 +00:00
|
|
|
c.digest = 0
|
|
|
|
c.wpos = 0
|
|
|
|
c.count = 0
|
2015-05-04 19:07:21 +00:00
|
|
|
c.slide(1)
|
|
|
|
c.start = c.pos
|
2015-02-08 21:54:45 +00:00
|
|
|
|
2015-02-11 12:10:36 +00:00
|
|
|
if c.h != nil {
|
|
|
|
c.h.Reset()
|
|
|
|
}
|
2015-02-08 21:54:45 +00:00
|
|
|
|
2015-01-14 15:33:41 +00:00
|
|
|
// do not start a new chunk unless at least MinSize bytes have been read
|
2015-05-02 14:19:16 +00:00
|
|
|
c.pre = MinSize - windowSize
|
2014-09-11 18:43:12 +00:00
|
|
|
}
|
|
|
|
|
2015-04-05 20:46:11 +00:00
|
|
|
// Calculate out_table and mod_table for optimization. Must be called only
// once. This implementation uses a cache in the global variable cache, so
// repeated Chunkers for the same polynomial share one table pair.
func (c *Chunker) fillTables() {
	// if polynomial hasn't been specified, do not compute anything for now
	if c.pol == 0 {
		return
	}

	// test if the tables are cached for this polynomial
	cache.Lock()
	defer cache.Unlock()
	if t, ok := cache.entries[c.pol]; ok {
		c.tables = t
		return
	}

	// else create a new entry
	c.tables = &tables{}
	cache.entries[c.pol] = c.tables

	// calculate table for sliding out bytes. The byte to slide out is used as
	// the index for the table, the value contains the following:
	// out_table[b] = Hash(b || 0 || ...        || 0)
	//                          \ windowsize-1 zero bytes /
	// To slide out byte b_0 for window size w with known hash
	// H := H(b_0 || ... || b_w), it is sufficient to add out_table[b_0]:
	//    H(b_0 || ... || b_w) + H(b_0 || 0 || ... || 0)
	//  = H(b_0 + b_0 || b_1 + 0 || ... || b_w + 0)
	//  = H(    0     || b_1 || ...     || b_w)
	//
	// Afterwards a new byte can be shifted in.
	for b := 0; b < 256; b++ {
		var h Pol

		// hash the byte followed by windowSize-1 zero bytes
		h = appendByte(h, byte(b), c.pol)
		for i := 0; i < windowSize-1; i++ {
			h = appendByte(h, 0, c.pol)
		}
		c.tables.out[b] = h
	}

	// calculate table for reduction mod Polynomial
	k := c.pol.Deg()
	for b := 0; b < 256; b++ {
		// mod_table[b] = A | B, where A = (b(x) * x^k mod pol) and  B = b(x) * x^k
		//
		// The 8 bits above deg(Polynomial) determine what happens next and so
		// these bits are used as a lookup to this table. The value is split in
		// two parts: Part A contains the result of the modulus operation, part
		// B is used to cancel out the 8 top bits so that one XOR operation is
		// enough to reduce modulo Polynomial
		c.tables.mod[b] = Pol(uint64(b)<<uint(k)).Mod(c.pol) | (Pol(b) << uint(k))
	}
}
|
|
|
|
|
2015-02-08 18:32:12 +00:00
|
|
|
// Next returns the position and length of the next chunk of data. If an error
|
|
|
|
// occurs while reading, the error is returned with a nil chunk. The state of
|
|
|
|
// the current chunk is undefined. When the last chunk has been returned, all
|
|
|
|
// subsequent calls yield a nil chunk and an io.EOF error.
|
|
|
|
func (c *Chunker) Next() (*Chunk, error) {
|
2015-04-05 22:22:19 +00:00
|
|
|
if c.tables == nil {
|
|
|
|
return nil, errors.New("polynomial is not set")
|
|
|
|
}
|
|
|
|
|
2014-09-11 18:43:12 +00:00
|
|
|
for {
|
|
|
|
if c.bpos >= c.bmax {
|
2015-02-08 21:54:45 +00:00
|
|
|
n, err := io.ReadFull(c.rd, c.buf[:])
|
2014-09-11 18:43:12 +00:00
|
|
|
|
|
|
|
if err == io.ErrUnexpectedEOF {
|
|
|
|
err = nil
|
|
|
|
}
|
|
|
|
|
2014-09-18 20:20:12 +00:00
|
|
|
// io.ReadFull only returns io.EOF when no bytes could be read. If
|
|
|
|
// this is the case and we're in this branch, there are no more
|
|
|
|
// bytes to buffer, so this was the last chunk. If a different
|
|
|
|
// error has occurred, return that error and abandon the current
|
|
|
|
// chunk.
|
|
|
|
if err == io.EOF && !c.closed {
|
|
|
|
c.closed = true
|
|
|
|
|
2015-05-04 22:45:29 +00:00
|
|
|
// return the buffer to the pool
|
|
|
|
bufPool.Put(c.buf)
|
|
|
|
|
2014-10-02 21:01:01 +00:00
|
|
|
// return current chunk, if any bytes have been processed
|
|
|
|
if c.count > 0 {
|
|
|
|
return &Chunk{
|
|
|
|
Start: c.start,
|
|
|
|
Length: c.count,
|
|
|
|
Cut: c.digest,
|
2015-02-08 21:54:45 +00:00
|
|
|
Digest: c.hashDigest(),
|
2014-10-02 21:01:01 +00:00
|
|
|
}, nil
|
|
|
|
}
|
2014-09-18 20:20:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2014-09-11 18:43:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
c.bpos = 0
|
2015-02-08 21:54:45 +00:00
|
|
|
c.bmax = uint(n)
|
2014-09-11 18:43:12 +00:00
|
|
|
}
|
|
|
|
|
2015-01-14 15:33:41 +00:00
|
|
|
// check if bytes have to be dismissed before starting a new chunk
|
|
|
|
if c.pre > 0 {
|
|
|
|
n := c.bmax - c.bpos
|
2015-02-08 21:54:45 +00:00
|
|
|
if c.pre > uint(n) {
|
|
|
|
c.pre -= uint(n)
|
|
|
|
c.updateHash(c.buf[c.bpos:c.bmax])
|
2015-01-14 15:33:41 +00:00
|
|
|
|
2015-02-08 21:54:45 +00:00
|
|
|
c.count += uint(n)
|
|
|
|
c.pos += uint(n)
|
2015-01-14 15:33:41 +00:00
|
|
|
c.bpos = c.bmax
|
2015-02-08 21:54:45 +00:00
|
|
|
|
2015-01-14 15:33:41 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2015-02-08 21:54:45 +00:00
|
|
|
c.updateHash(c.buf[c.bpos : c.bpos+c.pre])
|
|
|
|
|
2015-01-14 15:33:41 +00:00
|
|
|
c.bpos += c.pre
|
|
|
|
c.count += c.pre
|
|
|
|
c.pos += c.pre
|
|
|
|
c.pre = 0
|
|
|
|
}
|
|
|
|
|
2015-02-13 19:03:40 +00:00
|
|
|
add := c.count
|
|
|
|
for _, b := range c.buf[c.bpos:c.bmax] {
|
2014-09-11 18:43:12 +00:00
|
|
|
// inline c.slide(b) and append(b) to increase performance
|
|
|
|
out := c.window[c.wpos]
|
|
|
|
c.window[c.wpos] = b
|
2015-04-06 18:45:06 +00:00
|
|
|
c.digest ^= uint64(c.tables.out[out])
|
2015-05-02 14:19:16 +00:00
|
|
|
c.wpos = (c.wpos + 1) % windowSize
|
2014-09-11 18:43:12 +00:00
|
|
|
|
|
|
|
// c.append(b)
|
2015-04-24 23:39:32 +00:00
|
|
|
index := c.digest >> c.polShift
|
2014-09-11 18:43:12 +00:00
|
|
|
c.digest <<= 8
|
|
|
|
c.digest |= uint64(b)
|
|
|
|
|
2015-04-06 18:45:06 +00:00
|
|
|
c.digest ^= uint64(c.tables.mod[index])
|
2015-02-08 21:54:45 +00:00
|
|
|
// end inline
|
2014-09-11 18:43:12 +00:00
|
|
|
|
2015-02-13 19:03:40 +00:00
|
|
|
add++
|
|
|
|
if add < MinSize {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
if (c.digest&splitmask) == 0 || add >= MaxSize {
|
|
|
|
i := add - c.count - 1
|
2015-02-08 21:54:45 +00:00
|
|
|
c.updateHash(c.buf[c.bpos : c.bpos+uint(i)+1])
|
2015-02-13 19:03:40 +00:00
|
|
|
c.count = add
|
2015-02-08 21:54:45 +00:00
|
|
|
c.pos += uint(i) + 1
|
|
|
|
c.bpos += uint(i) + 1
|
2014-09-11 18:43:12 +00:00
|
|
|
|
|
|
|
chunk := &Chunk{
|
|
|
|
Start: c.start,
|
|
|
|
Length: c.count,
|
|
|
|
Cut: c.digest,
|
2015-02-08 21:54:45 +00:00
|
|
|
Digest: c.hashDigest(),
|
2014-09-11 18:43:12 +00:00
|
|
|
}
|
|
|
|
|
2015-05-04 19:07:21 +00:00
|
|
|
c.reset()
|
2014-09-11 18:43:12 +00:00
|
|
|
|
|
|
|
return chunk, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
steps := c.bmax - c.bpos
|
2015-02-08 21:54:45 +00:00
|
|
|
if steps > 0 {
|
|
|
|
c.updateHash(c.buf[c.bpos : c.bpos+steps])
|
|
|
|
}
|
2014-09-11 18:43:12 +00:00
|
|
|
c.count += steps
|
|
|
|
c.pos += steps
|
|
|
|
c.bpos = c.bmax
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-02-08 21:54:45 +00:00
|
|
|
func (c *Chunker) updateHash(data []byte) {
|
|
|
|
if c.h != nil {
|
|
|
|
// the hashes from crypto/sha* do not return an error
|
|
|
|
_, err := c.h.Write(data)
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Chunker) hashDigest() []byte {
|
|
|
|
if c.h == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return c.h.Sum(nil)
|
|
|
|
}
|
|
|
|
|
2014-11-23 15:48:00 +00:00
|
|
|
// append shifts byte b into the rolling Rabin digest and reduces the result
// modulo the chunker's polynomial via the precomputed mod table.
func (c *Chunker) append(b byte) {
	// the byte shifted past deg(pol) selects the reduction value
	index := c.digest >> c.polShift
	c.digest <<= 8
	c.digest |= uint64(b)

	// one XOR both cancels the top bits and applies the modulus
	// (see the mod-table construction in fillTables)
	c.digest ^= uint64(c.tables.mod[index])
}
|
|
|
|
|
2014-11-23 15:48:00 +00:00
|
|
|
func (c *Chunker) slide(b byte) {
|
2014-09-11 18:43:12 +00:00
|
|
|
out := c.window[c.wpos]
|
|
|
|
c.window[c.wpos] = b
|
2015-04-06 18:45:06 +00:00
|
|
|
c.digest ^= uint64(c.tables.out[out])
|
2015-05-02 14:19:16 +00:00
|
|
|
c.wpos = (c.wpos + 1) % windowSize
|
2014-09-11 18:43:12 +00:00
|
|
|
|
|
|
|
c.append(b)
|
|
|
|
}
|
|
|
|
|
2015-04-24 23:39:32 +00:00
|
|
|
func appendByte(hash Pol, b byte, pol Pol) Pol {
|
2014-09-11 18:43:12 +00:00
|
|
|
hash <<= 8
|
2015-04-06 18:45:06 +00:00
|
|
|
hash |= Pol(b)
|
2014-09-11 18:43:12 +00:00
|
|
|
|
2015-04-06 18:45:06 +00:00
|
|
|
return hash.Mod(pol)
|
2014-09-11 18:43:12 +00:00
|
|
|
}
|