Mirror of https://github.com/octoleo/syncthing.git, synced 2024-11-09 14:50:56 +00:00
vendor: Add dependencies for discosrv
This commit is contained in: parent eacae83886, commit f9e2623fdc
27	vendor/github.com/cznic/b/LICENSE	generated	vendored	Normal file
@@ -0,0 +1,27 @@
Copyright (c) 2014 The b Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
      copyright notice, this list of conditions and the following disclaimer
      in the documentation and/or other materials provided with the
      distribution.
    * Neither the names of the authors nor the names of the
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
929	vendor/github.com/cznic/b/btree.go	generated	vendored	Normal file
@@ -0,0 +1,929 @@
// Copyright 2014 The b Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package b

import (
	"fmt"
	"io"
	"sync"
)

const (
	kx = 32 //TODO benchmark tune this number if using custom key/value type(s).
	kd = 32 //TODO benchmark tune this number if using custom key/value type(s).
)

func init() {
	if kd < 1 {
		panic(fmt.Errorf("kd %d: out of range", kd))
	}

	if kx < 2 {
		panic(fmt.Errorf("kx %d: out of range", kx))
	}
}

var (
	btDPool = sync.Pool{New: func() interface{} { return &d{} }}
	btEPool = btEpool{sync.Pool{New: func() interface{} { return &Enumerator{} }}}
	btTPool = btTpool{sync.Pool{New: func() interface{} { return &Tree{} }}}
	btXPool = sync.Pool{New: func() interface{} { return &x{} }}
)

type btTpool struct{ sync.Pool }

func (p *btTpool) get(cmp Cmp) *Tree {
	x := p.Get().(*Tree)
	x.cmp = cmp
	return x
}

type btEpool struct{ sync.Pool }

func (p *btEpool) get(err error, hit bool, i int, k interface{} /*K*/, q *d, t *Tree, ver int64) *Enumerator {
	x := p.Get().(*Enumerator)
	x.err, x.hit, x.i, x.k, x.q, x.t, x.ver = err, hit, i, k, q, t, ver
	return x
}

type (
	// Cmp compares a and b. Return value is:
	//
	//	< 0 if a <  b
	//	  0 if a == b
	//	> 0 if a >  b
	//
	Cmp func(a, b interface{} /*K*/) int

	d struct { // data page
		c int
		d [2*kd + 1]de
		n *d
		p *d
	}

	de struct { // d element
		k interface{} /*K*/
		v interface{} /*V*/
	}

	// Enumerator captures the state of enumerating a tree. It is returned
	// from the Seek* methods. The enumerator is aware of any mutations
	// made to the tree in the process of enumerating it and automatically
	// resumes the enumeration at the proper key, if possible.
	//
	// However, once an Enumerator returns io.EOF to signal "no more
	// items", it does no more attempt to "resync" on tree mutation(s). In
	// other words, io.EOF from an Enumerator is "sticky" (idempotent).
	Enumerator struct {
		err error
		hit bool
		i   int
		k   interface{} /*K*/
		q   *d
		t   *Tree
		ver int64
	}

	// Tree is a B+tree.
	Tree struct {
		c     int
		cmp   Cmp
		first *d
		last  *d
		r     interface{}
		ver   int64
	}

	xe struct { // x element
		ch interface{}
		k  interface{} /*K*/
	}

	x struct { // index page
		c int
		x [2*kx + 2]xe
	}
)

var ( // R/O zero values
	zd  d
	zde de
	ze  Enumerator
	zk  interface{} /*K*/
	zt  Tree
	zx  x
	zxe xe
)

func clr(q interface{}) {
	switch x := q.(type) {
	case *x:
		for i := 0; i <= x.c; i++ { // Ch0 Sep0 ... Chn-1 Sepn-1 Chn
			clr(x.x[i].ch)
		}
		*x = zx
		btXPool.Put(x)
	case *d:
		*x = zd
		btDPool.Put(x)
	}
}

// -------------------------------------------------------------------------- x

func newX(ch0 interface{}) *x {
	r := btXPool.Get().(*x)
	r.x[0].ch = ch0
	return r
}

func (q *x) extract(i int) {
	q.c--
	if i < q.c {
		copy(q.x[i:], q.x[i+1:q.c+1])
		q.x[q.c].ch = q.x[q.c+1].ch
		q.x[q.c].k = zk  // GC
		q.x[q.c+1] = zxe // GC
	}
}

func (q *x) insert(i int, k interface{} /*K*/, ch interface{}) *x {
	c := q.c
	if i < c {
		q.x[c+1].ch = q.x[c].ch
		copy(q.x[i+2:], q.x[i+1:c])
		q.x[i+1].k = q.x[i].k
	}
	c++
	q.c = c
	q.x[i].k = k
	q.x[i+1].ch = ch
	return q
}

func (q *x) siblings(i int) (l, r *d) {
	if i >= 0 {
		if i > 0 {
			l = q.x[i-1].ch.(*d)
		}
		if i < q.c {
			r = q.x[i+1].ch.(*d)
		}
	}
	return
}

// -------------------------------------------------------------------------- d

func (l *d) mvL(r *d, c int) {
	copy(l.d[l.c:], r.d[:c])
	copy(r.d[:], r.d[c:r.c])
	l.c += c
	r.c -= c
}

func (l *d) mvR(r *d, c int) {
	copy(r.d[c:], r.d[:r.c])
	copy(r.d[:c], l.d[l.c-c:])
	r.c += c
	l.c -= c
}

// ----------------------------------------------------------------------- Tree

// TreeNew returns a newly created, empty Tree. The compare function is used
// for key collation.
func TreeNew(cmp Cmp) *Tree {
	return btTPool.get(cmp)
}

// Clear removes all K/V pairs from the tree.
func (t *Tree) Clear() {
	if t.r == nil {
		return
	}

	clr(t.r)
	t.c, t.first, t.last, t.r = 0, nil, nil, nil
	t.ver++
}

// Close performs Clear and recycles t to a pool for possible later reuse. No
// references to t should exist or such references must not be used afterwards.
func (t *Tree) Close() {
	t.Clear()
	*t = zt
	btTPool.Put(t)
}

func (t *Tree) cat(p *x, q, r *d, pi int) {
	t.ver++
	q.mvL(r, r.c)
	if r.n != nil {
		r.n.p = q
	} else {
		t.last = q
	}
	q.n = r.n
	*r = zd
	btDPool.Put(r)
	if p.c > 1 {
		p.extract(pi)
		p.x[pi].ch = q
		return
	}

	switch x := t.r.(type) {
	case *x:
		*x = zx
		btXPool.Put(x)
	case *d:
		*x = zd
		btDPool.Put(x)
	}
	t.r = q
}

func (t *Tree) catX(p, q, r *x, pi int) {
	t.ver++
	q.x[q.c].k = p.x[pi].k
	copy(q.x[q.c+1:], r.x[:r.c])
	q.c += r.c + 1
	q.x[q.c].ch = r.x[r.c].ch
	*r = zx
	btXPool.Put(r)
	if p.c > 1 {
		p.c--
		pc := p.c
		if pi < pc {
			p.x[pi].k = p.x[pi+1].k
			copy(p.x[pi+1:], p.x[pi+2:pc+1])
			p.x[pc].ch = p.x[pc+1].ch
			p.x[pc].k = zk      // GC
			p.x[pc+1].ch = nil  // GC
		}
		return
	}

	switch x := t.r.(type) {
	case *x:
		*x = zx
		btXPool.Put(x)
	case *d:
		*x = zd
		btDPool.Put(x)
	}
	t.r = q
}

// Delete removes the k's KV pair, if it exists, in which case Delete returns
// true.
func (t *Tree) Delete(k interface{} /*K*/) (ok bool) {
	pi := -1
	var p *x
	q := t.r
	if q == nil {
		return false
	}

	for {
		var i int
		i, ok = t.find(q, k)
		if ok {
			switch x := q.(type) {
			case *x:
				if x.c < kx && q != t.r {
					x, i = t.underflowX(p, x, pi, i)
				}
				pi = i + 1
				p = x
				q = x.x[pi].ch
				ok = false
				continue
			case *d:
				t.extract(x, i)
				if x.c >= kd {
					return true
				}

				if q != t.r {
					t.underflow(p, x, pi)
				} else if t.c == 0 {
					t.Clear()
				}
				return true
			}
		}

		switch x := q.(type) {
		case *x:
			if x.c < kx && q != t.r {
				x, i = t.underflowX(p, x, pi, i)
			}
			pi = i
			p = x
			q = x.x[i].ch
		case *d:
			return false
		}
	}
}

func (t *Tree) extract(q *d, i int) { // (r interface{} /*V*/) {
	t.ver++
	//r = q.d[i].v // prepared for Extract
	q.c--
	if i < q.c {
		copy(q.d[i:], q.d[i+1:q.c+1])
	}
	q.d[q.c] = zde // GC
	t.c--
	return
}

func (t *Tree) find(q interface{}, k interface{} /*K*/) (i int, ok bool) {
	var mk interface{} /*K*/
	l := 0
	switch x := q.(type) {
	case *x:
		h := x.c - 1
		for l <= h {
			m := (l + h) >> 1
			mk = x.x[m].k
			switch cmp := t.cmp(k, mk); {
			case cmp > 0:
				l = m + 1
			case cmp == 0:
				return m, true
			default:
				h = m - 1
			}
		}
	case *d:
		h := x.c - 1
		for l <= h {
			m := (l + h) >> 1
			mk = x.d[m].k
			switch cmp := t.cmp(k, mk); {
			case cmp > 0:
				l = m + 1
			case cmp == 0:
				return m, true
			default:
				h = m - 1
			}
		}
	}
	return l, false
}

// First returns the first item of the tree in the key collating order, or
// (zero-value, zero-value) if the tree is empty.
func (t *Tree) First() (k interface{} /*K*/, v interface{} /*V*/) {
	if q := t.first; q != nil {
		q := &q.d[0]
		k, v = q.k, q.v
	}
	return
}

// Get returns the value associated with k and true if it exists. Otherwise Get
// returns (zero-value, false).
func (t *Tree) Get(k interface{} /*K*/) (v interface{} /*V*/, ok bool) {
	q := t.r
	if q == nil {
		return
	}

	for {
		var i int
		if i, ok = t.find(q, k); ok {
			switch x := q.(type) {
			case *x:
				q = x.x[i+1].ch
				continue
			case *d:
				return x.d[i].v, true
			}
		}
		switch x := q.(type) {
		case *x:
			q = x.x[i].ch
		default:
			return
		}
	}
}

func (t *Tree) insert(q *d, i int, k interface{} /*K*/, v interface{} /*V*/) *d {
	t.ver++
	c := q.c
	if i < c {
		copy(q.d[i+1:], q.d[i:c])
	}
	c++
	q.c = c
	q.d[i].k, q.d[i].v = k, v
	t.c++
	return q
}

// Last returns the last item of the tree in the key collating order, or
// (zero-value, zero-value) if the tree is empty.
func (t *Tree) Last() (k interface{} /*K*/, v interface{} /*V*/) {
	if q := t.last; q != nil {
		q := &q.d[q.c-1]
		k, v = q.k, q.v
	}
	return
}

// Len returns the number of items in the tree.
func (t *Tree) Len() int {
	return t.c
}

func (t *Tree) overflow(p *x, q *d, pi, i int, k interface{} /*K*/, v interface{} /*V*/) {
	t.ver++
	l, r := p.siblings(pi)

	if l != nil && l.c < 2*kd && i != 0 {
		l.mvL(q, 1)
		t.insert(q, i-1, k, v)
		p.x[pi-1].k = q.d[0].k
		return
	}

	if r != nil && r.c < 2*kd {
		if i < 2*kd {
			q.mvR(r, 1)
			t.insert(q, i, k, v)
			p.x[pi].k = r.d[0].k
			return
		}

		t.insert(r, 0, k, v)
		p.x[pi].k = k
		return
	}

	t.split(p, q, pi, i, k, v)
}

// Seek returns an Enumerator positioned on an item such that k >= item's key.
// ok reports if k == item.key The Enumerator's position is possibly after the
// last item in the tree.
func (t *Tree) Seek(k interface{} /*K*/) (e *Enumerator, ok bool) {
	q := t.r
	if q == nil {
		e = btEPool.get(nil, false, 0, k, nil, t, t.ver)
		return
	}

	for {
		var i int
		if i, ok = t.find(q, k); ok {
			switch x := q.(type) {
			case *x:
				q = x.x[i+1].ch
				continue
			case *d:
				return btEPool.get(nil, ok, i, k, x, t, t.ver), true
			}
		}

		switch x := q.(type) {
		case *x:
			q = x.x[i].ch
		case *d:
			return btEPool.get(nil, ok, i, k, x, t, t.ver), false
		}
	}
}

// SeekFirst returns an enumerator positioned on the first KV pair in the tree,
// if any. For an empty tree, err == io.EOF is returned and e will be nil.
func (t *Tree) SeekFirst() (e *Enumerator, err error) {
	q := t.first
	if q == nil {
		return nil, io.EOF
	}

	return btEPool.get(nil, true, 0, q.d[0].k, q, t, t.ver), nil
}

// SeekLast returns an enumerator positioned on the last KV pair in the tree,
// if any. For an empty tree, err == io.EOF is returned and e will be nil.
func (t *Tree) SeekLast() (e *Enumerator, err error) {
	q := t.last
	if q == nil {
		return nil, io.EOF
	}

	return btEPool.get(nil, true, q.c-1, q.d[q.c-1].k, q, t, t.ver), nil
}

// Set sets the value associated with k.
func (t *Tree) Set(k interface{} /*K*/, v interface{} /*V*/) {
	//dbg("--- PRE Set(%v, %v)\n%s", k, v, t.dump())
	//defer func() {
	//	dbg("--- POST\n%s\n====\n", t.dump())
	//}()

	pi := -1
	var p *x
	q := t.r
	if q == nil {
		z := t.insert(btDPool.Get().(*d), 0, k, v)
		t.r, t.first, t.last = z, z, z
		return
	}

	for {
		i, ok := t.find(q, k)
		if ok {
			switch x := q.(type) {
			case *x:
				if x.c > 2*kx {
					x, i = t.splitX(p, x, pi, i)
				}
				pi = i + 1
				p = x
				q = x.x[i+1].ch
				continue
			case *d:
				x.d[i].v = v
			}
			return
		}

		switch x := q.(type) {
		case *x:
			if x.c > 2*kx {
				x, i = t.splitX(p, x, pi, i)
			}
			pi = i
			p = x
			q = x.x[i].ch
		case *d:
			switch {
			case x.c < 2*kd:
				t.insert(x, i, k, v)
			default:
				t.overflow(p, x, pi, i, k, v)
			}
			return
		}
	}
}

// Put combines Get and Set in a more efficient way where the tree is walked
// only once. The upd(ater) receives (old-value, true) if a KV pair for k
// exists or (zero-value, false) otherwise. It can then return a (new-value,
// true) to create or overwrite the existing value in the KV pair, or
// (whatever, false) if it decides not to create or not to update the value of
// the KV pair.
//
//	tree.Set(k, v) call conceptually equals calling
//
//	tree.Put(k, func(interface{} /*K*/, bool){ return v, true })
//
// modulo the differing return values.
func (t *Tree) Put(k interface{} /*K*/, upd func(oldV interface{} /*V*/, exists bool) (newV interface{} /*V*/, write bool)) (oldV interface{} /*V*/, written bool) {
	pi := -1
	var p *x
	q := t.r
	var newV interface{} /*V*/
	if q == nil {
		// new KV pair in empty tree
		newV, written = upd(newV, false)
		if !written {
			return
		}

		z := t.insert(btDPool.Get().(*d), 0, k, newV)
		t.r, t.first, t.last = z, z, z
		return
	}

	for {
		i, ok := t.find(q, k)
		if ok {
			switch x := q.(type) {
			case *x:
				if x.c > 2*kx {
					x, i = t.splitX(p, x, pi, i)
				}
				pi = i + 1
				p = x
				q = x.x[i+1].ch
				continue
			case *d:
				oldV = x.d[i].v
				newV, written = upd(oldV, true)
				if !written {
					return
				}

				x.d[i].v = newV
			}
			return
		}

		switch x := q.(type) {
		case *x:
			if x.c > 2*kx {
				x, i = t.splitX(p, x, pi, i)
			}
			pi = i
			p = x
			q = x.x[i].ch
		case *d: // new KV pair
			newV, written = upd(newV, false)
			if !written {
				return
			}

			switch {
			case x.c < 2*kd:
				t.insert(x, i, k, newV)
			default:
				t.overflow(p, x, pi, i, k, newV)
			}
			return
		}
	}
}

func (t *Tree) split(p *x, q *d, pi, i int, k interface{} /*K*/, v interface{} /*V*/) {
	t.ver++
	r := btDPool.Get().(*d)
	if q.n != nil {
		r.n = q.n
		r.n.p = r
	} else {
		t.last = r
	}
	q.n = r
	r.p = q

	copy(r.d[:], q.d[kd:2*kd])
	for i := range q.d[kd:] {
		q.d[kd+i] = zde
	}
	q.c = kd
	r.c = kd
	var done bool
	if i > kd {
		done = true
		t.insert(r, i-kd, k, v)
	}
	if pi >= 0 {
		p.insert(pi, r.d[0].k, r)
	} else {
		t.r = newX(q).insert(0, r.d[0].k, r)
	}
	if done {
		return
	}

	t.insert(q, i, k, v)
}

func (t *Tree) splitX(p *x, q *x, pi int, i int) (*x, int) {
	t.ver++
	r := btXPool.Get().(*x)
	copy(r.x[:], q.x[kx+1:])
	q.c = kx
	r.c = kx
	if pi >= 0 {
		p.insert(pi, q.x[kx].k, r)
		q.x[kx].k = zk
		for i := range q.x[kx+1:] {
			q.x[kx+i+1] = zxe
		}

		switch {
		case i < kx:
			return q, i
		case i == kx:
			return p, pi
		default: // i > kx
			return r, i - kx - 1
		}
	}

	nr := newX(q).insert(0, q.x[kx].k, r)
	t.r = nr
	q.x[kx].k = zk
	for i := range q.x[kx+1:] {
		q.x[kx+i+1] = zxe
	}

	switch {
	case i < kx:
		return q, i
	case i == kx:
		return nr, 0
	default: // i > kx
		return r, i - kx - 1
	}
}

func (t *Tree) underflow(p *x, q *d, pi int) {
	t.ver++
	l, r := p.siblings(pi)

	if l != nil && l.c+q.c >= 2*kd {
		l.mvR(q, 1)
		p.x[pi-1].k = q.d[0].k
		return
	}

	if r != nil && q.c+r.c >= 2*kd {
		q.mvL(r, 1)
		p.x[pi].k = r.d[0].k
		r.d[r.c] = zde // GC
		return
	}

	if l != nil {
		t.cat(p, l, q, pi-1)
		return
	}

	t.cat(p, q, r, pi)
}

func (t *Tree) underflowX(p *x, q *x, pi int, i int) (*x, int) {
	t.ver++
	var l, r *x

	if pi >= 0 {
		if pi > 0 {
			l = p.x[pi-1].ch.(*x)
		}
		if pi < p.c {
			r = p.x[pi+1].ch.(*x)
		}
	}

	if l != nil && l.c > kx {
		q.x[q.c+1].ch = q.x[q.c].ch
		copy(q.x[1:], q.x[:q.c])
		q.x[0].ch = l.x[l.c].ch
		q.x[0].k = p.x[pi-1].k
		q.c++
		i++
		l.c--
		p.x[pi-1].k = l.x[l.c].k
		return q, i
	}

	if r != nil && r.c > kx {
		q.x[q.c].k = p.x[pi].k
		q.c++
		q.x[q.c].ch = r.x[0].ch
		p.x[pi].k = r.x[0].k
		copy(r.x[:], r.x[1:r.c])
		r.c--
		rc := r.c
		r.x[rc].ch = r.x[rc+1].ch
		r.x[rc].k = zk
		r.x[rc+1].ch = nil
		return q, i
	}

	if l != nil {
		i += l.c + 1
		t.catX(p, l, q, pi-1)
		q = l
		return q, i
	}

	t.catX(p, q, r, pi)
	return q, i
}

// ----------------------------------------------------------------- Enumerator

// Close recycles e to a pool for possible later reuse. No references to e
// should exist or such references must not be used afterwards.
func (e *Enumerator) Close() {
	*e = ze
	btEPool.Put(e)
}

// Next returns the currently enumerated item, if it exists and moves to the
// next item in the key collation order. If there is no item to return, err ==
// io.EOF is returned.
func (e *Enumerator) Next() (k interface{} /*K*/, v interface{} /*V*/, err error) {
	if err = e.err; err != nil {
		return
	}

	if e.ver != e.t.ver {
		f, hit := e.t.Seek(e.k)
		if !e.hit && hit {
			if err = f.next(); err != nil {
				return
			}
		}

		*e = *f
		f.Close()
	}
	if e.q == nil {
		e.err, err = io.EOF, io.EOF
		return
	}

	if e.i >= e.q.c {
		if err = e.next(); err != nil {
			return
		}
	}

	i := e.q.d[e.i]
	k, v = i.k, i.v
	e.k, e.hit = k, false
	e.next()
	return
}

func (e *Enumerator) next() error {
	if e.q == nil {
		e.err = io.EOF
		return io.EOF
	}

	switch {
	case e.i < e.q.c-1:
		e.i++
	default:
		if e.q, e.i = e.q.n, 0; e.q == nil {
			e.err = io.EOF
		}
	}
	return e.err
}

// Prev returns the currently enumerated item, if it exists and moves to the
// previous item in the key collation order. If there is no item to return, err
// == io.EOF is returned.
func (e *Enumerator) Prev() (k interface{} /*K*/, v interface{} /*V*/, err error) {
	if err = e.err; err != nil {
		return
	}

	if e.ver != e.t.ver {
		f, hit := e.t.Seek(e.k)
		if !e.hit && hit {
			if err = f.prev(); err != nil {
				return
			}
		}

		*e = *f
		f.Close()
	}
	if e.q == nil {
		e.err, err = io.EOF, io.EOF
		return
	}

	if e.i >= e.q.c {
		if err = e.next(); err != nil {
			return
		}
	}

	i := e.q.d[e.i]
	k, v = i.k, i.v
	e.k, e.hit = k, false
	e.prev()
	return
}

func (e *Enumerator) prev() error {
	if e.q == nil {
		e.err = io.EOF
		return io.EOF
	}

	switch {
	case e.i > 0:
		e.i--
	default:
		if e.q = e.q.p; e.q == nil {
			e.err = io.EOF
			break
		}

		e.i = e.q.c - 1
	}
	return e.err
}
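For orientation, here is a minimal, hypothetical usage sketch of the generic interface{}-keyed API defined above (TreeNew, Set, Get, Put and the Enumerator). It is illustrative only and not part of the vendored code; the caller-supplied Cmp closure and the example keys/values are assumptions.

	package main

	import (
		"fmt"
		"io"

		"github.com/cznic/b"
	)

	func main() {
		// Keys are interface{}; the caller supplies collation via Cmp.
		t := b.TreeNew(func(a, x interface{}) int { return a.(int) - x.(int) })
		defer t.Close()

		t.Set(2, "two")
		t.Set(1, "one")

		if v, ok := t.Get(1); ok {
			fmt.Println(v) // "one"
		}

		// Put walks the tree only once for a read-modify-write.
		t.Put(3, func(old interface{}, exists bool) (interface{}, bool) {
			if exists {
				return old, false // keep the existing value
			}
			return "three", true
		})

		// Enumerate in key collation order.
		if e, err := t.SeekFirst(); err == nil {
			for {
				k, v, err := e.Next()
				if err == io.EOF {
					break
				}
				fmt.Println(k, v)
			}
			e.Close()
		}
	}
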
53	vendor/github.com/cznic/b/doc.go	generated	vendored	Normal file
@@ -0,0 +1,53 @@
// Copyright 2014 The b Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package b implements the B+tree flavor of a BTree.
//
// Changelog
//
// 2014-06-26: Lower GC pressure by recycling things.
//
// 2014-04-18: Added new method Put.
//
// Generic types
//
// Keys and their associated values are interface{} typed, similar to all of
// the containers in the standard library.
//
// Semiautomatic production of a type specific variant of this package is
// supported via
//
//	$ make generic
//
// This command will write to stdout a version of the btree.go file where every
// key type occurrence is replaced by the word 'KEY' and every value type
// occurrence is replaced by the word 'VALUE'. Then you have to replace these
// tokens with your desired type(s), using any technique you're comfortable
// with.
//
// This is how, for example, 'example/int.go' was created:
//
//	$ mkdir example
//	$ make generic | sed -e 's/KEY/int/g' -e 's/VALUE/int/g' > example/int.go
//
// No other changes to int.go are necessary, it compiles just fine.
//
// Running the benchmarks for 1000 keys on a machine with Intel i5-4670 CPU @
// 3.4GHz, Go release 1.4.2.
//
//	$ go test -bench 1e3 example/all_test.go example/int.go
//	PASS
//	BenchmarkSetSeq1e3	   10000	    151620 ns/op
//	BenchmarkGetSeq1e3	   10000	    115354 ns/op
//	BenchmarkSetRnd1e3	    5000	    255865 ns/op
//	BenchmarkGetRnd1e3	   10000	    140466 ns/op
//	BenchmarkDelSeq1e3	   10000	    143860 ns/op
//	BenchmarkDelRnd1e3	   10000	    188228 ns/op
//	BenchmarkSeekSeq1e3	   10000	    156448 ns/op
//	BenchmarkSeekRnd1e3	   10000	    190587 ns/op
//	BenchmarkNext1e3	  200000	      9407 ns/op
//	BenchmarkPrev1e3	  200000	      9306 ns/op
//	ok  	command-line-arguments	26.369s
//	$
package b
929	vendor/github.com/cznic/b/example/int.go	generated	vendored	Normal file
@@ -0,0 +1,929 @@
// Copyright 2014 The b Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package b

import (
	"fmt"
	"io"
	"sync"
)

const (
	kx = 32 //TODO benchmark tune this number if using custom key/value type(s).
	kd = 32 //TODO benchmark tune this number if using custom key/value type(s).
)

func init() {
	if kd < 1 {
		panic(fmt.Errorf("kd %d: out of range", kd))
	}

	if kx < 2 {
		panic(fmt.Errorf("kx %d: out of range", kx))
	}
}

var (
	btDPool = sync.Pool{New: func() interface{} { return &d{} }}
	btEPool = btEpool{sync.Pool{New: func() interface{} { return &Enumerator{} }}}
	btTPool = btTpool{sync.Pool{New: func() interface{} { return &Tree{} }}}
	btXPool = sync.Pool{New: func() interface{} { return &x{} }}
)

type btTpool struct{ sync.Pool }

func (p *btTpool) get(cmp Cmp) *Tree {
	x := p.Get().(*Tree)
	x.cmp = cmp
	return x
}

type btEpool struct{ sync.Pool }

func (p *btEpool) get(err error, hit bool, i int, k int, q *d, t *Tree, ver int64) *Enumerator {
	x := p.Get().(*Enumerator)
	x.err, x.hit, x.i, x.k, x.q, x.t, x.ver = err, hit, i, k, q, t, ver
	return x
}

type (
	// Cmp compares a and b. Return value is:
	//
	//	< 0 if a <  b
	//	  0 if a == b
	//	> 0 if a >  b
	//
	Cmp func(a, b int) int

	d struct { // data page
		c int
		d [2*kd + 1]de
		n *d
		p *d
	}

	de struct { // d element
		k int
		v int
	}

	// Enumerator captures the state of enumerating a tree. It is returned
	// from the Seek* methods. The enumerator is aware of any mutations
	// made to the tree in the process of enumerating it and automatically
	// resumes the enumeration at the proper key, if possible.
	//
	// However, once an Enumerator returns io.EOF to signal "no more
	// items", it does no more attempt to "resync" on tree mutation(s). In
	// other words, io.EOF from an Enumerator is "sticky" (idempotent).
	Enumerator struct {
		err error
		hit bool
		i   int
		k   int
		q   *d
		t   *Tree
		ver int64
	}

	// Tree is a B+tree.
	Tree struct {
		c     int
		cmp   Cmp
		first *d
		last  *d
		r     interface{}
		ver   int64
	}

	xe struct { // x element
		ch interface{}
		k  int
	}

	x struct { // index page
		c int
		x [2*kx + 2]xe
	}
)

var ( // R/O zero values
	zd  d
	zde de
	ze  Enumerator
	zk  int
	zt  Tree
	zx  x
	zxe xe
)

func clr(q interface{}) {
	switch x := q.(type) {
	case *x:
		for i := 0; i <= x.c; i++ { // Ch0 Sep0 ... Chn-1 Sepn-1 Chn
			clr(x.x[i].ch)
		}
		*x = zx
		btXPool.Put(x)
	case *d:
		*x = zd
		btDPool.Put(x)
	}
}

// -------------------------------------------------------------------------- x

func newX(ch0 interface{}) *x {
	r := btXPool.Get().(*x)
	r.x[0].ch = ch0
	return r
}

func (q *x) extract(i int) {
	q.c--
	if i < q.c {
		copy(q.x[i:], q.x[i+1:q.c+1])
		q.x[q.c].ch = q.x[q.c+1].ch
		q.x[q.c].k = zk  // GC
		q.x[q.c+1] = zxe // GC
	}
}

func (q *x) insert(i int, k int, ch interface{}) *x {
	c := q.c
	if i < c {
		q.x[c+1].ch = q.x[c].ch
		copy(q.x[i+2:], q.x[i+1:c])
		q.x[i+1].k = q.x[i].k
	}
	c++
	q.c = c
	q.x[i].k = k
	q.x[i+1].ch = ch
	return q
}

func (q *x) siblings(i int) (l, r *d) {
	if i >= 0 {
		if i > 0 {
			l = q.x[i-1].ch.(*d)
		}
		if i < q.c {
			r = q.x[i+1].ch.(*d)
		}
	}
	return
}

// -------------------------------------------------------------------------- d

func (l *d) mvL(r *d, c int) {
	copy(l.d[l.c:], r.d[:c])
	copy(r.d[:], r.d[c:r.c])
	l.c += c
	r.c -= c
}

func (l *d) mvR(r *d, c int) {
	copy(r.d[c:], r.d[:r.c])
	copy(r.d[:c], l.d[l.c-c:])
	r.c += c
	l.c -= c
}

// ----------------------------------------------------------------------- Tree

// TreeNew returns a newly created, empty Tree. The compare function is used
// for key collation.
func TreeNew(cmp Cmp) *Tree {
	return btTPool.get(cmp)
}

// Clear removes all K/V pairs from the tree.
func (t *Tree) Clear() {
	if t.r == nil {
		return
	}

	clr(t.r)
	t.c, t.first, t.last, t.r = 0, nil, nil, nil
	t.ver++
}

// Close performs Clear and recycles t to a pool for possible later reuse. No
// references to t should exist or such references must not be used afterwards.
func (t *Tree) Close() {
	t.Clear()
	*t = zt
	btTPool.Put(t)
}

func (t *Tree) cat(p *x, q, r *d, pi int) {
	t.ver++
	q.mvL(r, r.c)
	if r.n != nil {
		r.n.p = q
	} else {
		t.last = q
	}
	q.n = r.n
	*r = zd
	btDPool.Put(r)
	if p.c > 1 {
		p.extract(pi)
		p.x[pi].ch = q
		return
	}

	switch x := t.r.(type) {
	case *x:
		*x = zx
		btXPool.Put(x)
	case *d:
		*x = zd
		btDPool.Put(x)
	}
	t.r = q
}

func (t *Tree) catX(p, q, r *x, pi int) {
	t.ver++
	q.x[q.c].k = p.x[pi].k
	copy(q.x[q.c+1:], r.x[:r.c])
	q.c += r.c + 1
	q.x[q.c].ch = r.x[r.c].ch
	*r = zx
	btXPool.Put(r)
	if p.c > 1 {
		p.c--
		pc := p.c
		if pi < pc {
			p.x[pi].k = p.x[pi+1].k
			copy(p.x[pi+1:], p.x[pi+2:pc+1])
			p.x[pc].ch = p.x[pc+1].ch
			p.x[pc].k = zk      // GC
			p.x[pc+1].ch = nil  // GC
		}
		return
	}

	switch x := t.r.(type) {
	case *x:
		*x = zx
		btXPool.Put(x)
	case *d:
		*x = zd
		btDPool.Put(x)
	}
	t.r = q
}

// Delete removes the k's KV pair, if it exists, in which case Delete returns
// true.
func (t *Tree) Delete(k int) (ok bool) {
	pi := -1
	var p *x
	q := t.r
	if q == nil {
		return false
	}

	for {
		var i int
		i, ok = t.find(q, k)
		if ok {
			switch x := q.(type) {
			case *x:
				if x.c < kx && q != t.r {
					x, i = t.underflowX(p, x, pi, i)
				}
				pi = i + 1
				p = x
				q = x.x[pi].ch
				ok = false
				continue
			case *d:
				t.extract(x, i)
				if x.c >= kd {
					return true
				}

				if q != t.r {
					t.underflow(p, x, pi)
				} else if t.c == 0 {
					t.Clear()
				}
				return true
			}
		}

		switch x := q.(type) {
		case *x:
			if x.c < kx && q != t.r {
				x, i = t.underflowX(p, x, pi, i)
			}
			pi = i
			p = x
			q = x.x[i].ch
		case *d:
			return false
		}
	}
}

func (t *Tree) extract(q *d, i int) { // (r int) {
	t.ver++
	//r = q.d[i].v // prepared for Extract
	q.c--
	if i < q.c {
		copy(q.d[i:], q.d[i+1:q.c+1])
	}
	q.d[q.c] = zde // GC
	t.c--
	return
}

func (t *Tree) find(q interface{}, k int) (i int, ok bool) {
	var mk int
	l := 0
	switch x := q.(type) {
	case *x:
		h := x.c - 1
		for l <= h {
			m := (l + h) >> 1
			mk = x.x[m].k
			switch cmp := t.cmp(k, mk); {
			case cmp > 0:
				l = m + 1
			case cmp == 0:
				return m, true
			default:
				h = m - 1
			}
		}
	case *d:
		h := x.c - 1
		for l <= h {
			m := (l + h) >> 1
			mk = x.d[m].k
			switch cmp := t.cmp(k, mk); {
			case cmp > 0:
				l = m + 1
			case cmp == 0:
				return m, true
			default:
				h = m - 1
			}
		}
	}
	return l, false
}

// First returns the first item of the tree in the key collating order, or
// (zero-value, zero-value) if the tree is empty.
func (t *Tree) First() (k int, v int) {
	if q := t.first; q != nil {
		q := &q.d[0]
		k, v = q.k, q.v
	}
	return
}

// Get returns the value associated with k and true if it exists. Otherwise Get
// returns (zero-value, false).
func (t *Tree) Get(k int) (v int, ok bool) {
	q := t.r
	if q == nil {
		return
	}

	for {
		var i int
		if i, ok = t.find(q, k); ok {
			switch x := q.(type) {
			case *x:
				q = x.x[i+1].ch
				continue
			case *d:
				return x.d[i].v, true
			}
		}
		switch x := q.(type) {
		case *x:
			q = x.x[i].ch
		default:
			return
		}
	}
}

func (t *Tree) insert(q *d, i int, k int, v int) *d {
	t.ver++
	c := q.c
	if i < c {
		copy(q.d[i+1:], q.d[i:c])
	}
	c++
	q.c = c
	q.d[i].k, q.d[i].v = k, v
	t.c++
	return q
}

// Last returns the last item of the tree in the key collating order, or
// (zero-value, zero-value) if the tree is empty.
func (t *Tree) Last() (k int, v int) {
	if q := t.last; q != nil {
		q := &q.d[q.c-1]
		k, v = q.k, q.v
	}
	return
}

// Len returns the number of items in the tree.
func (t *Tree) Len() int {
	return t.c
}

func (t *Tree) overflow(p *x, q *d, pi, i int, k int, v int) {
	t.ver++
	l, r := p.siblings(pi)

	if l != nil && l.c < 2*kd {
		l.mvL(q, 1)
		t.insert(q, i-1, k, v)
		p.x[pi-1].k = q.d[0].k
		return
	}

	if r != nil && r.c < 2*kd {
		if i < 2*kd {
			q.mvR(r, 1)
			t.insert(q, i, k, v)
			p.x[pi].k = r.d[0].k
			return
		}

		t.insert(r, 0, k, v)
		p.x[pi].k = k
		return
	}

	t.split(p, q, pi, i, k, v)
}

// Seek returns an Enumerator positioned on an item such that k >= item's
// key. ok reports if k == item.key The Enumerator's position is possibly
// after the last item in the tree.
func (t *Tree) Seek(k int) (e *Enumerator, ok bool) {
	q := t.r
	if q == nil {
		e = btEPool.get(nil, false, 0, k, nil, t, t.ver)
		return
	}

	for {
		var i int
		if i, ok = t.find(q, k); ok {
			switch x := q.(type) {
			case *x:
				q = x.x[i+1].ch
				continue
			case *d:
				return btEPool.get(nil, ok, i, k, x, t, t.ver), true
			}
		}

		switch x := q.(type) {
		case *x:
			q = x.x[i].ch
		case *d:
			return btEPool.get(nil, ok, i, k, x, t, t.ver), false
		}
	}
}

// SeekFirst returns an enumerator positioned on the first KV pair in the tree,
// if any. For an empty tree, err == io.EOF is returned and e will be nil.
func (t *Tree) SeekFirst() (e *Enumerator, err error) {
	q := t.first
	if q == nil {
		return nil, io.EOF
	}

	return btEPool.get(nil, true, 0, q.d[0].k, q, t, t.ver), nil
}

// SeekLast returns an enumerator positioned on the last KV pair in the tree,
// if any. For an empty tree, err == io.EOF is returned and e will be nil.
func (t *Tree) SeekLast() (e *Enumerator, err error) {
	q := t.last
	if q == nil {
		return nil, io.EOF
	}

	return btEPool.get(nil, true, q.c-1, q.d[q.c-1].k, q, t, t.ver), nil
}

// Set sets the value associated with k.
func (t *Tree) Set(k int, v int) {
	//dbg("--- PRE Set(%v, %v)\n%s", k, v, t.dump())
	//defer func() {
	//	dbg("--- POST\n%s\n====\n", t.dump())
	//}()

	pi := -1
	var p *x
	q := t.r
	if q == nil {
		z := t.insert(btDPool.Get().(*d), 0, k, v)
		t.r, t.first, t.last = z, z, z
		return
	}

	for {
		i, ok := t.find(q, k)
		if ok {
			switch x := q.(type) {
			case *x:
				if x.c > 2*kx {
					x, i = t.splitX(p, x, pi, i)
				}
				pi = i + 1
				p = x
				q = x.x[i+1].ch
				continue
			case *d:
				x.d[i].v = v
			}
			return
		}

		switch x := q.(type) {
		case *x:
			if x.c > 2*kx {
				x, i = t.splitX(p, x, pi, i)
			}
			pi = i
			p = x
			q = x.x[i].ch
		case *d:
			switch {
			case x.c < 2*kd:
				t.insert(x, i, k, v)
			default:
				t.overflow(p, x, pi, i, k, v)
			}
			return
		}
	}
}

// Put combines Get and Set in a more efficient way where the tree is walked
// only once. The upd(ater) receives (old-value, true) if a KV pair for k
// exists or (zero-value, false) otherwise. It can then return a (new-value,
// true) to create or overwrite the existing value in the KV pair, or
// (whatever, false) if it decides not to create or not to update the value of
// the KV pair.
//
//	tree.Set(k, v) call conceptually equals calling
//
//	tree.Put(k, func(int, bool){ return v, true })
//
// modulo the differing return values.
func (t *Tree) Put(k int, upd func(oldV int, exists bool) (newV int, write bool)) (oldV int, written bool) {
	pi := -1
	var p *x
	q := t.r
	var newV int
	if q == nil {
		// new KV pair in empty tree
		newV, written = upd(newV, false)
		if !written {
			return
		}

		z := t.insert(btDPool.Get().(*d), 0, k, newV)
		t.r, t.first, t.last = z, z, z
		return
	}

	for {
		i, ok := t.find(q, k)
		if ok {
			switch x := q.(type) {
			case *x:
				if x.c > 2*kx {
					x, i = t.splitX(p, x, pi, i)
				}
				pi = i + 1
				p = x
				q = x.x[i+1].ch
				continue
			case *d:
				oldV = x.d[i].v
				newV, written = upd(oldV, true)
				if !written {
					return
				}

				x.d[i].v = newV
			}
			return
		}

		switch x := q.(type) {
		case *x:
			if x.c > 2*kx {
				x, i = t.splitX(p, x, pi, i)
			}
			pi = i
			p = x
			q = x.x[i].ch
		case *d: // new KV pair
			newV, written = upd(newV, false)
			if !written {
				return
			}

			switch {
			case x.c < 2*kd:
				t.insert(x, i, k, newV)
			default:
				t.overflow(p, x, pi, i, k, newV)
			}
			return
		}
	}
}

func (t *Tree) split(p *x, q *d, pi, i int, k int, v int) {
	t.ver++
	r := btDPool.Get().(*d)
	if q.n != nil {
		r.n = q.n
		r.n.p = r
	} else {
		t.last = r
	}
	q.n = r
	r.p = q

	copy(r.d[:], q.d[kd:2*kd])
	for i := range q.d[kd:] {
		q.d[kd+i] = zde
	}
	q.c = kd
	r.c = kd
	var done bool
	if i > kd {
		done = true
		t.insert(r, i-kd, k, v)
	}
	if pi >= 0 {
		p.insert(pi, r.d[0].k, r)
	} else {
		t.r = newX(q).insert(0, r.d[0].k, r)
	}
	if done {
		return
	}

	t.insert(q, i, k, v)
}

func (t *Tree) splitX(p *x, q *x, pi int, i int) (*x, int) {
	t.ver++
	r := btXPool.Get().(*x)
	copy(r.x[:], q.x[kx+1:])
	q.c = kx
	r.c = kx
	if pi >= 0 {
		p.insert(pi, q.x[kx].k, r)
		q.x[kx].k = zk
		for i := range q.x[kx+1:] {
			q.x[kx+i+1] = zxe
		}

		switch {
		case i < kx:
			return q, i
		case i == kx:
			return p, pi
		default: // i > kx
			return r, i - kx - 1
		}
	}

	nr := newX(q).insert(0, q.x[kx].k, r)
	t.r = nr
	q.x[kx].k = zk
	for i := range q.x[kx+1:] {
		q.x[kx+i+1] = zxe
	}

	switch {
	case i < kx:
		return q, i
	case i == kx:
		return nr, 0
	default: // i > kx
		return r, i - kx - 1
	}
}

func (t *Tree) underflow(p *x, q *d, pi int) {
	t.ver++
	l, r := p.siblings(pi)

	if l != nil && l.c+q.c >= 2*kd {
		l.mvR(q, 1)
		p.x[pi-1].k = q.d[0].k
		return
	}

	if r != nil && q.c+r.c >= 2*kd {
		q.mvL(r, 1)
		p.x[pi].k = r.d[0].k
		r.d[r.c] = zde // GC
		return
	}

	if l != nil {
		t.cat(p, l, q, pi-1)
		return
	}

	t.cat(p, q, r, pi)
}

func (t *Tree) underflowX(p *x, q *x, pi int, i int) (*x, int) {
	t.ver++
	var l, r *x

	if pi >= 0 {
		if pi > 0 {
			l = p.x[pi-1].ch.(*x)
		}
		if pi < p.c {
			r = p.x[pi+1].ch.(*x)
		}
	}

	if l != nil && l.c > kx {
		q.x[q.c+1].ch = q.x[q.c].ch
		copy(q.x[1:], q.x[:q.c])
		q.x[0].ch = l.x[l.c].ch
		q.x[0].k = p.x[pi-1].k
		q.c++
		i++
		l.c--
		p.x[pi-1].k = l.x[l.c].k
		return q, i
	}

	if r != nil && r.c > kx {
		q.x[q.c].k = p.x[pi].k
		q.c++
		q.x[q.c].ch = r.x[0].ch
		p.x[pi].k = r.x[0].k
		copy(r.x[:], r.x[1:r.c])
		r.c--
		rc := r.c
		r.x[rc].ch = r.x[rc+1].ch
		r.x[rc].k = zk
		r.x[rc+1].ch = nil
		return q, i
	}

	if l != nil {
		i += l.c + 1
		t.catX(p, l, q, pi-1)
		q = l
		return q, i
	}

	t.catX(p, q, r, pi)
	return q, i
}

// ----------------------------------------------------------------- Enumerator

// Close recycles e to a pool for possible later reuse. No references to e
// should exist or such references must not be used afterwards.
func (e *Enumerator) Close() {
	*e = ze
	btEPool.Put(e)
}

// Next returns the currently enumerated item, if it exists and moves to the
// next item in the key collation order. If there is no item to return, err ==
// io.EOF is returned.
func (e *Enumerator) Next() (k int, v int, err error) {
	if err = e.err; err != nil {
		return
	}

	if e.ver != e.t.ver {
		f, hit := e.t.Seek(e.k)
		if !e.hit && hit {
			if err = f.next(); err != nil {
				return
			}
		}

		*e = *f
		f.Close()
	}
	if e.q == nil {
		e.err, err = io.EOF, io.EOF
		return
	}

	if e.i >= e.q.c {
		if err = e.next(); err != nil {
			return
		}
	}

	i := e.q.d[e.i]
	k, v = i.k, i.v
	e.k, e.hit = k, false
	e.next()
	return
}

func (e *Enumerator) next() error {
	if e.q == nil {
		e.err = io.EOF
		return io.EOF
	}

	switch {
	case e.i < e.q.c-1:
		e.i++
	default:
		if e.q, e.i = e.q.n, 0; e.q == nil {
			e.err = io.EOF
		}
	}
	return e.err
}

// Prev returns the currently enumerated item, if it exists and moves to the
// previous item in the key collation order. If there is no item to return, err
// == io.EOF is returned.
func (e *Enumerator) Prev() (k int, v int, err error) {
	if err = e.err; err != nil {
		return
	}

	if e.ver != e.t.ver {
		f, hit := e.t.Seek(e.k)
		if !e.hit && hit {
			if err = f.prev(); err != nil {
				return
			}
		}

		*e = *f
		f.Close()
	}
	if e.q == nil {
		e.err, err = io.EOF, io.EOF
		return
	}

	if e.i >= e.q.c {
		if err = e.next(); err != nil {
			return
		}
	}

	i := e.q.d[e.i]
	k, v = i.k, i.v
	e.k, e.hit = k, false
	e.prev()
	return
}

func (e *Enumerator) prev() error {
	if e.q == nil {
		e.err = io.EOF
		return io.EOF
	}

	switch {
	case e.i > 0:
		e.i--
	default:
		if e.q = e.q.p; e.q == nil {
			e.err = io.EOF
			break
		}

		e.i = e.q.c - 1
	}
	return e.err
}
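As a brief illustration of the int-specialized variant above, the sketch below assumes the generated file has been copied into the caller's own package (the hypothetical package kv), since example/int.go itself only demonstrates the output of make generic. It shows Seek positioning an enumerator for a half-open range scan.

	package kv // hypothetical package holding the generated int/int tree

	import "fmt"

	// rangeScan prints all pairs with from <= key < to, using Seek to position
	// the enumerator and Next to walk forward in key collation order.
	func rangeScan(t *Tree, from, to int) {
		e, _ := t.Seek(from) // Seek always returns a usable enumerator
		defer e.Close()
		for {
			k, v, err := e.Next()
			if err != nil || k >= to {
				return
			}
			fmt.Println(k, v)
		}
	}
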
27	vendor/github.com/cznic/bufs/LICENSE	generated	vendored	Normal file
@@ -0,0 +1,27 @@
Copyright (c) 2014 The bufs Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
      copyright notice, this list of conditions and the following disclaimer
      in the documentation and/or other materials provided with the
      distribution.
    * Neither the names of the authors nor the names of the
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
391	vendor/github.com/cznic/bufs/bufs.go	generated	vendored	Normal file
@@ -0,0 +1,391 @@
|
||||
// Copyright 2014 The bufs Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package bufs implements a simple buffer cache.
|
||||
//
|
||||
// The intended use scheme is like:
|
||||
//
|
||||
// type Foo struct {
|
||||
// buffers bufs.Buffers
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// // Bar can call Qux, but not the other way around (in this example).
|
||||
// const maxFooDepth = 2
|
||||
//
|
||||
// func NewFoo() *Foo {
|
||||
// return &Foo{buffers: bufs.New(maxFooDepth), ...}
|
||||
// }
|
||||
//
|
||||
// func (f *Foo) Bar(n int) {
|
||||
// buf := f.buffers.Alloc(n) // needed locally for computation and/or I/O
|
||||
// defer f.buffers.Free()
|
||||
// ...
|
||||
// f.Qux(whatever)
|
||||
// }
|
||||
//
|
||||
// func (f *Foo) Qux(n int) {
|
||||
// buf := f.buffers.Alloc(n) // needed locally for computation and/or I/O
|
||||
// defer f.buffers.Free()
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// The whole idea behind 'bufs' is that when calling e.g. Foo.Bar N times, then
|
||||
// normally, without using 'bufs', there will be 2*N (in this example) []byte
|
||||
// buffers allocated. While using 'bufs', only 2 buffers (in this example)
|
||||
// will ever be created. For large N it can be a substantial difference.
|
||||
//
|
||||
// It's not a good idea to use Buffers to cache too big buffers. The cost of
|
||||
// having a cached buffer is that the buffer is naturally not eligible for
|
||||
// garbage collection. Of course, that holds only while the Foo instance is
|
||||
// reachable, in the above example.
|
||||
//
|
||||
// The buffer count limit is intentionally "hard" (read panicking), although
|
||||
// configurable in New(). The rationale is to prevent recursive calls, using
|
||||
// Alloc, to cause excessive, "static" memory consumption. Tune the limit
|
||||
// carefully or do not use Buffers from within [mutually] recursive functions
|
||||
// where the nesting depth is not realistically bounded to some rather small
|
||||
// number.
|
||||
//
|
||||
// Buffers cannot guarantee improvements to you program performance. There may
|
||||
// be a gain in case where they fit well. Firm grasp on what your code is
|
||||
// actually doing, when and in what order is essential to proper use of
|
||||
// Buffers. It's _highly_ recommended to first do profiling and memory
|
||||
// profiling before even thinking about using 'bufs'. The real world example,
|
||||
// and cause for this package, was a first correct, yet no optimizations done
|
||||
// version of a program; producing few MB of useful data while allocating 20+GB
|
||||
// of memory. Of course the garbage collector properly kicked in, yet the
|
||||
// memory abuse caused ~80+% of run time to be spent memory management. The
|
||||
// program _was_ expected to be slow in its still development phase, but the
|
||||
// bottleneck was guessed to be in I/O. Actually the hard disk was waiting for
|
||||
// the billions bytes being allocated and zeroed. Garbage collect on low
|
||||
// memory, rinse and repeat.
|
||||
//
|
||||
// In the provided tests, TestFoo and TestFooBufs do the same simulated work,
|
||||
// except the later uses Buffers while the former does not. Suggested test runs
|
||||
// which show the differences:
|
||||
//
|
||||
// $ go test -bench . -benchmem
|
||||
//
|
||||
// or
|
||||
//
|
||||
// $ go test -c
|
||||
// $ ./bufs.test -test.v -test.run Foo -test.memprofile mem.out -test.memprofilerate 1
|
||||
// $ go tool pprof bufs.test mem.out --alloc_space --nodefraction 0.0001 --edgefraction 0 -web
|
||||
// $ # Note: Foo vs FooBufs allocated memory is in hundreds of MBs vs 8 kB.
|
||||
//
|
||||
// or
|
||||
//
|
||||
// $ make demo # same as all of the above
|
||||
//
|
||||
//
|
||||
// NOTE: Alloc/Free calls must be properly nested in the same way as in for
|
||||
// example BeginTransaction/EndTransaction pairs. If your code can panic then
|
||||
// the pairing should be enforced by deferred calls.
|
||||
//
|
||||
// NOTE: Buffers objects do not allocate any space until requested by Alloc,
|
||||
// the mechanism works on demand only.
|
||||
//
|
||||
// FAQ: Why the 'bufs' package name?
|
||||
//
|
||||
// Package name 'bufs' was intentionally chosen instead of the perhaps more
|
||||
// conventional 'buf'. There are already too many 'buf' named things in the
|
||||
// code out there and that'll be a source of a lot of trouble. It's a bit
|
||||
// similar situation as in the case of package "strings" (not "string").
|
||||
package bufs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Buffers type represents a buffer ([]byte) cache.
|
||||
//
|
||||
// NOTE: Do not modify Buffers directly, use only its methods. Do not create
|
||||
// additional values (copies) of Buffers, that'll break its functionality. Use
|
||||
// a pointer instead to refer to a single instance from different
|
||||
// places/scopes.
|
||||
type Buffers [][]byte
|
||||
|
||||
// New returns a newly created instance of Buffers with a maximum capacity of n
|
||||
// buffers.
|
||||
//
|
||||
// NOTE: 'bufs.New(n)' is the same as 'make(bufs.Buffers, n)'.
|
||||
func New(n int) Buffers {
|
||||
return make(Buffers, n)
|
||||
}
|
||||
|
||||
// Alloc will return a buffer such that len(r) == n. It will firstly try to
|
||||
// find an existing and unused buffer of big enough size. Only when there is no
|
||||
// such, then one of the buffer slots is reallocated to a bigger size.
|
||||
//
|
||||
// It's okay to use append with buffers returned by Alloc. But it can cause
|
||||
// allocation in that case and will again be producing load for the garbage
|
||||
// collector. The best use of Alloc is for I/O buffers where the needed size of
|
||||
// the buffer is figured out at some point of the code path in a 'final size'
|
||||
// sense. Another real world example are compression/decompression buffers.
|
||||
//
|
||||
// NOTE: The buffer returned by Alloc _is not_ zeroed. That's okay for e.g.
|
||||
// passing a buffer to io.Reader. If you need a zeroed buffer use Calloc.
|
||||
//
|
||||
// NOTE: Buffers returned from Alloc _must not_ be exposed/returned to your
|
||||
// clients. Those buffers are intended to be used strictly internally, within
|
||||
// the methods of some "object".
|
||||
//
|
||||
// NOTE: Alloc will panic if there are no buffers (buffer slots) left.
|
||||
func (p *Buffers) Alloc(n int) (r []byte) {
|
||||
b := *p
|
||||
if len(b) == 0 {
|
||||
panic(errors.New("Buffers.Alloc: out of buffers"))
|
||||
}
|
||||
|
||||
biggest, best, biggestI, bestI := -1, -1, -1, -1
|
||||
for i, v := range b {
|
||||
//ln := len(v)
|
||||
// The above was correct, buts it's just confusing. It worked
|
||||
// because not the buffers, but slices of them are returned in
|
||||
// the 'if best >= n' code path.
|
||||
ln := cap(v)
|
||||
|
||||
if ln >= biggest {
|
||||
biggest, biggestI = ln, i
|
||||
}
|
||||
|
||||
if ln >= n && (bestI < 0 || best > ln) {
|
||||
best, bestI = ln, i
|
||||
if ln == n {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
last := len(b) - 1
|
||||
if best >= n {
|
||||
r = b[bestI]
|
||||
b[last], b[bestI] = b[bestI], b[last]
|
||||
*p = b[:last]
|
||||
return r[:n]
|
||||
}
|
||||
|
||||
r = make([]byte, n, overCommit(n))
|
||||
b[biggestI] = r
|
||||
b[last], b[biggestI] = b[biggestI], b[last]
|
||||
*p = b[:last]
|
||||
return
|
||||
}
|
||||
|
||||
// Calloc acquires a buffer using Alloc and then clears it to zeros. The
|
||||
// zeroing goes up to n, not cap(r).
|
||||
func (p *Buffers) Calloc(n int) (r []byte) {
|
||||
r = p.Alloc(n)
|
||||
for i := range r {
|
||||
r[i] = 0
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Free makes the buffer most recently allocated by Alloc free (available) again for
|
||||
// Alloc.
|
||||
//
|
||||
// NOTE: Improper Free invocations, like in the sequence {New, Alloc, Free,
|
||||
// Free}, will panic.
|
||||
func (p *Buffers) Free() {
|
||||
b := *p
|
||||
b = b[:len(b)+1]
|
||||
*p = b
|
||||
}
|
||||
|
||||
// Stats reports memory consumed by Buffers, without accounting for some
|
||||
// (smallish) additional overhead.
|
||||
func (p *Buffers) Stats() (bytes int) {
|
||||
b := *p
|
||||
b = b[:cap(b)]
|
||||
for _, v := range b {
|
||||
bytes += cap(v)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Cache caches buffers ([]byte). A zero value of Cache is ready for use.
|
||||
//
|
||||
// NOTE: Do not modify a Cache directly, use only its methods. Do not create
|
||||
// additional values (copies) of a Cache; that'll break its functionality. Use
|
||||
// a pointer instead to refer to a single instance from different
|
||||
// places/scopes.
|
||||
type Cache [][]byte
|
||||
|
||||
// Get returns a buffer ([]byte) of length n. If no such buffer is cached then
|
||||
// the biggest cached buffer is resized to have length n and returned. If there
|
||||
// are no cached items at all, Get returns a newly allocated buffer.
|
||||
//
|
||||
// In other words the cache policy is:
|
||||
//
|
||||
// - If the cache is empty, the buffer must be newly created and returned.
|
||||
// Cache remains empty.
|
||||
//
|
||||
// - If a buffer of sufficient size is found in the cache, remove it from the
|
||||
// cache and return it.
|
||||
//
|
||||
// - Otherwise the cache is non empty, but no cached buffer is big enough.
|
||||
// Enlarge the biggest cached buffer, remove it from the cache and return it.
|
||||
// This provides cached buffer size adjustment based on demand.
|
||||
//
|
||||
// In short, if the cache is not empty, Get guarantees to make it always one
|
||||
// item less. This rule prevents uncontrolled cache growth in some scenarios.
|
||||
// The older policy did not prevent that. Another advantage is better cached
|
||||
// buffer size "auto tuning", although not in every possible use case.
|
||||
//
|
||||
// NOTE: The buffer returned by Get _is not guaranteed_ to be zeroed. That's
|
||||
// okay for e.g. passing a buffer to io.Reader. If you need a zeroed buffer
|
||||
// use Cget.
|
||||
func (c *Cache) Get(n int) []byte {
|
||||
r, _ := c.get(n)
|
||||
return r
|
||||
}
|
||||
|
||||
func (c *Cache) get(n int) (r []byte, isZeroed bool) {
|
||||
s := *c
|
||||
lens := len(s)
|
||||
if lens == 0 {
|
||||
r, isZeroed = make([]byte, n, overCommit(n)), true
|
||||
return
|
||||
}
|
||||
|
||||
i := sort.Search(lens, func(x int) bool { return len(s[x]) >= n })
|
||||
if i == lens {
|
||||
i--
|
||||
s[i] = make([]byte, n, overCommit(n))
|
||||
}
|
||||
r = s[i][:n]
|
||||
copy(s[i:], s[i+1:])
|
||||
s[lens-1] = nil
|
||||
s = s[:lens-1]
|
||||
*c = s
|
||||
return r, false
|
||||
}
|
||||
|
||||
// Cget acquires a buffer using Get and then clears it to zeros. The
|
||||
// zeroing goes up to n, not cap(r).
|
||||
func (c *Cache) Cget(n int) (r []byte) {
|
||||
r, ok := c.get(n)
|
||||
if ok {
|
||||
return
|
||||
}
|
||||
|
||||
for i := range r {
|
||||
r[i] = 0
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Put caches b for possible later reuse (via Get). No other references to b's
|
||||
// backing array may exist. Otherwise a big mess is sooner or later inevitable.
|
||||
func (c *Cache) Put(b []byte) {
|
||||
b = b[:cap(b)]
|
||||
lenb := len(b)
|
||||
if lenb == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
s := *c
|
||||
lens := len(s)
|
||||
i := sort.Search(lens, func(x int) bool { return len(s[x]) >= lenb })
|
||||
s = append(s, nil)
|
||||
copy(s[i+1:], s[i:])
|
||||
s[i] = b
|
||||
*c = s
|
||||
return
|
||||
}
|
||||
|
||||
// Stats reports memory consumed by a Cache, without accounting for some
|
||||
// (smallish) additional overhead. 'n' is the number of cached buffers, bytes
|
||||
// is their combined capacity.
|
||||
func (c Cache) Stats() (n, bytes int) {
|
||||
n = len(c)
|
||||
for _, v := range c {
|
||||
bytes += cap(v)
|
||||
}
|
||||
return
|
||||
}
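
// A usage sketch of the policy described above (illustrative only; the sizes
// are arbitrary):
//
//	var cache Cache
//	b := cache.Get(4096) // cache is empty: a newly allocated buffer, len(b) == 4096
//	// ... use b ...
//	cache.Put(b)         // cache it for later reuse
//	c := cache.Get(1024) // reuses the cached buffer, len(c) == 1024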
|
||||
|
||||
// CCache is a Cache which is safe for concurrent use by multiple goroutines.
|
||||
type CCache struct {
|
||||
c Cache
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// Get returns a buffer ([]byte) of length n. If no such buffer is cached then
|
||||
// the biggest cached buffer is resized to have length n and returned. If there
|
||||
// are no cached items at all, Get returns a newly allocated buffer.
|
||||
//
|
||||
// In other words the cache policy is:
|
||||
//
|
||||
// - If the cache is empty, the buffer must be newly created and returned.
|
||||
// Cache remains empty.
|
||||
//
|
||||
// - If a buffer of sufficient size is found in the cache, remove it from the
|
||||
// cache and return it.
|
||||
//
|
||||
// - Otherwise the cache is non empty, but no cached buffer is big enough.
|
||||
// Enlarge the biggest cached buffer, remove it from the cache and return it.
|
||||
// This provides cached buffer size adjustment based on demand.
|
||||
//
|
||||
// In short, if the cache is not empty, Get guarantees to make it always one
|
||||
// item less. This rule prevents uncontrolled cache growth in some scenarios.
|
||||
// The older policy did not prevent that. Another advantage is better cached
|
||||
// buffer size "auto tuning", although not in every possible use case.
|
||||
//
|
||||
// NOTE: The buffer returned by Get _is not guaranteed_ to be zeroed. That's
|
||||
// okay for e.g. passing a buffer to io.Reader. If you need a zeroed buffer
|
||||
// use Cget.
|
||||
func (c *CCache) Get(n int) []byte {
|
||||
c.mu.Lock()
|
||||
r, _ := c.c.get(n)
|
||||
c.mu.Unlock()
|
||||
return r
|
||||
}
|
||||
|
||||
// Cget acquires a buffer using Get and then clears it to zeros. The
|
||||
// zeroing goes up to n, not cap(r).
|
||||
func (c *CCache) Cget(n int) (r []byte) {
|
||||
c.mu.Lock()
|
||||
r = c.c.Cget(n)
|
||||
c.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Put caches b for possible later reuse (via Get). No other references to b's
|
||||
// backing array may exist. Otherwise a big mess is sooner or later inevitable.
|
||||
func (c *CCache) Put(b []byte) {
|
||||
c.mu.Lock()
|
||||
c.c.Put(b)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// Stats reports memory consumed by a Cache, without accounting for some
|
||||
// (smallish) additional overhead. 'n' is the number of cached buffers, bytes
|
||||
// is their combined capacity.
|
||||
func (c *CCache) Stats() (n, bytes int) {
|
||||
c.mu.Lock()
|
||||
n, bytes = c.c.Stats()
|
||||
c.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// GCache is a ready to use global instance of a CCache.
|
||||
var GCache CCache
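
// A sketch of concurrent use through the global instance (illustrative only;
// the handler function and its io.Reader argument are hypothetical):
//
//	func handle(conn io.Reader) error {
//		buf := GCache.Get(64 << 10) // safe from multiple goroutines
//		defer GCache.Put(buf)
//		_, err := conn.Read(buf)
//		return err
//	}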
|
||||
|
||||
func overCommit(n int) int {
|
||||
switch {
|
||||
case n < 8:
|
||||
return 8
|
||||
case n < 1e5:
|
||||
return 2 * n
|
||||
case n < 1e6:
|
||||
return 3 * n / 2
|
||||
default:
|
||||
return n
|
||||
}
|
||||
}
|
27
vendor/github.com/cznic/fileutil/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the names of the authors nor the names of the
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
251
vendor/github.com/cznic/fileutil/falloc/docs.go
generated
vendored
Normal file
@ -0,0 +1,251 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
/*
|
||||
|
||||
WIP: Package falloc provides allocation/deallocation of space within a
|
||||
file/store (WIP, unstable API).
|
||||
|
||||
Overall structure:
|
||||
File == n blocks.
|
||||
Block == n atoms.
|
||||
Atom == 16 bytes.
|
||||
|
||||
x6..x0 == least significant 7 bytes of a 64 bit integer, highest (7th) byte is
|
||||
0 and is not stored in the file.
|
||||
|
||||
Block first byte
|
||||
|
||||
Aka block type tag.
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0xFF: Free atom (free block of size 1).
|
||||
+------++---------++---------++------+
|
||||
| 0 || 1...7 || 8...14 || 15 |
|
||||
+------++---------++---------++------+
|
||||
| 0xFF || p6...p0 || n6...n0 || 0xFF |
|
||||
+------++---------++---------++------+
|
||||
|
||||
Link to the previous free block (atom addressed) is p6...p0, the next likewise in
|
||||
n6...n0. Doubly linked lists of "compatible" free blocks allow for free space
|
||||
reclaiming and merging. "Compatible" == of size at least some K. Heads of all
|
||||
such lists are organized per K or intervals of Ks elsewhere.
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0xFE: Free block, size == s6...s0 atoms.
|
||||
+------++---------++---------++---------++--
|
||||
| +0 || 1...7 || 8...14 || 15...21 || 22...16*size-1
|
||||
+------++---------++---------++---------++--
|
||||
| 0xFE || p6...p0 || n6...n0 || s6...s0 || ...
|
||||
+------++---------++---------++---------++--
|
||||
|
||||
Prev and next links as in the 0xFF first byte case. End of this block - see
|
||||
"Block last byte": 0xFE bellow. Data between == undefined.
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0xFD: Relocated block.
|
||||
+------++---------++-----------++------+
|
||||
| 0 || 1...7 || 8...14 || 15 |
|
||||
+------++---------++-----------++------+
|
||||
| 0xFD || r6...r0 || undefined || 0x00 | // == used block
|
||||
+------++---------++-----------++------+
|
||||
|
||||
Relocation link is r6..r0 == atom address. Relocations MUST NOT chain and MUST
|
||||
point to a "content" block, i.e. one with the first byte in 0x00...0xFC.
|
||||
|
||||
A relocated block allows one to permanently assign a handle/file pointer ("atom"
|
||||
address) to some content and resize the content anytime afterwards w/o having
|
||||
to update all the possible existing references to the original handle.
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0xFC: Used long block.
|
||||
+------++---------++--------------------++---------+---+
|
||||
| 0 || 1...2 || 3...N+2 || | |
|
||||
+------++---------++--------------------++---------+---+
|
||||
| 0xFC || n1...n0 || N bytes of content || padding | Z |
|
||||
+------++---------++--------------------++---------+---+
|
||||
|
||||
This block type is used for content of length in N == 238...61680 bytes. N is
|
||||
encoded as a 2 byte unsigned integer n1..n0 in network byte order. Values
|
||||
below 238 are reserved, those content lengths are to be carried by the
|
||||
0x00..0xFB block types.
|
||||
|
||||
1. n in 0x00EE...0xF0F0 is used for content under the same rules
|
||||
as in the 0x01..0xED type.
|
||||
|
||||
2. If the last byte of the content is not the last byte of an atom then
|
||||
the last byte of the block is 0x00.
|
||||
|
||||
3. If the last byte of the content IS the last byte of an atom:
|
||||
|
||||
3.1 If the last byte of content is in 0x00..0xFD then everything is OK.
|
||||
|
||||
3.2 If the last byte of content is 0xFE or 0xFF then the escape
|
||||
via n > 0xF0F0 MUST be used AND the block's last byte is 0x00 or 0x01,
|
||||
meaning value 0xFE and 0xFF respectively.
|
||||
|
||||
4. n in 0xF0F1...0xFFFF is like the escaped 0xEE..0xFB block.
|
||||
N == 13 + 16(n - 0xF0F1).
|
||||
|
||||
Discussion of the padding and Z fields - see the 0x01..0xED block type.
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0xEE...0xFB: Used escaped short block.
|
||||
+---++----------------------++---+
|
||||
| 0 || 1...N-1 || |
|
||||
+---++----------------------++---+
|
||||
| X || N-1 bytes of content || Z |
|
||||
+---++----------------------++---+
|
||||
|
||||
N == 15 + 16(X - 0xEE). Z is the content last byte encoded as follows.
|
||||
|
||||
case Z == 0x00: The last byte of content is 0xFE
|
||||
|
||||
case Z == 0x01: The last byte of content is 0xFF
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0x01...0xED: Used short block.
|
||||
+---++--------------------++---------+---+
|
||||
| 0 || 1...N || | |
|
||||
+---++--------------------++---------+---+
|
||||
| N || N bytes of content || padding | Z |
|
||||
+---++--------------------++---------+---+
|
||||
|
||||
This block type is used for content of length in 1...237 bytes. The value of
|
||||
the "padding" field, if of non zero length, is undefined.
|
||||
|
||||
If the last byte of content is the last byte of an atom (== its file byte
|
||||
offset & 0xF == 0xF) then such last byte MUST be in 0x00...0xFD.
|
||||
|
||||
If the last byte of content is the last byte of an atom AND the last byte of
|
||||
content is 0xFE or 0xFF then the short escape block type (0xEE...0xFB) MUST be
|
||||
used.
|
||||
|
||||
If the last byte of content is not the last byte of an atom, then the last byte
|
||||
of such block, i.e. the Z field, which is also a last byte of some atom, MUST
|
||||
be 0x00 (i.e. the used block marker). Other "tail" values are reserved.
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0x00: Used empty block.
|
||||
+------++-----------++------+
|
||||
| 0 || 1...14 || 15 |
|
||||
+------++-----------++------+
|
||||
| 0x00 || undefined || 0x00 | // == used block, other "tail" values reserved.
|
||||
+------++-----------++------+
|
||||
|
||||
All of the rules for 0x01..0xED applies. Depicted only for its different
|
||||
semantics (e.g. an allocated [existing] string but with length of zero).
|
||||
|
||||
==============================================================================
|
||||
|
||||
Block last byte
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0xFF: Free atom. Layout - see "Block first byte": FF.
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0xFE: Free block, size n atoms. Preceding 7 bytes == size (s6...s0) of the free
|
||||
block in atoms, network byte order
|
||||
--++---------++------+
|
||||
|| -8...-2 || -1 |
|
||||
--++---------++------+
|
||||
... || s6...s0 || 0xFE | <- block's last byte
|
||||
--++---------++------+
|
||||
|
||||
Layout at start of this block - see "Block first byte": FE.
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0x00...0xFD: Used (non free) block.
|
||||
|
||||
==============================================================================
|
||||
|
||||
Free lists table
|
||||
|
||||
The free lists table content is stored in the standard layout of a used block.
|
||||
|
||||
A table item is a 7 byte size field followed by a 7 byte atom address field
|
||||
(both in network byte order), thus every item is 14 contiguous bytes. The
|
||||
item's address field is pointing to a free block. The size field determines
|
||||
the minimal size (in atoms) of free blocks on that list.
|
||||
|
||||
The free lists table consists of n such items, thus its content has 14n bytes. Note that
|
||||
the largest block content is 61680 bytes and there are 14 bytes per table
|
||||
item, so the table is limited to at most 4405 entries.
|
||||
|
||||
Items in the table do not have to be sorted according to their size field values.
|
||||
|
||||
No two items can have the same value of the size field.
|
||||
|
||||
When freeing blocks, the block MUST be linked into an item list with the
|
||||
highest possible size field, which is less or equal to the number of atoms in
|
||||
the new free block.
|
||||
|
||||
When freeing a block, the block MUST be first merged with any adjacent free
|
||||
blocks (thus possibly creating a bigger free block) using information derived
|
||||
from the adjacent blocks first and last bytes. Such merged free blocks MUST be
|
||||
removed from their original doubly linked lists. Afterwards the new bigger free
|
||||
block is put to the free list table in the appropriate item.
|
||||
|
||||
Items with address field == 0 are legal. Such an item is a placeholder for an empty
|
||||
list of free blocks of the item's size.
|
||||
|
||||
Items with size field == 0 are legal. Such an item is a placeholder, used e.g. to
|
||||
avoid further reallocations/redirecting of the free lists table.
|
||||
|
||||
The largest possible allocation request (for content length 61680 bytes) is
|
||||
0xF10 (3856) atoms. All free blocks of this or bigger size are presumably put
|
||||
into a single table item with the size 3856. It may be useful to additionally
|
||||
have a free lists table item which links free blocks of some bigger size (say
|
||||
1M+) and then use the OS sparse file support (if present) to save the physical
|
||||
space used by such free blocks.
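
For illustration, derived from the block layouts above: the largest long block
content of 61680 bytes needs the 3 byte header (0xFC plus the 2 byte length)
plus the closing tail byte, i.e. at least 61684 bytes, and ceil(61684/16) ==
3856 atoms == 0xF10, matching the figure above.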
|
||||
|
||||
Smaller (<3856 atoms) free blocks can be organized exactly (every distinct size
|
||||
has its table item) or the sizes can run using other schema like e.g. "1, 2,
|
||||
4, 8, ..." (powers of 2) or "1, 2, 3, 5, 8, 13, ..." (the Fibonacci sequence)
|
||||
or they may be fine tuned to a specific usage pattern.
|
||||
|
||||
==============================================================================
|
||||
|
||||
Header
|
||||
|
||||
The first block of a file (atom address == file offset == 0) is the file header.
|
||||
The header block has the standard layout of a used short non escaped block.
|
||||
|
||||
Special conditions apply: The header block and its content MUST be like this:
|
||||
|
||||
+------+---------+---------+------+
|
||||
| 0 | 1...7 | 8...14 | 15 |
|
||||
+------+---------+---------+------+
|
||||
| 0x0F | m6...m0 | f6...f0 | FLTT |
|
||||
+------+---------+---------+------+
|
||||
|
||||
m6..m0 is a "magic" value 0xF1C1A1FEA51B1E.
|
||||
|
||||
f6...f0 is the atom address of the free lists table (discussed elsewhere).
|
||||
If f6...f0 == 0x00 then there is no free lists table (yet).
|
||||
|
||||
FLTT describes the type of the Free List Table. Currently defined values:
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
FLTT == 0: The Free List Table is fixed at atom address 2. It has a fixed size of 3856 entries
|
||||
for free lists of sizes 1..3855 atoms; the last entry is for the list of free blocks >= 3856 atoms.
|
||||
*/
|
||||
package falloc
|
||||
|
||||
const (
|
||||
INVALID_HANDLE = Handle(-1)
|
||||
)
|
130
vendor/github.com/cznic/fileutil/falloc/error.go
generated
vendored
Normal file
@ -0,0 +1,130 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
package falloc
|
||||
|
||||
import "fmt"
|
||||
|
||||
// EBadRequest is an error produced for invalid operation, e.g. for data of more than maximum allowed.
|
||||
type EBadRequest struct {
|
||||
Name string
|
||||
Size int
|
||||
}
|
||||
|
||||
func (e *EBadRequest) Error() string {
|
||||
return fmt.Sprintf("%s: size %d", e.Name, e.Size)
|
||||
}
|
||||
|
||||
// EClose is a file/store close error.
|
||||
type EClose struct {
|
||||
Name string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *EClose) Error() string {
|
||||
return fmt.Sprintf("%sx: %s", e.Name, e.Err)
|
||||
}
|
||||
|
||||
// ECorrupted is a file/store format error.
|
||||
type ECorrupted struct {
|
||||
Name string
|
||||
Ofs int64
|
||||
}
|
||||
|
||||
func (e *ECorrupted) Error() string {
|
||||
return fmt.Sprintf("%s: corrupted data @%#x", e.Name, e.Ofs)
|
||||
}
|
||||
|
||||
// ECreate is a file/store create error.
|
||||
type ECreate struct {
|
||||
Name string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *ECreate) Error() string {
|
||||
return fmt.Sprintf("%s: %s", e.Name, e.Err)
|
||||
}
|
||||
|
||||
// EFreeList is a file/store format error.
|
||||
type EFreeList struct {
|
||||
Name string
|
||||
Size int64
|
||||
Block int64
|
||||
}
|
||||
|
||||
func (e *EFreeList) Error() string {
|
||||
return fmt.Sprintf("%s: invalid free list item, size %#x, block %#x", e.Name, e.Size, e.Block)
|
||||
}
|
||||
|
||||
// EHandle is an error type reported for invalid Handles.
|
||||
type EHandle struct {
|
||||
Name string
|
||||
Handle Handle
|
||||
}
|
||||
|
||||
func (e EHandle) Error() string {
|
||||
return fmt.Sprintf("%s: invalid handle %#x", e.Name, e.Handle)
|
||||
}
|
||||
|
||||
// EHeader is a file/store format error.
|
||||
type EHeader struct {
|
||||
Name string
|
||||
Header []byte
|
||||
Expected []byte
|
||||
}
|
||||
|
||||
func (e *EHeader) Error() string {
|
||||
return fmt.Sprintf("%s: invalid header, got [% x], expected [% x]", e.Name, e.Header, e.Expected)
|
||||
}
|
||||
|
||||
// ENullHandle is a file/store access error via a null handle.
|
||||
type ENullHandle string
|
||||
|
||||
func (e ENullHandle) Error() string {
|
||||
return fmt.Sprintf("%s: access via null handle", e)
|
||||
}
|
||||
|
||||
// EOpen is a file/store open error.
|
||||
type EOpen struct {
|
||||
Name string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *EOpen) Error() string {
|
||||
return fmt.Sprintf("%s: %s", e.Name, e.Err)
|
||||
}
|
||||
|
||||
// ERead is a file/store read error.
|
||||
type ERead struct {
|
||||
Name string
|
||||
Ofs int64
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *ERead) Error() string {
|
||||
return fmt.Sprintf("%s, %#x: %s", e.Name, e.Ofs, e.Err)
|
||||
}
|
||||
|
||||
// ESize is a file/store size error.
|
||||
type ESize struct {
|
||||
Name string
|
||||
Size int64
|
||||
}
|
||||
|
||||
func (e *ESize) Error() string {
|
||||
return fmt.Sprintf("%s: invalid size %#x(%d), size %%16 != 0", e.Name, e.Size, e.Size)
|
||||
}
|
||||
|
||||
// EWrite is a file/store write error.
|
||||
type EWrite struct {
|
||||
Name string
|
||||
Ofs int64
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *EWrite) Error() string {
|
||||
return fmt.Sprintf("%s, %#x: %s", e.Name, e.Ofs, e.Err)
|
||||
}
|
676
vendor/github.com/cznic/fileutil/falloc/falloc.go
generated
vendored
Normal file
@ -0,0 +1,676 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
/*
|
||||
|
||||
This is a mostly (WIP) conforming implementation of the "specs" in docs.go.
|
||||
|
||||
The main incompleteness is support for only one kind of FTL, though this table kind is still per "specs".
|
||||
|
||||
*/
|
||||
|
||||
package falloc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/cznic/fileutil/storage"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Handle is a reference to a block in a file/store.
|
||||
// Handle is a uint56 wrapped in an int64, i.e. the most significant byte must always be zero.
|
||||
type Handle int64
|
||||
|
||||
// Put puts the 7 least significant bytes of h into b. The MSB of h should be zero.
|
||||
func (h Handle) Put(b []byte) {
|
||||
for ofs := 6; ofs >= 0; ofs-- {
|
||||
b[ofs] = byte(h)
|
||||
h >>= 8
|
||||
}
|
||||
}
|
||||
|
||||
// Get gets the 7 least significant bytes of h from b. The MSB of h is zeroed.
|
||||
func (h *Handle) Get(b []byte) {
|
||||
var x Handle
|
||||
for ofs := 0; ofs <= 6; ofs++ {
|
||||
x = x<<8 | Handle(b[ofs])
|
||||
}
|
||||
*h = x
|
||||
}
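
// A round-trip sketch of the 7 byte big-endian encoding above (illustrative
// only):
//
//	var b [7]byte
//	Handle(0x0102030405060A).Put(b[:]) // b == [7]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x0A}
//
//	var h Handle
//	h.Get(b[:]) // h == 0x0102030405060A again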
|
||||
|
||||
// File is a file/store with space allocation/deallocation support.
|
||||
type File struct {
|
||||
f storage.Accessor
|
||||
atoms int64 // current file size in atom units
|
||||
canfree int64 // only blocks >= canfree can be subject to Free()
|
||||
freetab [3857]int64 // freetab[0] is unused, freetab[1] is size 1 ptr, freetab[2] is size 2 ptr, ...
|
||||
rwm sync.RWMutex
|
||||
}
|
||||
|
||||
func (f *File) read(b []byte, off int64) {
|
||||
if n, err := f.f.ReadAt(b, off); n != len(b) {
|
||||
panic(&ERead{f.f.Name(), off, err})
|
||||
}
|
||||
}
|
||||
|
||||
func (f *File) write(b []byte, off int64) {
|
||||
if n, err := f.f.WriteAt(b, off); n != len(b) {
|
||||
panic(&EWrite{f.f.Name(), off, err})
|
||||
}
|
||||
}
|
||||
|
||||
var ( // R/O
|
||||
hdr = []byte{0x0f, 0xf1, 0xc1, 0xa1, 0xfe, 0xa5, 0x1b, 0x1e, 0, 0, 0, 0, 0, 0, 2, 0} // free lists table @2
|
||||
empty = make([]byte, 16)
|
||||
zero = []byte{0}
|
||||
zero7 = make([]byte, 7)
|
||||
)
|
||||
|
||||
// New returns a new File backed by store or an error if any.
|
||||
// Any existing data in store are discarded.
|
||||
func New(store storage.Accessor) (f *File, err error) {
|
||||
f = &File{f: store}
|
||||
return f, storage.Mutate(store, func() (err error) {
|
||||
if err = f.f.Truncate(0); err != nil {
|
||||
return &ECreate{f.f.Name(), err}
|
||||
}
|
||||
|
||||
if _, err = f.Alloc(hdr[1:]); err != nil { //TODO internal panicking versions of the exported fns.
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = f.Alloc(nil); err != nil { // (empty) root @1
|
||||
return
|
||||
}
|
||||
|
||||
b := make([]byte, 3856*14)
|
||||
for i := 1; i <= 3856; i++ {
|
||||
Handle(i).Put(b[(i-1)*14:])
|
||||
}
|
||||
if _, err = f.Alloc(b); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
f.canfree = f.atoms
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
// Open returns a new File backed by store or an error if any.
|
||||
// Store already has to be in a valid format.
|
||||
func Open(store storage.Accessor) (f *File, err error) {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
f = nil
|
||||
err = e.(error)
|
||||
}
|
||||
}()
|
||||
|
||||
fi, err := store.Stat()
|
||||
if err != nil {
|
||||
panic(&EOpen{store.Name(), err})
|
||||
}
|
||||
|
||||
fs := fi.Size()
|
||||
if fs&0xf != 0 {
|
||||
panic(&ESize{store.Name(), fi.Size()})
|
||||
}
|
||||
|
||||
f = &File{f: store, atoms: fs >> 4}
|
||||
b := make([]byte, len(hdr))
|
||||
f.read(b, 0)
|
||||
if !bytes.Equal(b, hdr) {
|
||||
panic(&EHeader{store.Name(), b, append([]byte{}, hdr...)})
|
||||
}
|
||||
|
||||
var atoms int64
|
||||
b, atoms = f.readUsed(2)
|
||||
f.canfree = atoms + 2
|
||||
ofs := 0
|
||||
var size, p Handle
|
||||
for ofs < len(b) {
|
||||
size.Get(b[ofs:])
|
||||
ofs += 7
|
||||
p.Get(b[ofs:])
|
||||
ofs += 7
|
||||
if sz, pp := int64(size), int64(p); size == 0 || size > 3856 || (pp != 0 && pp < f.canfree) || pp<<4 > fs-16 {
|
||||
panic(&EFreeList{store.Name(), sz, pp})
|
||||
}
|
||||
|
||||
f.freetab[size] = int64(p)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Accessor returns the File's underlying Accessor.
|
||||
func (f *File) Accessor() storage.Accessor {
|
||||
return f.f
|
||||
}
|
||||
|
||||
// Close closes f and returns an error if any.
|
||||
func (f *File) Close() (err error) {
|
||||
return storage.Mutate(f.Accessor(), func() (err error) {
|
||||
if err = f.f.Close(); err != nil {
|
||||
err = &EClose{f.f.Name(), err}
|
||||
}
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
// Root returns the handle of the DB root (top level directory, ...).
|
||||
func (f *File) Root() Handle {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (f *File) readUsed(atom int64) (content []byte, atoms int64) {
|
||||
b, redirected := make([]byte, 7), false
|
||||
redir:
|
||||
ofs := atom << 4
|
||||
f.read(b[:1], ofs)
|
||||
switch pre := b[0]; {
|
||||
default:
|
||||
panic(&ECorrupted{f.f.Name(), ofs})
|
||||
case pre == 0x00: // Empty block
|
||||
case pre >= 1 && pre <= 237: // Short
|
||||
content = make([]byte, pre)
|
||||
f.read(content, ofs+1)
|
||||
case pre >= 0xee && pre <= 0xfb: // Short esc
|
||||
content = make([]byte, 15+16*(pre-0xee))
|
||||
f.read(content, ofs+1)
|
||||
content[len(content)-1] += 0xfe
|
||||
case pre == 0xfc: // Long
|
||||
f.read(b[:2], ofs+1)
|
||||
n := int(b[0])<<8 + int(b[1])
|
||||
switch {
|
||||
default:
|
||||
panic(&ECorrupted{f.f.Name(), ofs + 1})
|
||||
case n >= 238 && n <= 61680: // Long non esc
|
||||
content = make([]byte, n)
|
||||
f.read(content, ofs+3)
|
||||
case n >= 61681: // Long esc
|
||||
content = make([]byte, 13+16*(n-0xf0f1))
|
||||
f.read(content, ofs+3)
|
||||
content[len(content)-1] += 0xfe
|
||||
}
|
||||
case pre == 0xfd: // redir
|
||||
if redirected {
|
||||
panic(&ECorrupted{f.f.Name(), ofs})
|
||||
}
|
||||
|
||||
f.read(b[:7], ofs+1)
|
||||
(*Handle)(&atom).Get(b)
|
||||
redirected = true
|
||||
goto redir
|
||||
}
|
||||
return content, rq2Atoms(len(content))
|
||||
}
|
||||
|
||||
func (f *File) writeUsed(b []byte, atom int64) {
|
||||
n := len(b)
|
||||
switch ofs, atoms, endmark := atom<<4, rq2Atoms(n), true; {
|
||||
default:
|
||||
panic("internal error")
|
||||
case n == 0:
|
||||
f.write(empty, ofs)
|
||||
case n <= 237:
|
||||
if (n+1)&0xf == 0 { // content end == atom end
|
||||
if v := b[n-1]; v >= 0xfe { // escape
|
||||
pre := []byte{byte((16*0xee + n - 15) >> 4)}
|
||||
f.write(pre, ofs)
|
||||
f.write(b[:n-1], ofs+1)
|
||||
f.write([]byte{v - 0xfe}, ofs+atoms<<4-1)
|
||||
return
|
||||
}
|
||||
endmark = false
|
||||
}
|
||||
// non escape
|
||||
pre := []byte{byte(n)}
|
||||
f.write(pre, ofs)
|
||||
f.write(b, ofs+1)
|
||||
if endmark {
|
||||
f.write(zero, ofs+atoms<<4-1) // last block byte <- used block
|
||||
}
|
||||
case n > 237 && n <= 61680:
|
||||
if (n+3)&0xf == 0 { // content end == atom end
|
||||
if v := b[n-1]; v >= 0xfe { // escape
|
||||
x := (16*0xf0f1 + n - 13) >> 4
|
||||
pre := []byte{0xFC, byte(x >> 8), byte(x)}
|
||||
f.write(pre, ofs)
|
||||
f.write(b[:n-1], ofs+3)
|
||||
f.write([]byte{v - 0xfe}, ofs+atoms<<4-1)
|
||||
return
|
||||
}
|
||||
endmark = false
|
||||
}
|
||||
// non esacpe
|
||||
pre := []byte{0xfc, byte(n >> 8), byte(n)}
|
||||
f.write(pre, ofs)
|
||||
f.write(b, ofs+3)
|
||||
if endmark {
|
||||
f.write(zero, ofs+atoms<<4-1) // last block byte <- used block
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func rq2Atoms(rqbytes int) (rqatoms int64) {
|
||||
if rqbytes > 237 {
|
||||
rqbytes += 2
|
||||
}
|
||||
return int64(rqbytes>>4 + 1)
|
||||
}
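
// Illustrative values of the mapping above (one atom == 16 bytes; requests
// above 237 bytes also carry the 2 byte length of a long block header):
//
//	rq2Atoms(0)     == 1 // an empty block still occupies one atom
//	rq2Atoms(15)    == 1
//	rq2Atoms(16)    == 2
//	rq2Atoms(237)   == 15
//	rq2Atoms(238)   == 16
//	rq2Atoms(61680) == 3856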
|
||||
|
||||
func (f *File) extend(b []byte) (handle int64) {
|
||||
handle = f.atoms
|
||||
f.writeUsed(b, handle)
|
||||
f.atoms += rq2Atoms(len(b))
|
||||
return
|
||||
}
|
||||
|
||||
// Alloc stores b in a newly allocated space and returns its handle and an error if any.
|
||||
func (f *File) Alloc(b []byte) (handle Handle, err error) {
|
||||
err = storage.Mutate(f.Accessor(), func() (err error) {
|
||||
rqAtoms := rq2Atoms(len(b))
|
||||
if rqAtoms > 3856 {
|
||||
return &EBadRequest{f.f.Name(), len(b)}
|
||||
}
|
||||
|
||||
for foundsize, foundp := range f.freetab[rqAtoms:] {
|
||||
if foundp != 0 {
|
||||
// this works only for the current unique sizes list (except the last item!)
|
||||
size := int64(foundsize) + rqAtoms
|
||||
handle = Handle(foundp)
|
||||
if size == 3856 {
|
||||
buf := make([]byte, 7)
|
||||
f.read(buf, int64(handle)<<4+15)
|
||||
(*Handle)(&size).Get(buf)
|
||||
}
|
||||
f.delFree(int64(handle), size)
|
||||
if rqAtoms < size {
|
||||
f.addFree(int64(handle)+rqAtoms, size-rqAtoms)
|
||||
}
|
||||
f.writeUsed(b, int64(handle))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
handle = Handle(f.extend(b))
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// checkLeft returns the atom size of a free block left adjacent to block @atom.
|
||||
// If that block is not free the returned size is 0.
|
||||
func (f *File) checkLeft(atom int64) (size int64) {
|
||||
if atom <= f.canfree {
|
||||
return
|
||||
}
|
||||
|
||||
b := make([]byte, 7)
|
||||
fp := atom << 4
|
||||
f.read(b[:1], fp-1)
|
||||
switch last := b[0]; {
|
||||
case last <= 0xfd:
|
||||
// used block
|
||||
case last == 0xfe:
|
||||
f.read(b, fp-8)
|
||||
(*Handle)(&size).Get(b)
|
||||
case last == 0xff:
|
||||
size = 1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// getInfo returns the block @atom type and size.
|
||||
func (f *File) getInfo(atom int64) (pref byte, size int64) {
|
||||
b := make([]byte, 7)
|
||||
fp := atom << 4
|
||||
f.read(b[:1], fp)
|
||||
switch pref = b[0]; {
|
||||
case pref == 0: // Empty used
|
||||
size = 1
|
||||
case pref >= 1 && pref <= 237: // Short
|
||||
size = rq2Atoms(int(pref))
|
||||
case pref >= 0xee && pref <= 0xfb: // Short esc
|
||||
size = rq2Atoms(15 + 16*int(pref-0xee))
|
||||
case pref == 0xfc: // Long
|
||||
f.read(b[:2], fp+1)
|
||||
n := int(b[0])<<8 + int(b[1])
|
||||
switch {
|
||||
default:
|
||||
panic(&ECorrupted{f.f.Name(), fp + 1})
|
||||
case n >= 238 && n <= 61680: // Long non esc
|
||||
size = rq2Atoms(n)
|
||||
case n >= 61681: // Long esc
|
||||
size = rq2Atoms(13 + 16*(n-0xf0f1))
|
||||
}
|
||||
case pref == 0xfd: // reloc
|
||||
size = 1
|
||||
case pref == 0xfe:
|
||||
f.read(b, fp+15)
|
||||
(*Handle)(&size).Get(b)
|
||||
case pref == 0xff:
|
||||
size = 1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// getSize returns the atom size of the block @atom and whether it is free.
|
||||
func (f *File) getSize(atom int64) (size int64, isFree bool) {
|
||||
var typ byte
|
||||
typ, size = f.getInfo(atom)
|
||||
isFree = typ >= 0xfe
|
||||
return
|
||||
}
|
||||
|
||||
// checkRight returns the atom size of a free block right adjacent to block @atom,atoms.
|
||||
// If that block is not free the returned size is 0.
|
||||
func (f *File) checkRight(atom, atoms int64) (size int64) {
|
||||
if atom+atoms >= f.atoms {
|
||||
return
|
||||
}
|
||||
|
||||
if sz, free := f.getSize(atom + atoms); free {
|
||||
size = sz
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// delFree removes the atoms@atom free block from the free block list
|
||||
func (f *File) delFree(atom, atoms int64) {
|
||||
b := make([]byte, 15)
|
||||
size := int(atoms)
|
||||
if n := len(f.freetab); atoms >= int64(n) {
|
||||
size = n - 1
|
||||
}
|
||||
fp := atom << 4
|
||||
f.read(b[1:], fp+1)
|
||||
var prev, next Handle
|
||||
prev.Get(b[1:])
|
||||
next.Get(b[8:])
|
||||
|
||||
switch {
|
||||
case prev == 0 && next != 0:
|
||||
next.Put(b)
|
||||
f.write(b[:7], int64(32+3+7+(size-1)*14))
|
||||
f.write(zero7, int64(next)<<4+1)
|
||||
f.freetab[size] = int64(next)
|
||||
case prev != 0 && next == 0:
|
||||
f.write(zero7, int64(prev)<<4+8)
|
||||
case prev != 0 && next != 0:
|
||||
prev.Put(b)
|
||||
f.write(b[:7], int64(next)<<4+1)
|
||||
next.Put(b)
|
||||
f.write(b[:7], int64(prev)<<4+8)
|
||||
default: // prev == 0 && next == 0:
|
||||
f.write(zero7, int64(32+3+7+(size-1)*14))
|
||||
f.freetab[size] = 0
|
||||
}
|
||||
}
|
||||
|
||||
// addFree adds atoms@atom to the free block lists and marks it free.
|
||||
func (f *File) addFree(atom, atoms int64) {
|
||||
b := make([]byte, 7)
|
||||
size := int(atoms)
|
||||
if n := len(f.freetab); atoms >= int64(n) {
|
||||
size = n - 1
|
||||
}
|
||||
head := f.freetab[size]
|
||||
if head == 0 { // empty list
|
||||
f.makeFree(0, atom, atoms, 0)
|
||||
Handle(atom).Put(b)
|
||||
f.write(b, int64(32+3+7+(size-1)*14))
|
||||
f.freetab[size] = atom
|
||||
return
|
||||
}
|
||||
|
||||
Handle(atom).Put(b)
|
||||
f.write(b, head<<4+1) // head.prev = atom
|
||||
f.makeFree(0, atom, atoms, head) // atom.next = head
|
||||
f.write(b, int64(32+3+7+(size-1)*14))
|
||||
f.freetab[size] = atom
|
||||
}
|
||||
|
||||
// makeFree sets up the content of a free block atoms@atom, fills the prev and next links.
|
||||
func (f *File) makeFree(prev, atom, atoms, next int64) {
|
||||
b := make([]byte, 23)
|
||||
fp := atom << 4
|
||||
if atoms == 1 {
|
||||
b[0] = 0xff
|
||||
Handle(prev).Put(b[1:])
|
||||
Handle(next).Put(b[8:])
|
||||
b[15] = 0xff
|
||||
f.write(b[:16], fp)
|
||||
return
|
||||
}
|
||||
|
||||
b[0] = 0xfe
|
||||
Handle(prev).Put(b[1:])
|
||||
Handle(next).Put(b[8:])
|
||||
Handle(atoms).Put(b[15:])
|
||||
f.write(b[:22], fp)
|
||||
b[22] = 0xfe
|
||||
f.write(b[15:], fp+atoms<<4-8)
|
||||
}
|
||||
|
||||
// Read reads and returns the data associated with handle and an error if any.
|
||||
// Passing an invalid handle to Read may return invalid data without error.
|
||||
// It's like getting garbage via passing an invalid pointer to C.memcpy().
|
||||
func (f *File) Read(handle Handle) (b []byte, err error) {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
b = nil
|
||||
err = e.(error)
|
||||
}
|
||||
}()
|
||||
|
||||
switch handle {
|
||||
case 0:
|
||||
panic(ENullHandle(f.f.Name()))
|
||||
case 2:
|
||||
panic(&EHandle{f.f.Name(), handle})
|
||||
default:
|
||||
b, _ = f.readUsed(int64(handle))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Free frees space associated with handle and returns an error if any. Passing an invalid
|
||||
// handle to Free or reusing handle afterwards will probably corrupt the database or provide
|
||||
// invalid data on Read. It's like corrupting memory via passing an invalid pointer to C.free()
|
||||
// or reusing that pointer.
|
||||
func (f *File) Free(handle Handle) (err error) {
|
||||
return storage.Mutate(f.Accessor(), func() (err error) {
|
||||
atom := int64(handle)
|
||||
atoms, isFree := f.getSize(atom)
|
||||
if isFree || atom < f.canfree {
|
||||
return &EHandle{f.f.Name(), handle}
|
||||
}
|
||||
|
||||
leftFree, rightFree := f.checkLeft(atom), f.checkRight(atom, atoms)
|
||||
switch {
|
||||
case leftFree != 0 && rightFree != 0:
|
||||
f.delFree(atom-leftFree, leftFree)
|
||||
f.delFree(atom+atoms, rightFree)
|
||||
f.addFree(atom-leftFree, leftFree+atoms+rightFree)
|
||||
case leftFree != 0 && rightFree == 0:
|
||||
f.delFree(atom-leftFree, leftFree)
|
||||
if atom+atoms == f.atoms { // the left free neighbour and this block together are an empty tail
|
||||
f.atoms = atom - leftFree
|
||||
f.f.Truncate(f.atoms << 4)
|
||||
return
|
||||
}
|
||||
|
||||
f.addFree(atom-leftFree, leftFree+atoms)
|
||||
case leftFree == 0 && rightFree != 0:
|
||||
f.delFree(atom+atoms, rightFree)
|
||||
f.addFree(atom, atoms+rightFree)
|
||||
default: // leftFree == 0 && rightFree == 0
|
||||
if atom+atoms < f.atoms { // isolated inner block
|
||||
f.addFree(atom, atoms)
|
||||
return
|
||||
}
|
||||
|
||||
f.f.Truncate(atom << 4) // isolated tail block, shrink file
|
||||
f.atoms = atom
|
||||
}
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
// Realloc reallocates space associated with handle to accommodate b, and returns the newhandle
|
||||
// newly associated with b and an error if any. If keepHandle == true then Realloc guarantees
|
||||
// newhandle == handle even if the new data are larger than the previous content associated
|
||||
// with handle. If !keepHandle && newhandle != handle then reusing handle will probably corrupt
|
||||
// the database.
|
||||
// The above effects are like corrupting memory/data via passing an invalid pointer to C.realloc().
|
||||
func (f *File) Realloc(handle Handle, b []byte, keepHandle bool) (newhandle Handle, err error) {
|
||||
err = storage.Mutate(f.Accessor(), func() (err error) {
|
||||
switch handle {
|
||||
case 0, 2:
|
||||
return &EHandle{f.f.Name(), handle}
|
||||
case 1:
|
||||
keepHandle = true
|
||||
}
|
||||
newhandle = handle
|
||||
atom, newatoms := int64(handle), rq2Atoms(len(b))
|
||||
if newatoms > 3856 {
|
||||
return &EBadRequest{f.f.Name(), len(b)}
|
||||
}
|
||||
|
||||
typ, oldatoms := f.getInfo(atom)
|
||||
switch {
|
||||
default:
|
||||
return &ECorrupted{f.f.Name(), atom << 4}
|
||||
case typ <= 0xfc: // non relocated used block
|
||||
switch {
|
||||
case newatoms == oldatoms: // in place replace
|
||||
f.writeUsed(b, atom)
|
||||
case newatoms < oldatoms: // in place shrink
|
||||
rightFree := f.checkRight(atom, oldatoms)
|
||||
if rightFree > 0 { // right join
|
||||
f.delFree(atom+oldatoms, rightFree)
|
||||
}
|
||||
f.addFree(atom+newatoms, oldatoms+rightFree-newatoms)
|
||||
f.writeUsed(b, atom)
|
||||
case newatoms > oldatoms:
|
||||
if rightFree := f.checkRight(atom, oldatoms); rightFree > 0 && newatoms <= oldatoms+rightFree {
|
||||
f.delFree(atom+oldatoms, rightFree)
|
||||
if newatoms < oldatoms+rightFree {
|
||||
f.addFree(atom+newatoms, oldatoms+rightFree-newatoms)
|
||||
}
|
||||
f.writeUsed(b, atom)
|
||||
return
|
||||
}
|
||||
|
||||
if !keepHandle {
|
||||
f.Free(Handle(atom))
|
||||
newhandle, err = f.Alloc(b)
|
||||
return
|
||||
}
|
||||
|
||||
// reloc
|
||||
newatom, e := f.Alloc(b)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
|
||||
buf := make([]byte, 16)
|
||||
buf[0] = 0xfd
|
||||
Handle(newatom).Put(buf[1:])
|
||||
f.Realloc(Handle(atom), buf[1:], true)
|
||||
f.write(buf[:1], atom<<4)
|
||||
}
|
||||
case typ == 0xfd: // reloc
|
||||
var target Handle
|
||||
buf := make([]byte, 7)
|
||||
f.read(buf, atom<<4+1)
|
||||
target.Get(buf)
|
||||
switch {
|
||||
case newatoms == 1:
|
||||
f.writeUsed(b, atom)
|
||||
f.Free(target)
|
||||
default:
|
||||
if rightFree := f.checkRight(atom, 1); rightFree > 0 && newatoms <= 1+rightFree {
|
||||
f.delFree(atom+1, rightFree)
|
||||
if newatoms < 1+rightFree {
|
||||
f.addFree(atom+newatoms, 1+rightFree-newatoms)
|
||||
}
|
||||
f.writeUsed(b, atom)
|
||||
f.Free(target)
|
||||
return
|
||||
}
|
||||
|
||||
newtarget, e := f.Realloc(Handle(target), b, false)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
|
||||
if newtarget != target {
|
||||
Handle(newtarget).Put(buf)
|
||||
f.write(buf, atom<<4+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Lock locks f for writing. If the lock is already locked for reading or writing,
|
||||
// Lock blocks until the lock is available. To ensure that the lock eventually becomes available,
|
||||
// a blocked Lock call excludes new readers from acquiring the lock.
|
||||
func (f *File) Lock() {
|
||||
f.rwm.Lock()
|
||||
}
|
||||
|
||||
// RLock locks f for reading. If the lock is already locked for writing or there is a writer
|
||||
// already waiting to release the lock, RLock blocks until the writer has released the lock.
|
||||
func (f *File) RLock() {
|
||||
f.rwm.RLock()
|
||||
}
|
||||
|
||||
// Unlock unlocks f for writing. It is a run-time error if f is not locked for writing on entry to Unlock.
|
||||
//
|
||||
// As with Mutexes, a locked RWMutex is not associated with a particular goroutine.
|
||||
// One goroutine may RLock (Lock) f and then arrange for another goroutine to RUnlock (Unlock) it.
|
||||
func (f *File) Unlock() {
|
||||
f.rwm.Unlock()
|
||||
}
|
||||
|
||||
// RUnlock undoes a single RLock call; it does not affect other simultaneous readers.
|
||||
// It is a run-time error if f is not locked for reading on entry to RUnlock.
|
||||
func (f *File) RUnlock() {
|
||||
f.rwm.RUnlock()
|
||||
}
|
||||
|
||||
// LockedAlloc wraps Alloc in a Lock/Unlock pair.
|
||||
func (f *File) LockedAlloc(b []byte) (handle Handle, err error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
return f.Alloc(b)
|
||||
}
|
||||
|
||||
// LockedFree wraps Free in a Lock/Unlock pair.
|
||||
func (f *File) LockedFree(handle Handle) (err error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
return f.Free(handle)
|
||||
}
|
||||
|
||||
// LockedRead wraps Read in a RLock/RUnlock pair.
|
||||
func (f *File) LockedRead(handle Handle) (b []byte, err error) {
|
||||
f.RLock()
|
||||
defer f.RUnlock()
|
||||
return f.Read(handle)
|
||||
}
|
||||
|
||||
// LockedRealloc wraps Realloc in a Lock/Unlock pair.
|
||||
func (f *File) LockedRealloc(handle Handle, b []byte, keepHandle bool) (newhandle Handle, err error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
return f.Realloc(handle, b, keepHandle)
|
||||
}
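
// A sketch of typical use of the locked wrappers above (illustrative only;
// 'f' is assumed to be an already opened *File):
//
//	h, err := f.LockedAlloc([]byte("payload"))
//	if err != nil {
//		// handle the error
//	}
//	b, err := f.LockedRead(h) // safe alongside other concurrent readers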
|
15
vendor/github.com/cznic/fileutil/falloc/test_deps.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
package falloc
|
||||
|
||||
// Pull test dependencies too.
|
||||
// Enables easy 'go test X' after 'go get X'
|
||||
import (
|
||||
_ "github.com/cznic/fileutil"
|
||||
_ "github.com/cznic/fileutil/storage"
|
||||
_ "github.com/cznic/mathutil"
|
||||
)
|
223
vendor/github.com/cznic/fileutil/fileutil.go
generated
vendored
Normal file
@ -0,0 +1,223 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package fileutil collects some file utility functions.
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// GoMFile is a concurrent access safe version of MFile.
|
||||
type GoMFile struct {
|
||||
mfile *MFile
|
||||
mutex sync.Mutex
|
||||
}
|
||||
|
||||
// NewGoMFile returns a newly created GoMFile.
|
||||
func NewGoMFile(fname string, flag int, perm os.FileMode, delta_ns int64) (m *GoMFile, err error) {
|
||||
m = &GoMFile{}
|
||||
if m.mfile, err = NewMFile(fname, flag, perm, delta_ns); err != nil {
|
||||
m = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (m *GoMFile) File() (file *os.File, err error) {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
return m.mfile.File()
|
||||
}
|
||||
|
||||
func (m *GoMFile) SetChanged() {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
m.mfile.SetChanged()
|
||||
}
|
||||
|
||||
func (m *GoMFile) SetHandler(h MFileHandler) {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
m.mfile.SetHandler(h)
|
||||
}
|
||||
|
||||
// MFileHandler resolves modifications of File.
|
||||
// Possible File context is expected to be a part of the handler's closure.
|
||||
type MFileHandler func(*os.File) error
|
||||
|
||||
// MFile represents an os.File with a guard/handler on change/modification.
|
||||
// Example use case is an app with a configuration file which can be modified at any time
|
||||
// and has to be reloaded in such an event prior to performing something configurable by that
|
||||
// file. The checks are made only on access to the MFile file by
|
||||
// File() and a time threshold/hysteresis value can be chosen on creating a new MFile.
|
||||
type MFile struct {
|
||||
file *os.File
|
||||
handler MFileHandler
|
||||
t0 int64
|
||||
delta int64
|
||||
ctime int64
|
||||
}
|
||||
|
||||
// NewMFile returns a newly created MFile or an error if any.
|
||||
// The fname, flag and perm parameters have the same meaning as in os.Open.
|
||||
// For meaning of the delta_ns parameter please see the (m *MFile) File() docs.
|
||||
func NewMFile(fname string, flag int, perm os.FileMode, delta_ns int64) (m *MFile, err error) {
|
||||
m = &MFile{}
|
||||
m.t0 = time.Now().UnixNano()
|
||||
if m.file, err = os.OpenFile(fname, flag, perm); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var fi os.FileInfo
|
||||
if fi, err = m.file.Stat(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.ctime = fi.ModTime().UnixNano()
|
||||
m.delta = delta_ns
|
||||
runtime.SetFinalizer(m, func(m *MFile) {
|
||||
m.file.Close()
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// SetChanged forces next File() to unconditionally handle modification of the wrapped os.File.
|
||||
func (m *MFile) SetChanged() {
|
||||
m.ctime = -1
|
||||
}
|
||||
|
||||
// SetHandler sets a function to be invoked when modification of MFile is to be processed.
|
||||
func (m *MFile) SetHandler(h MFileHandler) {
|
||||
m.handler = h
|
||||
}
|
||||
|
||||
// File returns an os.File from MFile. If time elapsed between the last invocation of this function
|
||||
// and now is at least delta_ns ns (a parameter of NewMFile) then the file is checked for
|
||||
// change/modification. For delta_ns == 0 the modification is checked without getting the current time first.
|
||||
// If a change is detected a handler is invoked on the MFile file.
|
||||
// Any of these steps can produce an error. If that happens the function returns nil and the error.
|
||||
func (m *MFile) File() (file *os.File, err error) {
|
||||
var now int64
|
||||
|
||||
mustCheck := m.delta == 0
|
||||
if !mustCheck {
|
||||
now = time.Now().UnixNano()
|
||||
mustCheck = now-m.t0 > m.delta
|
||||
}
|
||||
|
||||
if mustCheck { // check interval reached
|
||||
var fi os.FileInfo
|
||||
if fi, err = m.file.Stat(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if fi.ModTime().UnixNano() != m.ctime { // modification detected
|
||||
if m.handler == nil {
|
||||
return nil, fmt.Errorf("no handler set for modified file %q", m.file.Name())
|
||||
}
|
||||
if err = m.handler(m.file); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.ctime = fi.ModTime().UnixNano()
|
||||
}
|
||||
m.t0 = now
|
||||
}
|
||||
|
||||
return m.file, nil
|
||||
}
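
// A usage sketch of the change detection described above (the file name, the
// one second threshold and the handler body are arbitrary examples; the os
// and time packages are assumed to be imported):
//
//	m, err := NewMFile("app.conf", os.O_RDONLY, 0, int64(time.Second))
//	if err != nil {
//		// handle the error
//	}
//	m.SetHandler(func(f *os.File) error {
//		// reload the configuration from f
//		return nil
//	})
//	f, err := m.File() // invokes the handler first if the file was modified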
|
||||
|
||||
// Read reads buf from r. It will either fill the full buf or fail.
|
||||
// It wraps the functionality of an io.Reader which may return fewer bytes than requested,
|
||||
// but may block if not all data are ready for the io.Reader.
|
||||
func Read(r io.Reader, buf []byte) (err error) {
|
||||
have := 0
|
||||
remain := len(buf)
|
||||
got := 0
|
||||
for remain > 0 {
|
||||
if got, err = r.Read(buf[have:]); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
remain -= got
|
||||
have += got
|
||||
}
|
||||
return
|
||||
}
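
// A usage sketch (illustrative only; src is any io.Reader): read an
// exact-size record or fail.
//
//	rec := make([]byte, 512)
//	if err := Read(src, rec); err != nil {
//		// handle the error
//	}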
|
||||
|
||||
// "os" and/or "syscall" extensions
|
||||
|
||||
// FadviseAdvice is used by Fadvise.
|
||||
type FadviseAdvice int
|
||||
|
||||
// FadviseAdvice values.
|
||||
const (
|
||||
// $ grep FADV /usr/include/bits/fcntl.h
|
||||
POSIX_FADV_NORMAL FadviseAdvice = iota // No further special treatment.
|
||||
POSIX_FADV_RANDOM // Expect random page references.
|
||||
POSIX_FADV_SEQUENTIAL // Expect sequential page references.
|
||||
POSIX_FADV_WILLNEED // Will need these pages.
|
||||
POSIX_FADV_DONTNEED // Don't need these pages.
|
||||
POSIX_FADV_NOREUSE // Data will be accessed once.
|
||||
)
|
||||
|
||||
// TempFile creates a new temporary file in the directory dir with a name
|
||||
// ending with suffix, basename starting with prefix, opens the file for
|
||||
// reading and writing, and returns the resulting *os.File. If dir is the
|
||||
// empty string, TempFile uses the default directory for temporary files (see
|
||||
// os.TempDir). Multiple programs calling TempFile simultaneously will not
|
||||
// choose the same file. The caller can use f.Name() to find the pathname of
|
||||
// the file. It is the caller's responsibility to remove the file when no
|
||||
// longer needed.
|
||||
//
|
||||
// NOTE: This function differs from ioutil.TempFile.
|
||||
func TempFile(dir, prefix, suffix string) (f *os.File, err error) {
|
||||
if dir == "" {
|
||||
dir = os.TempDir()
|
||||
}
|
||||
|
||||
nconflict := 0
|
||||
for i := 0; i < 10000; i++ {
|
||||
name := filepath.Join(dir, prefix+nextInfix()+suffix)
|
||||
f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if os.IsExist(err) {
|
||||
if nconflict++; nconflict > 10 {
|
||||
rand = reseed()
|
||||
}
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
return
|
||||
}
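
// A usage sketch (illustrative only; the prefix and suffix are arbitrary):
//
//	f, err := TempFile("", "dump-", ".tmp") // e.g. /tmp/dump-123456789.tmp
//	if err != nil {
//		// handle the error
//	}
//	defer os.Remove(f.Name())
//	defer f.Close()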
|
||||
|
||||
// Random number state.
|
||||
// We generate random temporary file names so that there's a good
|
||||
// chance the file doesn't exist yet - keeps the number of tries in
|
||||
// TempFile to a minimum.
|
||||
var rand uint32
|
||||
var randmu sync.Mutex
|
||||
|
||||
func reseed() uint32 {
|
||||
return uint32(time.Now().UnixNano() + int64(os.Getpid()))
|
||||
}
|
||||
|
||||
func nextInfix() string {
|
||||
randmu.Lock()
|
||||
r := rand
|
||||
if r == 0 {
|
||||
r = reseed()
|
||||
}
|
||||
r = r*1664525 + 1013904223 // constants from Numerical Recipes
|
||||
rand = r
|
||||
randmu.Unlock()
|
||||
return strconv.Itoa(int(1e9 + r%1e9))[1:]
|
||||
}
|
25
vendor/github.com/cznic/fileutil/fileutil_arm.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// PunchHole deallocates space inside a file in the byte range starting at
|
||||
// offset and continuing for len bytes. Not supported on ARM.
|
||||
func PunchHole(f *os.File, off, len int64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fadvise predeclares an access pattern for file data. See also 'man 2
|
||||
// posix_fadvise'. Not supported on ARM.
|
||||
func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsEOF reports whether err is an EOF condition.
|
||||
func IsEOF(err error) bool { return err == io.EOF }
|
25
vendor/github.com/cznic/fileutil/fileutil_darwin.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// PunchHole deallocates space inside a file in the byte range starting at
|
||||
// offset and continuing for len bytes. Not supported on OSX.
|
||||
func PunchHole(f *os.File, off, len int64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fadvise predeclares an access pattern for file data. See also 'man 2
|
||||
// posix_fadvise'. Not supported on OSX.
|
||||
func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsEOF reports whether err is an EOF condition.
|
||||
func IsEOF(err error) bool { return err == io.EOF }
|
27
vendor/github.com/cznic/fileutil/fileutil_freebsd.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !arm
|
||||
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// PunchHole deallocates space inside a file in the byte range starting at
|
||||
// offset and continuing for len bytes. Unimplemented on FreeBSD.
|
||||
func PunchHole(f *os.File, off, len int64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fadvise predeclares an access pattern for file data. See also 'man 2
|
||||
// posix_fadvise'. Unimplemented on FreeBSD.
|
||||
func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsEOF reports whether err is an EOF condition.
|
||||
func IsEOF(err error) bool { return err == io.EOF }
|
96
vendor/github.com/cznic/fileutil/fileutil_linux.go
generated
vendored
Normal file
@ -0,0 +1,96 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !arm
|
||||
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strconv"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func n(s []byte) byte {
|
||||
for i, c := range s {
|
||||
if c < '0' || c > '9' {
|
||||
s = s[:i]
|
||||
break
|
||||
}
|
||||
}
|
||||
v, _ := strconv.Atoi(string(s))
|
||||
return byte(v)
|
||||
}
|
||||
|
||||
func init() {
|
||||
b, err := ioutil.ReadFile("/proc/sys/kernel/osrelease")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
tokens := bytes.Split(b, []byte("."))
|
||||
if len(tokens) > 3 {
|
||||
tokens = tokens[:3]
|
||||
}
|
||||
switch len(tokens) {
|
||||
case 3:
|
||||
// Supported since kernel 2.6.38
|
||||
if bytes.Compare([]byte{n(tokens[0]), n(tokens[1]), n(tokens[2])}, []byte{2, 6, 38}) < 0 {
|
||||
puncher = func(*os.File, int64, int64) error { return nil }
|
||||
}
|
||||
case 2:
|
||||
if bytes.Compare([]byte{n(tokens[0]), n(tokens[1])}, []byte{2, 7}) < 0 {
|
||||
puncher = func(*os.File, int64, int64) error { return nil }
|
||||
}
|
||||
default:
|
||||
puncher = func(*os.File, int64, int64) error { return nil }
|
||||
}
|
||||
}
|
||||
|
||||
var puncher = func(f *os.File, off, len int64) error {
|
||||
const (
|
||||
/*
|
||||
/usr/include/linux$ grep FL_ falloc.h
|
||||
*/
|
||||
_FALLOC_FL_KEEP_SIZE = 0x01 // default is extend size
|
||||
_FALLOC_FL_PUNCH_HOLE = 0x02 // de-allocates range
|
||||
)
|
||||
|
||||
_, _, errno := syscall.Syscall6(
|
||||
syscall.SYS_FALLOCATE,
|
||||
uintptr(f.Fd()),
|
||||
uintptr(_FALLOC_FL_KEEP_SIZE|_FALLOC_FL_PUNCH_HOLE),
|
||||
uintptr(off),
|
||||
uintptr(len),
|
||||
0, 0)
|
||||
if errno != 0 {
|
||||
return os.NewSyscallError("SYS_FALLOCATE", errno)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PunchHole deallocates space inside a file in the byte range starting at
|
||||
// offset and continuing for len bytes. No-op for kernels < 2.6.38 (or < 2.7).
|
||||
func PunchHole(f *os.File, off, len int64) error {
|
||||
return puncher(f, off, len)
|
||||
}
|
||||
|
||||
// Fadvise predeclares an access pattern for file data. See also 'man 2
|
||||
// posix_fadvise'.
|
||||
func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error {
|
||||
_, _, errno := syscall.Syscall6(
|
||||
syscall.SYS_FADVISE64,
|
||||
uintptr(f.Fd()),
|
||||
uintptr(off),
|
||||
uintptr(len),
|
||||
uintptr(advice),
|
||||
0, 0)
|
||||
return os.NewSyscallError("SYS_FADVISE64", errno)
|
||||
}
|
||||
|
||||
// IsEOF reports whether err is an EOF condition.
|
||||
func IsEOF(err error) bool { return err == io.EOF }
|
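The PunchHole API above is easiest to see in use. A minimal caller sketch follows (an editor's illustration, not part of the vendored code; the scratch file name and offsets are arbitrary assumptions, and only calls whose signatures appear in this diff are used):

package main

import (
	"log"
	"os"

	"github.com/cznic/fileutil"
)

func main() {
	f, err := os.OpenFile("scratch.bin", os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Deallocate 4 KiB starting at offset 8 KiB. On platforms or kernels
	// where hole punching is not supported this is a documented no-op.
	if err := fileutil.PunchHole(f, 8192, 4096); err != nil {
		log.Fatal(err)
	}
}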
27
vendor/github.com/cznic/fileutil/fileutil_netbsd.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !arm
|
||||
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// PunchHole deallocates space inside a file in the byte range starting at
|
||||
// offset and continuing for len bytes. Similar to FreeBSD, this is
|
||||
// unimplemented.
|
||||
func PunchHole(f *os.File, off, len int64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fadvise predeclares an access pattern for file data. Unimplemented on NetBSD.
|
||||
func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsEOF reports whether err is an EOF condition.
|
||||
func IsEOF(err error) bool { return err == io.EOF }
|
25
vendor/github.com/cznic/fileutil/fileutil_openbsd.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// PunchHole deallocates space inside a file in the byte range starting at
|
||||
// offset and continuing for len bytes. Similar to FreeBSD, this is
|
||||
// unimplemented.
|
||||
func PunchHole(f *os.File, off, len int64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fadvise predeclares an access pattern for file data. Unimplemented on OpenBSD.
|
||||
func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsEOF reports whether err is an EOF condition.
|
||||
func IsEOF(err error) bool { return err == io.EOF }
|
25
vendor/github.com/cznic/fileutil/fileutil_plan9.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// PunchHole deallocates space inside a file in the byte range starting at
|
||||
// offset and continuing for len bytes. Unimplemented on Plan 9.
|
||||
func PunchHole(f *os.File, off, len int64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fadvise predeclares an access pattern for file data. See also 'man 2
|
||||
// posix_fadvise'. Unimplemented on Plan 9.
|
||||
func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsEOF reports whether err is an EOF condition.
|
||||
func IsEOF(err error) bool { return err == io.EOF }
|
27
vendor/github.com/cznic/fileutil/fileutil_solaris.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
// Copyright (c) 2013 jnml. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.3
|
||||
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// PunchHole deallocates space inside a file in the byte range starting at
|
||||
// offset and continuing for len bytes. Not supported on Solaris.
|
||||
func PunchHole(f *os.File, off, len int64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fadvise predeclares an access pattern for file data. See also 'man 2
|
||||
// posix_fadvise'. Not supported on Solaris.
|
||||
func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsEOF reports whether err is an EOF condition.
|
||||
func IsEOF(err error) bool { return err == io.EOF }
|
183
vendor/github.com/cznic/fileutil/fileutil_windows.go
generated
vendored
Normal file
@ -0,0 +1,183 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// PunchHole deallocates space inside a file in the byte range starting at
|
||||
// offset and continuing for len bytes. Implemented on Windows by marking the file sparse and zeroing the range.
|
||||
func PunchHole(f *os.File, off, len int64) error {
|
||||
return puncher(f, off, len)
|
||||
}
|
||||
|
||||
// Fadvise predeclares an access pattern for file data. See also 'man 2
|
||||
// posix_fadvise'. Not supported on Windows.
|
||||
func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsEOF reports whether err is an EOF condition.
|
||||
func IsEOF(err error) bool {
|
||||
if err == io.EOF {
|
||||
return true
|
||||
}
|
||||
|
||||
// http://social.technet.microsoft.com/Forums/windowsserver/en-US/1a16311b-c625-46cf-830b-6a26af488435/how-to-solve-error-38-0x26-errorhandleeof-using-fsctlgetretrievalpointers
|
||||
x, ok := err.(*os.PathError)
|
||||
return ok && x.Op == "read" && x.Err.(syscall.Errno) == 0x26
|
||||
}
|
||||
|
||||
var (
|
||||
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||
|
||||
procDeviceIOControl = modkernel32.NewProc("DeviceIoControl")
|
||||
|
||||
sparseFilesMu sync.Mutex
|
||||
sparseFiles map[uintptr]struct{}
|
||||
)
|
||||
|
||||
func init() {
|
||||
// sparseFiles is an fd set for already "sparsed" files - according to
|
||||
// msdn.microsoft.com/en-us/library/windows/desktop/aa364225(v=vs.85).aspx
|
||||
// the file handles are unique per process.
|
||||
sparseFiles = make(map[uintptr]struct{})
|
||||
}
|
||||
|
||||
// puncher punches a hole into the given file starting at offset,
|
||||
// measuring "size" bytes
|
||||
// (http://msdn.microsoft.com/en-us/library/windows/desktop/aa364597%28v=vs.85%29.aspx)
|
||||
func puncher(file *os.File, offset, size int64) error {
|
||||
if err := ensureFileSparse(file); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// http://msdn.microsoft.com/en-us/library/windows/desktop/aa364411%28v=vs.85%29.aspx
|
||||
// typedef struct _FILE_ZERO_DATA_INFORMATION {
|
||||
// LARGE_INTEGER FileOffset;
|
||||
// LARGE_INTEGER BeyondFinalZero;
|
||||
//} FILE_ZERO_DATA_INFORMATION, *PFILE_ZERO_DATA_INFORMATION;
|
||||
type fileZeroDataInformation struct {
|
||||
FileOffset, BeyondFinalZero int64
|
||||
}
|
||||
|
||||
lpInBuffer := fileZeroDataInformation{
|
||||
FileOffset: offset,
|
||||
BeyondFinalZero: offset + size}
|
||||
return deviceIOControl(false, file.Fd(), uintptr(unsafe.Pointer(&lpInBuffer)), 16)
|
||||
}
|
||||
|
||||
// // http://msdn.microsoft.com/en-us/library/windows/desktop/cc948908%28v=vs.85%29.aspx
|
||||
// type fileSetSparseBuffer struct {
|
||||
// SetSparse bool
|
||||
// }
|
||||
|
||||
func ensureFileSparse(file *os.File) (err error) {
|
||||
fd := file.Fd()
|
||||
sparseFilesMu.Lock()
|
||||
if _, ok := sparseFiles[fd]; ok {
|
||||
sparseFilesMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
if err = deviceIOControl(true, fd, 0, 0); err == nil {
|
||||
sparseFiles[fd] = struct{}{}
|
||||
}
|
||||
sparseFilesMu.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
func deviceIOControl(setSparse bool, fd, inBuf, inBufLen uintptr) (err error) {
|
||||
const (
|
||||
//http://source.winehq.org/source/include/winnt.h#L4605
|
||||
file_read_data = 1
|
||||
file_write_data = 2
|
||||
|
||||
// METHOD_BUFFERED 0
|
||||
method_buffered = 0
|
||||
// FILE_ANY_ACCESS 0
|
||||
file_any_access = 0
|
||||
// FILE_DEVICE_FILE_SYSTEM 0x00000009
|
||||
file_device_file_system = 0x00000009
|
||||
// FILE_SPECIAL_ACCESS (FILE_ANY_ACCESS)
|
||||
file_special_access = file_any_access
|
||||
file_read_access = file_read_data
|
||||
file_write_access = file_write_data
|
||||
|
||||
// http://source.winehq.org/source/include/winioctl.h
|
||||
// #define CTL_CODE ( DeviceType,
|
||||
// Function,
|
||||
// Method,
|
||||
// Access )
|
||||
// ((DeviceType) << 16) | ((Access) << 14) | ((Function) << 2) | (Method)
|
||||
|
||||
// FSCTL_SET_COMPRESSION CTL_CODE(FILE_DEVICE_FILE_SYSTEM, 16, METHOD_BUFFERED, FILE_READ_DATA | FILE_WRITE_DATA)
|
||||
fsctl_set_compression = (file_device_file_system << 16) | ((file_read_access | file_write_access) << 14) | (16 << 2) | method_buffered
|
||||
// FSCTL_SET_SPARSE CTL_CODE(FILE_DEVICE_FILE_SYSTEM, 49, METHOD_BUFFERED, FILE_SPECIAL_ACCESS)
|
||||
fsctl_set_sparse = (file_device_file_system << 16) | (file_special_access << 14) | (49 << 2) | method_buffered
|
||||
// FSCTL_SET_ZERO_DATA CTL_CODE(FILE_DEVICE_FILE_SYSTEM, 50, METHOD_BUFFERED, FILE_WRITE_DATA)
|
||||
fsctl_set_zero_data = (file_device_file_system << 16) | (file_write_data << 14) | (50 << 2) | method_buffered
|
||||
)
|
||||
retPtr := uintptr(unsafe.Pointer(&(make([]byte, 8)[0])))
|
||||
var r1 uintptr
|
||||
var e1 syscall.Errno
|
||||
if setSparse {
|
||||
// BOOL
|
||||
// WINAPI
|
||||
// DeviceIoControl( (HANDLE) hDevice, // handle to a file
|
||||
// FSCTL_SET_SPARSE, // dwIoControlCode
|
||||
// (PFILE_SET_SPARSE_BUFFER) lpInBuffer, // input buffer
|
||||
// (DWORD) nInBufferSize, // size of input buffer
|
||||
// NULL, // lpOutBuffer
|
||||
// 0, // nOutBufferSize
|
||||
// (LPDWORD) lpBytesReturned, // number of bytes returned
|
||||
// (LPOVERLAPPED) lpOverlapped ); // OVERLAPPED structure
|
||||
r1, _, e1 = syscall.Syscall9(procDeviceIOControl.Addr(), 8,
|
||||
fd,
|
||||
uintptr(fsctl_set_sparse),
|
||||
// If the lpInBuffer parameter is NULL, the operation will behave the same as if the SetSparse member of the FILE_SET_SPARSE_BUFFER structure were TRUE. In other words, the operation sets the file to a sparse file.
|
||||
0, // uintptr(unsafe.Pointer(&lpInBuffer)),
|
||||
0, // 1,
|
||||
0,
|
||||
0,
|
||||
retPtr,
|
||||
0,
|
||||
0)
|
||||
} else {
|
||||
// BOOL
|
||||
// WINAPI
|
||||
// DeviceIoControl( (HANDLE) hDevice, // handle to a file
|
||||
// FSCTL_SET_ZERO_DATA, // dwIoControlCode
|
||||
// (LPVOID) lpInBuffer, // input buffer
|
||||
// (DWORD) nInBufferSize, // size of input buffer
|
||||
// NULL, // lpOutBuffer
|
||||
// 0, // nOutBufferSize
|
||||
// (LPDWORD) lpBytesReturned, // number of bytes returned
|
||||
// (LPOVERLAPPED) lpOverlapped ); // OVERLAPPED structure
|
||||
r1, _, e1 = syscall.Syscall9(procDeviceIOControl.Addr(), 8,
|
||||
fd,
|
||||
uintptr(fsctl_set_zero_data),
|
||||
inBuf,
|
||||
inBufLen,
|
||||
0,
|
||||
0,
|
||||
retPtr,
|
||||
0,
|
||||
0)
|
||||
}
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = error(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
153
vendor/github.com/cznic/fileutil/hdb/hdb.go
generated
vendored
Normal file
@ -0,0 +1,153 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
/*
|
||||
WIP: Package hdb provides a "handle"/value DB-like store, but actually it's
|
||||
closer to the model of a process's virtual memory and its alloc, free and move
|
||||
methods.
|
||||
|
||||
The hdb package is a thin layer around falloc.File providing stable-only
|
||||
handles and the basic synchronizing primitives. The central functionality of
|
||||
hdb are the New, Set, Get and Delete methods of Store.
|
||||
|
||||
Conceptual analogy:
|
||||
New alloc(sizeof(content)), return new "memory" pointer (a handle).
|
||||
|
||||
Get memmove() from "memory" "pointed to" by handle to the result content.
|
||||
Note: Handle "knows" the size of its content.
|
||||
|
||||
Set memmove() from content to "memory" pointed to by handle.
|
||||
In contrast to real memory, the new content may have different
|
||||
size than the previously stored one w/o additional handling
|
||||
and the "pointer" handle remains the same.
|
||||
|
||||
Delete free() the "memory" "pointed to" by handle.
|
||||
*/
|
||||
package hdb
|
||||
|
||||
import (
|
||||
"github.com/cznic/fileutil/falloc"
|
||||
"github.com/cznic/fileutil/storage"
|
||||
)
|
||||
|
||||
type Store struct {
|
||||
f *falloc.File
|
||||
}
|
||||
|
||||
// New returns a newly created Store backed by accessor, discarding its contents, if any.
|
||||
// If successful, methods on the returned Store can be used for I/O.
|
||||
// It returns the Store and an error, if any.
|
||||
func New(accessor storage.Accessor) (store *Store, err error) {
|
||||
s := &Store{}
|
||||
if s.f, err = falloc.New(accessor); err == nil {
|
||||
store = s
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Open opens the Store from accessor.
|
||||
// If successful, methods on the returned Store can be used for data exchange.
|
||||
// It returns the Store and an error, if any.
|
||||
func Open(accessor storage.Accessor) (store *Store, err error) {
|
||||
s := &Store{}
|
||||
if s.f, err = falloc.Open(accessor); err == nil {
|
||||
store = s
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Close closes the store. Further access to the store has undefined behavior and may panic.
|
||||
// It returns an error, if any.
|
||||
func (s *Store) Close() (err error) {
|
||||
defer func() {
|
||||
s.f = nil
|
||||
}()
|
||||
|
||||
return s.f.Close()
|
||||
}
|
||||
|
||||
// Delete deletes the data associated with handle.
|
||||
// It returns an error if any.
|
||||
func (s *Store) Delete(handle falloc.Handle) (err error) {
|
||||
return s.f.Free(handle)
|
||||
}
|
||||
|
||||
// Get gets the data associated with handle.
|
||||
// It returns the data and an error, if any.
|
||||
func (s *Store) Get(handle falloc.Handle) (b []byte, err error) {
|
||||
return s.f.Read(handle)
|
||||
}
|
||||
|
||||
// New associates data with a new handle.
|
||||
// It returns the handle and an error, if any.
|
||||
func (s *Store) New(b []byte) (handle falloc.Handle, err error) {
|
||||
return s.f.Alloc(b)
|
||||
}
|
||||
|
||||
// Set associates data with an existing handle.
|
||||
// It returns an error, if any.
|
||||
func (s *Store) Set(handle falloc.Handle, b []byte) (err error) {
|
||||
_, err = s.f.Realloc(handle, b, true)
|
||||
return
|
||||
}
|
||||
|
||||
// Root returns the handle of the DB root (top level directory, ...).
|
||||
func (s *Store) Root() falloc.Handle {
|
||||
return s.f.Root()
|
||||
}
|
||||
|
||||
// File returns the underlying falloc.File of 's'.
|
||||
func (s *Store) File() *falloc.File {
|
||||
return s.f
|
||||
}
|
||||
|
||||
// Lock locks 's' for writing. If the lock is already locked for reading or writing,
|
||||
// Lock blocks until the lock is available. To ensure that the lock eventually becomes available,
|
||||
// a blocked Lock call excludes new readers from acquiring the lock.
|
||||
func (s *Store) Lock() {
|
||||
s.f.Lock()
|
||||
}
|
||||
|
||||
// RLock locks 's' for reading. If the lock is already locked for writing or there is a writer
|
||||
// already waiting to release the lock, RLock blocks until the writer has released the lock.
|
||||
func (s *Store) RLock() {
|
||||
s.f.RLock()
|
||||
}
|
||||
|
||||
// Unlock unlocks 's' for writing. It's a run-time error if 's' is not locked for writing on entry to Unlock.
|
||||
//
|
||||
// As with Mutexes, a locked RWMutex is not associated with a particular goroutine.
|
||||
// One goroutine may RLock (Lock) 's' and then arrange for another goroutine to RUnlock (Unlock) it.
|
||||
func (s *Store) Unlock() {
|
||||
s.f.Unlock()
|
||||
}
|
||||
|
||||
// RUnlock undoes a single RLock call; it does not affect other simultaneous readers.
|
||||
// It's a run-time error if 's' is not locked for reading on entry to RUnlock.
|
||||
func (s *Store) RUnlock() {
|
||||
s.f.RUnlock()
|
||||
}
|
||||
|
||||
// LockedNew wraps New in a Lock/Unlock pair.
|
||||
func (s *Store) LockedNew(b []byte) (handle falloc.Handle, err error) {
|
||||
return s.f.LockedAlloc(b)
|
||||
}
|
||||
|
||||
// LockedDelete wraps Delete in a Lock/Unlock pair.
|
||||
func (s *Store) LockedDelete(handle falloc.Handle) (err error) {
|
||||
return s.f.LockedFree(handle)
|
||||
}
|
||||
|
||||
// LockedGet wraps Get in a RLock/RUnlock pair.
|
||||
func (s *Store) LockedGet(handle falloc.Handle) (b []byte, err error) {
|
||||
return s.f.LockedRead(handle)
|
||||
}
|
||||
|
||||
// LockedSet wraps Set in a Lock/Unlock pair.
|
||||
func (s *Store) LockedSet(handle falloc.Handle, b []byte) (err error) {
|
||||
_, err = s.f.Realloc(handle, b, true)
|
||||
return
|
||||
}
|
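A minimal sketch of the Store API documented above (an editor's illustration, not vendored code; the file name is a hypothetical placeholder). It backs the Store with a file Accessor from the storage package added later in this commit:

package main

import (
	"log"
	"os"

	"github.com/cznic/fileutil/hdb"
	"github.com/cznic/fileutil/storage"
)

func main() {
	acc, err := storage.NewFile("example.hdb", os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		log.Fatal(err)
	}

	store, err := hdb.New(acc) // discards any previous contents, per the doc comment
	if err != nil {
		log.Fatal(err)
	}
	defer store.Close()

	h, err := store.New([]byte("hello")) // alloc: returns a stable handle
	if err != nil {
		log.Fatal(err)
	}

	// The new content may be larger; the handle stays the same.
	if err := store.Set(h, []byte("hello, world")); err != nil {
		log.Fatal(err)
	}

	b, err := store.Get(h)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s", b)

	if err := store.Delete(h); err != nil { // free the handle
		log.Fatal(err)
	}
}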
13
vendor/github.com/cznic/fileutil/hdb/test_deps.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
package hdb
|
||||
|
||||
// Pull test dependencies too.
|
||||
// Enables easy 'go test X' after 'go get X'
|
||||
import (
|
||||
// nothing yet
|
||||
)
|
322
vendor/github.com/cznic/fileutil/storage/cache.go
generated
vendored
Normal file
@ -0,0 +1,322 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
type cachepage struct {
|
||||
b [512]byte
|
||||
dirty bool
|
||||
lru *list.Element
|
||||
pi int64
|
||||
valid int // page content is b[:valid]
|
||||
}
|
||||
|
||||
func (p *cachepage) wr(b []byte, off int) (wasDirty bool) {
|
||||
copy(p.b[off:], b)
|
||||
if n := off + len(b); n > p.valid {
|
||||
p.valid = n
|
||||
}
|
||||
wasDirty = p.dirty
|
||||
p.dirty = true
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Cache) rd(off int64, read bool) (p *cachepage, ok bool) {
|
||||
c.Rq++
|
||||
pi := off >> 9
|
||||
if p, ok = c.m[pi]; ok {
|
||||
c.lru.MoveToBack(p.lru)
|
||||
return
|
||||
}
|
||||
|
||||
if !read {
|
||||
return
|
||||
}
|
||||
|
||||
fp := off &^ 511
|
||||
if fp >= c.size {
|
||||
return
|
||||
}
|
||||
|
||||
rq := 512
|
||||
if fp+512 > c.size {
|
||||
rq = int(c.size - fp)
|
||||
}
|
||||
p = &cachepage{pi: pi, valid: rq}
|
||||
p.lru = c.lru.PushBack(p)
|
||||
if n, err := c.f.ReadAt(p.b[:p.valid], fp); n != rq {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
c.Load++
|
||||
if c.advise != nil {
|
||||
c.advise(fp, 512, false)
|
||||
}
|
||||
c.m[pi], ok = p, true
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Cache) wr(off int64) (p *cachepage) {
|
||||
var ok bool
|
||||
if p, ok = c.rd(off, false); ok {
|
||||
return
|
||||
}
|
||||
|
||||
pi := off >> 9
|
||||
p = &cachepage{pi: pi}
|
||||
p.lru = c.lru.PushBack(p)
|
||||
c.m[pi] = p
|
||||
return
|
||||
}
|
||||
|
||||
// Cache provides caching support for another store Accessor.
|
||||
type Cache struct {
|
||||
advise func(int64, int, bool)
|
||||
clean chan bool
|
||||
cleaning int32
|
||||
close chan bool
|
||||
f Accessor
|
||||
fi *FileInfo
|
||||
lock sync.Mutex
|
||||
lru *list.List
|
||||
m map[int64]*cachepage
|
||||
maxpages int
|
||||
size int64
|
||||
sync chan bool
|
||||
wlist *list.List
|
||||
write chan bool
|
||||
writing int32
|
||||
Rq int64 // Pages requested from cache
|
||||
Load int64 // Pages loaded (cache miss)
|
||||
Purge int64 // Pages purged
|
||||
Top int // "High water" pages
|
||||
}
|
||||
|
||||
// Implementation of Accessor.
|
||||
func (c *Cache) BeginUpdate() error { return nil }
|
||||
|
||||
// Implementation of Accessor.
|
||||
func (c *Cache) EndUpdate() error { return nil }
|
||||
|
||||
// NewCache creates a caching Accessor from store with a total of maxcache bytes.
|
||||
// NewCache returns the new Cache, implementing Accessor or an error if any.
|
||||
//
|
||||
// The LRU mechanism is used, so the cache tries to keep often accessed pages cached.
|
||||
//
|
||||
func NewCache(store Accessor, maxcache int64, advise func(int64, int, bool)) (c *Cache, err error) {
|
||||
var fi os.FileInfo
|
||||
if fi, err = store.Stat(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
x := maxcache >> 9
|
||||
if x > math.MaxInt32/2 {
|
||||
x = math.MaxInt32 / 2
|
||||
}
|
||||
c = &Cache{
|
||||
advise: advise,
|
||||
clean: make(chan bool, 1),
|
||||
close: make(chan bool),
|
||||
f: store,
|
||||
lru: list.New(), // front == oldest used, back == most recently used
|
||||
m: make(map[int64]*cachepage),
|
||||
maxpages: int(x),
|
||||
size: fi.Size(),
|
||||
sync: make(chan bool),
|
||||
wlist: list.New(),
|
||||
write: make(chan bool, 1),
|
||||
}
|
||||
c.fi = NewFileInfo(fi, c)
|
||||
go c.writer()
|
||||
go c.cleaner(int((int64(c.maxpages) * 95) / 100)) // hysteresis
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Cache) Accessor() Accessor {
|
||||
return c.f
|
||||
}
|
||||
|
||||
func (c *Cache) Close() (err error) {
|
||||
close(c.write)
|
||||
<-c.close
|
||||
close(c.clean)
|
||||
<-c.close
|
||||
return c.f.Close()
|
||||
}
|
||||
|
||||
func (c *Cache) Name() (s string) {
|
||||
return c.f.Name()
|
||||
}
|
||||
|
||||
func (c *Cache) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
po := int(off) & 0x1ff
|
||||
bp := 0
|
||||
rem := len(b)
|
||||
m := 0
|
||||
for rem != 0 {
|
||||
c.lock.Lock() // X1+
|
||||
p, ok := c.rd(off, true)
|
||||
if !ok {
|
||||
c.lock.Unlock() // X1-
|
||||
return -1, io.EOF
|
||||
}
|
||||
|
||||
rq := rem
|
||||
if po+rq > 512 {
|
||||
rq = 512 - po
|
||||
}
|
||||
if n := copy(b[bp:bp+rq], p.b[po:p.valid]); n != rq {
|
||||
c.lock.Unlock() // X1-
|
||||
return -1, io.EOF
|
||||
}
|
||||
|
||||
m = len(c.m)
|
||||
c.lock.Unlock() // X1-
|
||||
po = 0
|
||||
bp += rq
|
||||
off += int64(rq)
|
||||
rem -= rq
|
||||
n += rq
|
||||
}
|
||||
if m > c.maxpages && atomic.CompareAndSwapInt32(&c.cleaning, 0, 1) {
|
||||
if m > c.Top {
|
||||
c.Top = m
|
||||
}
|
||||
c.clean <- true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Cache) Stat() (fi os.FileInfo, err error) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
return c.fi, nil
|
||||
}
|
||||
|
||||
func (c *Cache) Sync() (err error) {
|
||||
c.write <- false
|
||||
<-c.sync
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Cache) Truncate(size int64) (err error) {
|
||||
c.Sync() //TODO improve (discard pages, the writer goroutine should also be aware, ...)
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
c.size = size
|
||||
return c.f.Truncate(size)
|
||||
}
|
||||
|
||||
func (c *Cache) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
po := int(off) & 0x1ff
|
||||
bp := 0
|
||||
rem := len(b)
|
||||
m := 0
|
||||
for rem != 0 {
|
||||
c.lock.Lock() // X+
|
||||
p := c.wr(off)
|
||||
rq := rem
|
||||
if po+rq > 512 {
|
||||
rq = 512 - po
|
||||
}
|
||||
if wasDirty := p.wr(b[bp:bp+rq], po); !wasDirty {
|
||||
c.wlist.PushBack(p)
|
||||
}
|
||||
m = len(c.m)
|
||||
po = 0
|
||||
bp += rq
|
||||
off += int64(rq)
|
||||
if off > c.size {
|
||||
c.size = off
|
||||
}
|
||||
c.lock.Unlock() // X-
|
||||
rem -= rq
|
||||
n += rq
|
||||
}
|
||||
if atomic.CompareAndSwapInt32(&c.writing, 0, 1) {
|
||||
c.write <- true
|
||||
}
|
||||
if m > c.maxpages && atomic.CompareAndSwapInt32(&c.cleaning, 0, 1) {
|
||||
if m > c.Top {
|
||||
c.Top = m
|
||||
}
|
||||
c.clean <- true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Cache) writer() {
|
||||
for ok := true; ok; {
|
||||
var wr bool
|
||||
var off int64
|
||||
wr, ok = <-c.write
|
||||
for {
|
||||
c.lock.Lock() // X1+
|
||||
item := c.wlist.Front()
|
||||
if item == nil {
|
||||
c.lock.Unlock() // X1-
|
||||
break
|
||||
}
|
||||
|
||||
p := item.Value.(*cachepage)
|
||||
off = p.pi << 9
|
||||
if n, err := c.f.WriteAt(p.b[:p.valid], off); n != p.valid {
|
||||
c.lock.Unlock() // X1-
|
||||
panic("TODO Cache.writer errchan") //TODO +errchan
|
||||
panic(err)
|
||||
}
|
||||
|
||||
p.dirty = false
|
||||
c.wlist.Remove(item)
|
||||
if c.advise != nil {
|
||||
c.advise(off, 512, true)
|
||||
}
|
||||
c.lock.Unlock() // X1-
|
||||
}
|
||||
switch {
|
||||
case wr:
|
||||
atomic.AddInt32(&c.writing, -1)
|
||||
case ok:
|
||||
c.sync <- true
|
||||
}
|
||||
}
|
||||
c.close <- true
|
||||
}
|
||||
|
||||
func (c *Cache) cleaner(limit int) {
|
||||
for _ = range c.clean {
|
||||
var item *list.Element
|
||||
for {
|
||||
c.lock.Lock() // X1+
|
||||
if len(c.m) < limit {
|
||||
c.lock.Unlock() // X1-
|
||||
break
|
||||
}
|
||||
|
||||
if item == nil {
|
||||
item = c.lru.Front()
|
||||
}
|
||||
if p := item.Value.(*cachepage); !p.dirty {
|
||||
delete(c.m, p.pi)
|
||||
c.lru.Remove(item)
|
||||
c.Purge++
|
||||
}
|
||||
item = item.Next()
|
||||
c.lock.Unlock() // X1-
|
||||
}
|
||||
atomic.AddInt32(&c.cleaning, -1)
|
||||
}
|
||||
c.close <- true
|
||||
}
|
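A minimal sketch of wrapping a store with the Cache above (an editor's illustration, not vendored code; the file name is a hypothetical placeholder). Passing a nil advise callback is fine since the cache only calls it when non-nil:

package main

import (
	"log"
	"os"

	"github.com/cznic/fileutil/storage"
)

func main() {
	f, err := storage.NewFile("data.bin", os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		log.Fatal(err)
	}

	// 1 MiB of page cache, no fadvise callback.
	c, err := storage.NewCache(f, 1<<20, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	if _, err := c.WriteAt([]byte("cached write"), 0); err != nil {
		log.Fatal(err)
	}
	log.Printf("requests=%d loads=%d purges=%d", c.Rq, c.Load, c.Purge)
}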
50
vendor/github.com/cznic/fileutil/storage/file.go
generated
vendored
Normal file
@ -0,0 +1,50 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// FileAccessor is the concrete type returned by NewFile and OpenFile.
|
||||
type FileAccessor struct {
|
||||
*os.File
|
||||
}
|
||||
|
||||
// Implementation of Accessor.
|
||||
func (f *FileAccessor) BeginUpdate() error { return nil }
|
||||
|
||||
// Implementation of Accessor.
|
||||
func (f *FileAccessor) EndUpdate() error { return nil }
|
||||
|
||||
// NewFile returns an Accessor backed by an os.File named name. It opens the
|
||||
// named file with the specified flag (os.O_RDWR etc.) and perm (0666 etc.), if
|
||||
// applicable. If successful, methods on the returned Accessor can be used for
|
||||
// I/O. It returns the Accessor and an error, if any.
|
||||
//
|
||||
// NOTE: The returned Accessor implements BeginUpdate and EndUpdate as a no op.
|
||||
func NewFile(name string, flag int, perm os.FileMode) (store Accessor, err error) {
|
||||
var f FileAccessor
|
||||
if f.File, err = os.OpenFile(name, flag, perm); err == nil {
|
||||
store = &f
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// OpenFile returns an Accessor backed by an existing os.File named name. It
|
||||
// opens the named file with the specified flag (os.O_RDWR etc.) and perm (0666
|
||||
// etc.), if applicable. If successful, methods on the returned Accessor can be
|
||||
// used for I/O. It returns the Accessor and an error, if any.
|
||||
//
|
||||
// NOTE: The returned Accessor implements BeginUpdate and EndUpdate as a no op.
|
||||
func OpenFile(name string, flag int, perm os.FileMode) (store Accessor, err error) {
|
||||
var f FileAccessor
|
||||
if f.File, err = os.OpenFile(name, flag, perm); err == nil {
|
||||
store = &f
|
||||
}
|
||||
return
|
||||
}
|
161
vendor/github.com/cznic/fileutil/storage/mem.go
generated
vendored
Normal file
@ -0,0 +1,161 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"os"
|
||||
)
|
||||
|
||||
//TODO -> exported type w/ exported fields
|
||||
type memaccessor struct {
|
||||
f *os.File
|
||||
fi *FileInfo
|
||||
b []byte
|
||||
}
|
||||
|
||||
// Implementation of Accessor.
|
||||
func (m *memaccessor) BeginUpdate() error { return nil }
|
||||
|
||||
// Implementation of Accessor.
|
||||
func (f *memaccessor) EndUpdate() error { return nil }
|
||||
|
||||
// NewMem returns a new Accessor backed by an os.File. The returned Accessor
|
||||
// keeps all of the store content in memory. The memory and file images are
|
||||
// synced only by Sync and Close. Recommended for small amounts of data only
|
||||
// and content which may be lost on process kill/crash. NewMem returns the
|
||||
// Accessor or an error, if any.
|
||||
//
|
||||
// NOTE: The returned Accessor implements BeginUpdate and EndUpdate as a no op.
|
||||
func NewMem(f *os.File) (store Accessor, err error) {
|
||||
a := &memaccessor{f: f}
|
||||
if err = f.Truncate(0); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var fi os.FileInfo
|
||||
if fi, err = a.f.Stat(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
a.fi = NewFileInfo(fi, a)
|
||||
store = a
|
||||
return
|
||||
}
|
||||
|
||||
// OpenMem returns a new Accessor backed by an os.File. The store content is
|
||||
// loaded from f. The returned Accessor keeps all of the store content in
|
||||
// memory. The memory and file images are synced only by Sync and Close.
|
||||
// Recommended for small amounts of data only and content which may be lost on
|
||||
// process kill/crash. OpenMem returns the Accessor or an error, if any.
|
||||
//
|
||||
// NOTE: The returned Accessor implements BeginUpdate and EndUpdate as a no op.
|
||||
func OpenMem(f *os.File) (store Accessor, err error) {
|
||||
a := &memaccessor{f: f}
|
||||
if a.b, err = ioutil.ReadAll(a.f); err != nil {
|
||||
a.f.Close()
|
||||
return
|
||||
}
|
||||
|
||||
var fi os.FileInfo
|
||||
if fi, err = a.f.Stat(); err != nil {
|
||||
a.f.Close()
|
||||
return
|
||||
}
|
||||
|
||||
a.fi = NewFileInfo(fi, a)
|
||||
store = a
|
||||
return
|
||||
}
|
||||
|
||||
// Close implements Accessor. Specifically it synchronizes the memory and file images.
|
||||
func (a *memaccessor) Close() (err error) {
|
||||
defer func() {
|
||||
a.b = nil
|
||||
if a.f != nil {
|
||||
if e := a.f.Close(); e != nil && err == nil {
|
||||
err = e
|
||||
}
|
||||
}
|
||||
a.f = nil
|
||||
}()
|
||||
|
||||
return a.Sync()
|
||||
}
|
||||
|
||||
func (a *memaccessor) Name() string {
|
||||
return a.f.Name()
|
||||
}
|
||||
|
||||
func (a *memaccessor) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
if off < 0 || off > math.MaxInt32 {
|
||||
return -1, fmt.Errorf("ReadAt: illegal offset %#x", off)
|
||||
}
|
||||
|
||||
rq, fp := len(b), int(off)
|
||||
if fp+rq > len(a.b) {
|
||||
return -1, fmt.Errorf("ReadAt: illegal rq %#x @ offset %#x, len %#x", rq, fp, len(a.b))
|
||||
}
|
||||
|
||||
copy(b, a.b[fp:])
|
||||
return
|
||||
}
|
||||
|
||||
func (a *memaccessor) Stat() (fi os.FileInfo, err error) {
|
||||
i := a.fi
|
||||
i.FSize = int64(len(a.b))
|
||||
fi = i
|
||||
return
|
||||
}
|
||||
|
||||
// Sync implements Accessor. Specifically it synchronizes the memory and file images.
|
||||
func (a *memaccessor) Sync() (err error) {
|
||||
var n int
|
||||
if n, err = a.f.WriteAt(a.b, 0); n != len(a.b) {
|
||||
return
|
||||
}
|
||||
|
||||
return a.f.Truncate(int64(len(a.b)))
|
||||
}
|
||||
|
||||
func (a *memaccessor) Truncate(size int64) (err error) {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
err = e.(error)
|
||||
}
|
||||
}()
|
||||
|
||||
if size > math.MaxInt32 {
|
||||
panic(errors.New("truncate: illegal size"))
|
||||
}
|
||||
|
||||
a.b = a.b[:int(size)]
|
||||
return
|
||||
}
|
||||
|
||||
func (a *memaccessor) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
if off < 0 || off > math.MaxInt32 {
|
||||
return -1, errors.New("WriteAt: illegal offset")
|
||||
}
|
||||
|
||||
rq, fp, size := len(b), int(off), len(a.b)
|
||||
if need := rq + fp; need > size {
|
||||
if need <= cap(a.b) {
|
||||
a.b = a.b[:need]
|
||||
} else {
|
||||
nb := make([]byte, need, 2*need)
|
||||
copy(nb, a.b)
|
||||
a.b = nb
|
||||
}
|
||||
}
|
||||
|
||||
copy(a.b[int(off):], b)
|
||||
return
|
||||
}
|
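A minimal sketch of the memory-backed Accessor above (an editor's illustration, not vendored code; the file name is a hypothetical placeholder). Note that NewMem truncates the backing file and that the memory image is written back only by Sync and Close:

package main

import (
	"log"
	"os"

	"github.com/cznic/fileutil/storage"
)

func main() {
	f, err := os.OpenFile("mem.bin", os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		log.Fatal(err)
	}

	a, err := storage.NewMem(f) // truncates f; content lives in memory
	if err != nil {
		log.Fatal(err)
	}

	if _, err := a.WriteAt([]byte("kept in RAM until Sync/Close"), 0); err != nil {
		log.Fatal(err)
	}
	if err := a.Close(); err != nil { // Close syncs the memory image back to f
		log.Fatal(err)
	}
}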
74
vendor/github.com/cznic/fileutil/storage/probe.go
generated
vendored
Normal file
@ -0,0 +1,74 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
package storage
|
||||
|
||||
import "sync/atomic"
|
||||
|
||||
// Probe collects usage statistics of the embedded Accessor.
|
||||
// Probe itself IS an Accessor.
|
||||
type Probe struct {
|
||||
Accessor
|
||||
Chain *Probe
|
||||
OpsRd int64
|
||||
OpsWr int64
|
||||
BytesRd int64
|
||||
BytesWr int64
|
||||
SectorsRd int64 // Assuming 512 byte sector size
|
||||
SectorsWr int64
|
||||
}
|
||||
|
||||
// NewProbe returns a newly created probe which embeds the src Accessor.
|
||||
// The returned *Probe satisfies Accessor. If chain != nil then Reset()
|
||||
// is cascaded down the chained Probes.
|
||||
func NewProbe(src Accessor, chain *Probe) *Probe {
|
||||
return &Probe{Accessor: src, Chain: chain}
|
||||
}
|
||||
|
||||
func reset(n *int64) {
|
||||
atomic.AddInt64(n, -atomic.AddInt64(n, 0))
|
||||
}
|
||||
|
||||
// Reset zeroes the collected statistics of p.
|
||||
func (p *Probe) Reset() {
|
||||
if p.Chain != nil {
|
||||
p.Chain.Reset()
|
||||
}
|
||||
reset(&p.OpsRd)
|
||||
reset(&p.OpsWr)
|
||||
reset(&p.BytesRd)
|
||||
reset(&p.BytesWr)
|
||||
reset(&p.SectorsRd)
|
||||
reset(&p.SectorsWr)
|
||||
}
|
||||
|
||||
func (p *Probe) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
n, err = p.Accessor.ReadAt(b, off)
|
||||
atomic.AddInt64(&p.OpsRd, 1)
|
||||
atomic.AddInt64(&p.BytesRd, int64(n))
|
||||
if n <= 0 {
|
||||
return
|
||||
}
|
||||
|
||||
sectorFirst := off >> 9
|
||||
sectorLast := (off + int64(n) - 1) >> 9
|
||||
atomic.AddInt64(&p.SectorsRd, sectorLast-sectorFirst+1)
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Probe) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
n, err = p.Accessor.WriteAt(b, off)
|
||||
atomic.AddInt64(&p.OpsWr, 1)
|
||||
atomic.AddInt64(&p.BytesWr, int64(n))
|
||||
if n <= 0 {
|
||||
return
|
||||
}
|
||||
|
||||
sectorFirst := off >> 9
|
||||
sectorLast := (off + int64(n) - 1) >> 9
|
||||
atomic.AddInt64(&p.SectorsWr, sectorLast-sectorFirst+1)
|
||||
return
|
||||
}
|
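A minimal sketch of collecting I/O statistics with Probe (an editor's illustration, not vendored code; the file name is a hypothetical placeholder):

package main

import (
	"log"
	"os"

	"github.com/cznic/fileutil/storage"
)

func main() {
	f, err := storage.NewFile("probed.bin", os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		log.Fatal(err)
	}

	p := storage.NewProbe(f, nil) // Probe is itself an Accessor
	if _, err := p.WriteAt(make([]byte, 1024), 0); err != nil {
		log.Fatal(err)
	}
	if _, err := p.ReadAt(make([]byte, 512), 0); err != nil {
		log.Fatal(err)
	}
	log.Printf("ops r/w: %d/%d, bytes r/w: %d/%d, sectors r/w: %d/%d",
		p.OpsRd, p.OpsWr, p.BytesRd, p.BytesWr, p.SectorsRd, p.SectorsWr)
	p.Close()
}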
141
vendor/github.com/cznic/fileutil/storage/storage.go
generated
vendored
Normal file
@ -0,0 +1,141 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
// WIP: Package storage defines and implements storage providers and store accessors.
|
||||
package storage
|
||||
|
||||
import (
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// FileInfo is a type implementing os.FileInfo which has settable fields, like
|
||||
// the older os.FileInfo used to have. It is used where e.g. the Size
|
||||
// needs to be faked (encapsulated/memory-only file, file cache, etc.).
|
||||
type FileInfo struct {
|
||||
FName string // base name of the file
|
||||
FSize int64 // length in bytes
|
||||
FMode os.FileMode // file mode bits
|
||||
FModTime time.Time // modification time
|
||||
FIsDir bool // abbreviation for Mode().IsDir()
|
||||
sys interface{} // underlying data source (can be nil)
|
||||
}
|
||||
|
||||
// NewFileInfo creates FileInfo from os.FileInfo fi.
|
||||
func NewFileInfo(fi os.FileInfo, sys interface{}) *FileInfo {
|
||||
return &FileInfo{fi.Name(), fi.Size(), fi.Mode(), fi.ModTime(), fi.IsDir(), sys}
|
||||
}
|
||||
|
||||
// Implementation of os.FileInfo
|
||||
func (fi *FileInfo) Name() string {
|
||||
return fi.FName
|
||||
}
|
||||
|
||||
// Implementation of os.FileInfo
|
||||
func (fi *FileInfo) Size() int64 {
|
||||
return fi.FSize
|
||||
}
|
||||
|
||||
// Implementation of os.FileInfo
|
||||
func (fi *FileInfo) Mode() os.FileMode {
|
||||
return fi.FMode
|
||||
}
|
||||
|
||||
// Implementation of os.FileInfo
|
||||
func (fi *FileInfo) ModTime() time.Time {
|
||||
return fi.FModTime
|
||||
}
|
||||
|
||||
// Implementation of os.FileInfo
|
||||
func (fi *FileInfo) IsDir() bool {
|
||||
return fi.FIsDir
|
||||
}
|
||||
|
||||
func (fi *FileInfo) Sys() interface{} {
|
||||
return fi.sys
|
||||
}
|
||||
|
||||
// Accessor provides I/O methods to access a store.
|
||||
type Accessor interface {
|
||||
|
||||
// Close closes the store, rendering it unusable for I/O. It returns an
|
||||
// error, if any.
|
||||
Close() error
|
||||
|
||||
// Name returns the name of the file as presented to Open.
|
||||
Name() string
|
||||
|
||||
// ReadAt reads len(b) bytes from the store starting at byte offset off.
|
||||
// It returns the number of bytes read and the error, if any.
|
||||
// EOF is signaled by a zero count with err set to io.EOF.
|
||||
// ReadAt always returns a non-nil Error when n != len(b).
|
||||
ReadAt(b []byte, off int64) (n int, err error)
|
||||
|
||||
// Stat returns the FileInfo structure describing the store. It returns
|
||||
// the os.FileInfo and an error, if any.
|
||||
Stat() (fi os.FileInfo, err error)
|
||||
|
||||
// Sync commits the current contents of the store to stable storage.
|
||||
// Typically, this means flushing the file system's in-memory copy of
|
||||
// recently written data to disk.
|
||||
Sync() (err error)
|
||||
|
||||
// Truncate changes the size of the store. It does not change the I/O
|
||||
// offset.
|
||||
Truncate(size int64) error
|
||||
|
||||
// WriteAt writes len(b) bytes to the store starting at byte offset off.
|
||||
// It returns the number of bytes written and an error, if any.
|
||||
// WriteAt returns a non-nil Error when n != len(b).
|
||||
WriteAt(b []byte, off int64) (n int, err error)
|
||||
|
||||
// Before every [structural] change of a store the BeginUpdate is to be
|
||||
// called and paired with EndUpdate after the change makes the store's
|
||||
// state consistent again. Invocations of BeginUpdate may nest. On
|
||||
// invoking the last non nested EndUpdate an implicit "commit" should
|
||||
// be performed by the store/provider. The concrete mechanism is
|
||||
// unspecified. It could be for example a write-ahead log. Stores may
|
||||
// implement BeginUpdate and EndUpdate as a (documented) no op.
|
||||
BeginUpdate() error
|
||||
EndUpdate() error
|
||||
}
|
||||
|
||||
// Mutate is a helper/wrapper for executing f in between a.BeginUpdate and
|
||||
// a.EndUpdate. Any parameters and/or return values except an error should be
|
||||
// captured by a function literal passed as f. The returned err is either nil
|
||||
// or the first non nil error returned from the sequence of execution:
|
||||
// BeginUpdate, [f,] EndUpdate. The pair BeginUpdate/EndUpdate *is* invoked
|
||||
// always regardless of any possible errors produced. Mutate doesn't handle
|
||||
// panics; it should be used only with a function [literal] which doesn't panic.
|
||||
// Otherwise the pairing of BeginUpdate/EndUpdate is not guaranteed.
|
||||
//
|
||||
// NOTE: If BeginUpdate, which is invoked before f, returns a non-nil error,
|
||||
// then f is not invoked at all (but EndUpdate still is).
|
||||
func Mutate(a Accessor, f func() error) (err error) {
|
||||
defer func() {
|
||||
if e := a.EndUpdate(); e != nil && err == nil {
|
||||
err = e
|
||||
}
|
||||
}()
|
||||
|
||||
if err = a.BeginUpdate(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return f()
|
||||
}
|
||||
|
||||
// LockedMutate wraps Mutate in yet another layer consisting of a
|
||||
// l.Lock/l.Unlock pair. All other limitations apply as in Mutate, e.g. no
|
||||
// panics are allowed to happen - otherwise no guarantees can be made about
|
||||
// Unlock matching the Lock.
|
||||
func LockedMutate(a Accessor, l sync.Locker, f func() error) (err error) {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
|
||||
return Mutate(a, f)
|
||||
}
|
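A minimal sketch of Mutate and LockedMutate (an editor's illustration, not vendored code; the file name is a hypothetical placeholder). The function literal carries the actual change, while Mutate guarantees the BeginUpdate/EndUpdate pairing:

package main

import (
	"log"
	"os"
	"sync"

	"github.com/cznic/fileutil/storage"
)

func main() {
	a, err := storage.NewFile("mutate.bin", os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close()

	// The structural change runs between BeginUpdate and EndUpdate;
	// Mutate pairs them even if the function literal returns an error.
	err = storage.Mutate(a, func() error {
		_, err := a.WriteAt([]byte{1, 2, 3}, 0)
		return err
	})
	if err != nil {
		log.Fatal(err)
	}

	// LockedMutate additionally serializes callers through a sync.Locker.
	var mu sync.Mutex
	if err := storage.LockedMutate(a, &mu, func() error { return nil }); err != nil {
		log.Fatal(err)
	}
}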
13
vendor/github.com/cznic/fileutil/storage/test_deps.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
package storage
|
||||
|
||||
// Pull test dependencies too.
|
||||
// Enables easy 'go test X' after 'go get X'
|
||||
import (
|
||||
// nothing yet
|
||||
)
|
13
vendor/github.com/cznic/fileutil/test_deps.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
package fileutil
|
||||
|
||||
// Pull test dependencies too.
|
||||
// Enables easy 'go test X' after 'go get X'
|
||||
import (
|
||||
// nothing yet
|
||||
)
|
27
vendor/github.com/cznic/mathutil/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2014 The mathutil Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the names of the authors nor the names of the
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
207
vendor/github.com/cznic/mathutil/bits.go
generated
vendored
Normal file
@ -0,0 +1,207 @@
|
||||
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mathutil
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// BitLenByte returns the bit width of the non zero part of n.
|
||||
func BitLenByte(n byte) int {
|
||||
return log2[n] + 1
|
||||
}
|
||||
|
||||
// BitLenUint16 returns the bit width of the non zero part of n.
|
||||
func BitLenUint16(n uint16) int {
|
||||
if b := n >> 8; b != 0 {
|
||||
return log2[b] + 8 + 1
|
||||
}
|
||||
|
||||
return log2[n] + 1
|
||||
}
|
||||
|
||||
// BitLenUint32 returns the bit width of the non zero part of n.
|
||||
func BitLenUint32(n uint32) int {
|
||||
if b := n >> 24; b != 0 {
|
||||
return log2[b] + 24 + 1
|
||||
}
|
||||
|
||||
if b := n >> 16; b != 0 {
|
||||
return log2[b] + 16 + 1
|
||||
}
|
||||
|
||||
if b := n >> 8; b != 0 {
|
||||
return log2[b] + 8 + 1
|
||||
}
|
||||
|
||||
return log2[n] + 1
|
||||
}
|
||||
|
||||
// BitLen returns the bit width of the non zero part of n.
|
||||
func BitLen(n int) int { // Should handle correctly [future] 64 bit Go ints
|
||||
if IntBits == 64 {
|
||||
return BitLenUint64(uint64(n))
|
||||
}
|
||||
|
||||
if b := byte(n >> 24); b != 0 {
|
||||
return log2[b] + 24 + 1
|
||||
}
|
||||
|
||||
if b := byte(n >> 16); b != 0 {
|
||||
return log2[b] + 16 + 1
|
||||
}
|
||||
|
||||
if b := byte(n >> 8); b != 0 {
|
||||
return log2[b] + 8 + 1
|
||||
}
|
||||
|
||||
return log2[byte(n)] + 1
|
||||
}
|
||||
|
||||
// BitLenUint returns the bit width of the non zero part of n.
|
||||
func BitLenUint(n uint) int { // Should handle correctly [future] 64 bit Go uints
|
||||
if IntBits == 64 {
|
||||
return BitLenUint64(uint64(n))
|
||||
}
|
||||
|
||||
if b := n >> 24; b != 0 {
|
||||
return log2[b] + 24 + 1
|
||||
}
|
||||
|
||||
if b := n >> 16; b != 0 {
|
||||
return log2[b] + 16 + 1
|
||||
}
|
||||
|
||||
if b := n >> 8; b != 0 {
|
||||
return log2[b] + 8 + 1
|
||||
}
|
||||
|
||||
return log2[n] + 1
|
||||
}
|
||||
|
||||
// BitLenUint64 returns the bit width of the non zero part of n.
|
||||
func BitLenUint64(n uint64) int {
|
||||
if b := n >> 56; b != 0 {
|
||||
return log2[b] + 56 + 1
|
||||
}
|
||||
|
||||
if b := n >> 48; b != 0 {
|
||||
return log2[b] + 48 + 1
|
||||
}
|
||||
|
||||
if b := n >> 40; b != 0 {
|
||||
return log2[b] + 40 + 1
|
||||
}
|
||||
|
||||
if b := n >> 32; b != 0 {
|
||||
return log2[b] + 32 + 1
|
||||
}
|
||||
|
||||
if b := n >> 24; b != 0 {
|
||||
return log2[b] + 24 + 1
|
||||
}
|
||||
|
||||
if b := n >> 16; b != 0 {
|
||||
return log2[b] + 16 + 1
|
||||
}
|
||||
|
||||
if b := n >> 8; b != 0 {
|
||||
return log2[b] + 8 + 1
|
||||
}
|
||||
|
||||
return log2[n] + 1
|
||||
}
|
||||
|
||||
// BitLenUintptr returns the bit width of the non zero part of n.
|
||||
func BitLenUintptr(n uintptr) int {
|
||||
if b := n >> 56; b != 0 {
|
||||
return log2[b] + 56 + 1
|
||||
}
|
||||
|
||||
if b := n >> 48; b != 0 {
|
||||
return log2[b] + 48 + 1
|
||||
}
|
||||
|
||||
if b := n >> 40; b != 0 {
|
||||
return log2[b] + 40 + 1
|
||||
}
|
||||
|
||||
if b := n >> 32; b != 0 {
|
||||
return log2[b] + 32 + 1
|
||||
}
|
||||
|
||||
if b := n >> 24; b != 0 {
|
||||
return log2[b] + 24 + 1
|
||||
}
|
||||
|
||||
if b := n >> 16; b != 0 {
|
||||
return log2[b] + 16 + 1
|
||||
}
|
||||
|
||||
if b := n >> 8; b != 0 {
|
||||
return log2[b] + 8 + 1
|
||||
}
|
||||
|
||||
return log2[n] + 1
|
||||
}
|
||||
|
||||
// PopCountByte returns population count of n (number of bits set in n).
|
||||
func PopCountByte(n byte) int {
|
||||
return int(popcnt[byte(n)])
|
||||
}
|
||||
|
||||
// PopCountUint16 returns population count of n (number of bits set in n).
|
||||
func PopCountUint16(n uint16) int {
|
||||
return int(popcnt[byte(n>>8)]) + int(popcnt[byte(n)])
|
||||
}
|
||||
|
||||
// PopCountUint32 returns population count of n (number of bits set in n).
|
||||
func PopCountUint32(n uint32) int {
|
||||
return int(popcnt[byte(n>>24)]) + int(popcnt[byte(n>>16)]) +
|
||||
int(popcnt[byte(n>>8)]) + int(popcnt[byte(n)])
|
||||
}
|
||||
|
||||
// PopCount returns population count of n (number of bits set in n).
|
||||
func PopCount(n int) int { // Should handle correctly [future] 64 bit Go ints
|
||||
if IntBits == 64 {
|
||||
return PopCountUint64(uint64(n))
|
||||
}
|
||||
|
||||
return PopCountUint32(uint32(n))
|
||||
}
|
||||
|
||||
// PopCountUint returns population count of n (number of bits set in n).
|
||||
func PopCountUint(n uint) int { // Should handle correctly [future] 64 bit Go uints
|
||||
if IntBits == 64 {
|
||||
return PopCountUint64(uint64(n))
|
||||
}
|
||||
|
||||
return PopCountUint32(uint32(n))
|
||||
}
|
||||
|
||||
// PopCountUintptr returns population count of n (number of bits set in n).
|
||||
func PopCountUintptr(n uintptr) int {
|
||||
if UintPtrBits == 64 {
|
||||
return PopCountUint64(uint64(n))
|
||||
}
|
||||
|
||||
return PopCountUint32(uint32(n))
|
||||
}
|
||||
|
||||
// PopCountUint64 returns population count of n (number of bits set in n).
|
||||
func PopCountUint64(n uint64) int {
|
||||
return int(popcnt[byte(n>>56)]) + int(popcnt[byte(n>>48)]) +
|
||||
int(popcnt[byte(n>>40)]) + int(popcnt[byte(n>>32)]) +
|
||||
int(popcnt[byte(n>>24)]) + int(popcnt[byte(n>>16)]) +
|
||||
int(popcnt[byte(n>>8)]) + int(popcnt[byte(n)])
|
||||
}
|
||||
|
||||
// PopCountBigInt returns population count of |n| (number of bits set in |n|).
|
||||
func PopCountBigInt(n *big.Int) (r int) {
|
||||
for _, v := range n.Bits() {
|
||||
r += PopCountUintptr(uintptr(v))
|
||||
}
|
||||
return
|
||||
}
|
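A minimal sketch exercising the bit-length and population-count helpers above (an editor's illustration, not vendored code):

package main

import (
	"fmt"
	"math/big"

	"github.com/cznic/mathutil"
)

func main() {
	fmt.Println(mathutil.BitLenUint64(0x80))              // 8: the highest set bit is bit 7
	fmt.Println(mathutil.PopCountUint32(0xF0F0))          // 8 bits set
	fmt.Println(mathutil.PopCountBigInt(big.NewInt(255))) // 8 bits set in |255|
}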
46
vendor/github.com/cznic/mathutil/envelope.go
generated
vendored
Normal file
@ -0,0 +1,46 @@
|
||||
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mathutil
|
||||
|
||||
import (
|
||||
"math"
|
||||
)
|
||||
|
||||
// Approximation type determines approximation methods used by e.g. Envelope.
|
||||
type Approximation int
|
||||
|
||||
// Specific approximation method tags
|
||||
const (
|
||||
_ Approximation = iota
|
||||
Linear // As named
|
||||
Sinusoidal // Smooth for all derivations
|
||||
)
|
||||
|
||||
// Envelope is a utility for defining simple curves using a small (usually)
|
||||
// set of data points. Envelope returns a value defined by x, points and
|
||||
// approximation. The value of x must be in [0,1) otherwise the result is
|
||||
// undefined or the function may panic. Points are interpreted as dividing the
|
||||
// [0,1) interval in len(points)-1 sections, so len(points) must be > 1 or the
|
||||
// function may panic. According to the left and right points closing/adjacent
|
||||
// to the section the resulting value is interpolated using the chosen
|
||||
// approximation method. Unsupported values of approximation are silently
|
||||
// interpreted as 'Linear'.
|
||||
func Envelope(x float64, points []float64, approximation Approximation) float64 {
|
||||
step := 1 / float64(len(points)-1)
|
||||
fslot := math.Floor(x / step)
|
||||
mod := x - fslot*step
|
||||
slot := int(fslot)
|
||||
l, r := points[slot], points[slot+1]
|
||||
rmod := mod / step
|
||||
switch approximation {
|
||||
case Sinusoidal:
|
||||
k := (math.Sin(math.Pi*(rmod-0.5)) + 1) / 2
|
||||
return l + (r-l)*k
|
||||
case Linear:
|
||||
fallthrough
|
||||
default:
|
||||
return l + (r-l)*rmod
|
||||
}
|
||||
}
|
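A minimal sketch of Envelope with both approximation methods (an editor's illustration, not vendored code):

package main

import (
	"fmt"

	"github.com/cznic/mathutil"
)

func main() {
	// Three points divide [0,1) into two sections; x=0.1 is one fifth of the
	// way through the first section, so linear interpolation yields 0.2.
	points := []float64{0, 1, 0}
	fmt.Println(mathutil.Envelope(0.1, points, mathutil.Linear))
	fmt.Println(mathutil.Envelope(0.1, points, mathutil.Sinusoidal))
}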
48
vendor/github.com/cznic/mathutil/example/example.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"flag"
|
||||
"github.com/cznic/mathutil"
|
||||
"log"
|
||||
"math"
|
||||
"os"
|
||||
)
|
||||
|
||||
/*
|
||||
|
||||
$ # Usage e.g.:
|
||||
$ go run example.go -max 1024 > mathutil.dat # generate 1kB of "random" data
|
||||
|
||||
*/
|
||||
func main() {
|
||||
r, err := mathutil.NewFC32(math.MinInt32, math.MaxInt32, true)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var mflag uint64
|
||||
flag.Uint64Var(&mflag, "max", 0, "limit output to max bytes")
|
||||
flag.Parse()
|
||||
stdout := bufio.NewWriter(os.Stdout)
|
||||
if mflag != 0 {
|
||||
for i := uint64(0); i < mflag; i++ {
|
||||
if err := stdout.WriteByte(byte(r.Next())); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
stdout.Flush()
|
||||
return
|
||||
}
|
||||
|
||||
for stdout.WriteByte(byte(r.Next())) == nil {
|
||||
}
|
||||
}
|
66
vendor/github.com/cznic/mathutil/example2/example2.go
generated
vendored
Normal file
@ -0,0 +1,66 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/cznic/mathutil"
|
||||
"image"
|
||||
"image/png"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math"
|
||||
"math/rand"
|
||||
)
|
||||
|
||||
// $ go run example2.go # view rand.png and rnd.png by your favorite pic viewer
|
||||
//
|
||||
// see http://www.boallen.com/random-numbers.html
|
||||
func main() {
|
||||
sqr := image.Rect(0, 0, 511, 511)
|
||||
r, err := mathutil.NewFC32(math.MinInt32, math.MaxInt32, true)
|
||||
if err != nil {
|
||||
log.Fatal("NewFC32", err)
|
||||
}
|
||||
|
||||
img := image.NewGray(sqr)
|
||||
for y := 0; y < 512; y++ {
|
||||
for x := 0; x < 512; x++ {
|
||||
if r.Next()&1 != 0 {
|
||||
img.Set(x, y, image.White)
|
||||
}
|
||||
}
|
||||
}
|
||||
buf := bytes.NewBuffer(nil)
|
||||
if err := png.Encode(buf, img); err != nil {
|
||||
log.Fatal("Encode rnd.png ", err)
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile("rnd.png", buf.Bytes(), 0666); err != nil {
|
||||
log.Fatal("ioutil.WriteFile/rnd.png ", err)
|
||||
}
|
||||
|
||||
r2 := rand.New(rand.NewSource(0))
|
||||
img = image.NewGray(sqr)
|
||||
for y := 0; y < 512; y++ {
|
||||
for x := 0; x < 512; x++ {
|
||||
if r2.Int()&1 != 0 {
|
||||
img.Set(x, y, image.White)
|
||||
}
|
||||
}
|
||||
}
|
||||
buf = bytes.NewBuffer(nil)
|
||||
if err := png.Encode(buf, img); err != nil {
|
||||
log.Fatal("Encode rand.png ", err)
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile("rand.png", buf.Bytes(), 0666); err != nil {
|
||||
log.Fatal("ioutil.WriteFile/rand.png ", err)
|
||||
}
|
||||
}
|
43
vendor/github.com/cznic/mathutil/example3/example3.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"flag"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
)
|
||||
|
||||
/*
|
||||
|
||||
$ # Usage e.g.:
|
||||
$ go run example3.go -max 1024 > rand.dat # generate 1kB of "random" data
|
||||
|
||||
*/
|
||||
func main() {
|
||||
r := rand.New(rand.NewSource(1))
|
||||
var mflag uint64
|
||||
flag.Uint64Var(&mflag, "max", 0, "limit output to max bytes")
|
||||
flag.Parse()
|
||||
stdout := bufio.NewWriter(os.Stdout)
|
||||
if mflag != 0 {
|
||||
for i := uint64(0); i < mflag; i++ {
|
||||
if err := stdout.WriteByte(byte(r.Int())); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
stdout.Flush()
|
||||
return
|
||||
}
|
||||
|
||||
for stdout.WriteByte(byte(r.Int())) == nil {
|
||||
}
|
||||
}
|
90
vendor/github.com/cznic/mathutil/example4/main.go
generated
vendored
Normal file
@ -0,0 +1,90 @@
|
||||
// Copyright (c) 2011 jnml. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// Let QRN be the number of quadratic residues of N. Let Q be QRN/N. From a
|
||||
// sorted list of primorial products < 2^32 find "record breakers". "Record
|
||||
// breaker" is N with new lowest Q.
|
||||
//
|
||||
// There are only 49 "record breakers" < 2^32.
|
||||
//
|
||||
// To run the example $ go run main.go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/cznic/mathutil"
|
||||
"github.com/cznic/sortutil"
|
||||
)
|
||||
|
||||
func main() {
|
||||
pp := mathutil.PrimorialProductsUint32(0, math.MaxUint32, 32)
|
||||
sort.Sort(sortutil.Uint32Slice(pp))
|
||||
var bestN, bestD uint32 = 1, 1
|
||||
order, checks := 0, 0
|
||||
var ixDirty uint32
|
||||
m := make([]byte, math.MaxUint32>>3)
|
||||
for _, n := range pp {
|
||||
for i := range m[:ixDirty+1] {
|
||||
m[i] = 0
|
||||
}
|
||||
ixDirty = 0
|
||||
checks++
|
||||
limit0 := mathutil.QScaleUint32(n, bestN, bestD)
|
||||
if limit0 > math.MaxUint32 {
|
||||
panic(0)
|
||||
}
|
||||
limit := uint32(limit0)
|
||||
n64 := uint64(n)
|
||||
hi := n64 >> 1
|
||||
hits := uint32(0)
|
||||
check := true
|
||||
fmt.Printf("\r%10d %d/%d", n, checks, len(pp))
|
||||
t0 := time.Now()
|
||||
for i := uint64(0); i < hi; i++ {
|
||||
sq := uint32(i * i % n64)
|
||||
ix := sq >> 3
|
||||
msk := byte(1 << (sq & 7))
|
||||
if m[ix]&msk == 0 {
|
||||
hits++
|
||||
if hits >= limit {
|
||||
check = false
|
||||
break
|
||||
}
|
||||
}
|
||||
m[ix] |= msk
|
||||
if ix > ixDirty {
|
||||
ixDirty = ix
|
||||
}
|
||||
}
|
||||
|
||||
adjPrime := ".." // Composite before
|
||||
if mathutil.IsPrime(n - 1) {
|
||||
adjPrime = "P." // Prime before
|
||||
}
|
||||
switch mathutil.IsPrime(n + 1) {
|
||||
case true:
|
||||
adjPrime += "P" // Prime after
|
||||
case false:
|
||||
adjPrime += "." // Composite after
|
||||
}
|
||||
|
||||
if check && mathutil.QCmpUint32(hits, n, bestN, bestD) < 0 {
|
||||
order++
|
||||
d := time.Since(t0)
|
||||
bestN, bestD = hits, n
|
||||
q := float64(hits) / float64(n)
|
||||
fmt.Printf(
|
||||
"\r%2s #%03d %d %d %.2f %.2E %s %s %v\n",
|
||||
adjPrime, order, n, hits,
|
||||
1/q, q, d, time.Now().Format("15:04:05"), mathutil.FactorInt(n),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
83
vendor/github.com/cznic/mathutil/ff/main.go
generated
vendored
Normal file
@ -0,0 +1,83 @@
|
||||
// Copyright (c) jnml. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// Factor Finder - searches for Mersenne number factors of one specific special
|
||||
// form.
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
const (
|
||||
pp = 1
|
||||
pp2 = 10
|
||||
)
|
||||
|
||||
var (
|
||||
_1 = big.NewInt(1)
|
||||
_2 = big.NewInt(2)
|
||||
)
|
||||
|
||||
func main() {
|
||||
runtime.GOMAXPROCS(2)
|
||||
oClass := flag.Uint64("c", 2, `factor "class" number`)
|
||||
oDuration := flag.Duration("d", time.Second, "duration to spend on one class")
|
||||
flag.Parse()
|
||||
class := *oClass
|
||||
for class&1 != 0 {
|
||||
class >>= 1
|
||||
}
|
||||
class = mathutil.MaxUint64(class, 2)
|
||||
|
||||
for {
|
||||
c := time.After(*oDuration)
|
||||
factor := big.NewInt(0)
|
||||
factor.SetUint64(class)
|
||||
exp := big.NewInt(0)
|
||||
oneClass:
|
||||
for {
|
||||
select {
|
||||
case <-c:
|
||||
break oneClass
|
||||
default:
|
||||
}
|
||||
|
||||
exp.Set(factor)
|
||||
factor.Lsh(factor, 1)
|
||||
factor.Add(factor, _1)
|
||||
if !factor.ProbablyPrime(pp) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !exp.ProbablyPrime(pp) {
|
||||
continue
|
||||
}
|
||||
|
||||
if mathutil.ModPowBigInt(_2, exp, factor).Cmp(_1) != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if !factor.ProbablyPrime(pp2) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !exp.ProbablyPrime(pp2) {
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Printf("%d: %s | M%s (%d bits)\n", class, factor, exp, factor.BitLen())
|
||||
}
|
||||
|
||||
class += 2
|
||||
}
|
||||
}
|
829
vendor/github.com/cznic/mathutil/mathutil.go
generated
vendored
Normal file
@ -0,0 +1,829 @@
|
||||
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package mathutil provides utilities supplementing the standard 'math' and
|
||||
// 'math/rand' packages.
|
||||
//
|
||||
// Compatibility issues
|
||||
//
|
||||
// 2013-12-13: The following functions have been REMOVED
|
||||
//
|
||||
// func Uint64ToBigInt(n uint64) *big.Int
|
||||
// func Uint64FromBigInt(n *big.Int) (uint64, bool)
|
||||
//
|
||||
// 2013-05-13: The following functions are now DEPRECATED
|
||||
//
|
||||
// func Uint64ToBigInt(n uint64) *big.Int
|
||||
// func Uint64FromBigInt(n *big.Int) (uint64, bool)
|
||||
//
|
||||
// These functions will be REMOVED with Go release 1.1+1.
|
||||
//
|
||||
// 2013-01-21: The following functions have been REMOVED
|
||||
//
|
||||
// func MaxInt() int
|
||||
// func MinInt() int
|
||||
// func MaxUint() uint
|
||||
// func UintPtrBits() int
|
||||
//
|
||||
// They are now replaced by untyped constants
|
||||
//
|
||||
// MaxInt
|
||||
// MinInt
|
||||
// MaxUint
|
||||
// UintPtrBits
|
||||
//
|
||||
// Additionally one more untyped constant was added
|
||||
//
|
||||
// IntBits
|
||||
//
|
||||
// This change breaks any existing code depending on the above removed
|
||||
// functions. They should have not been published in the first place, that was
|
||||
// unfortunate. Instead, defining such architecture and/or implementation
|
||||
// specific integer limits and bit widths as untyped constants improves
|
||||
// performance and allows for static dead code elimination if it depends on
|
||||
// these values. Thanks to minux for pointing it out in the mail list
|
||||
// (https://groups.google.com/d/msg/golang-nuts/tlPpLW6aJw8/NT3mpToH-a4J).
|
||||
//
|
||||
// 2012-12-12: The following functions will be DEPRECATED with Go release
|
||||
// 1.0.3+1 and REMOVED with Go release 1.0.3+2, b/c of
|
||||
// http://code.google.com/p/go/source/detail?r=954a79ee3ea8
|
||||
//
|
||||
// func Uint64ToBigInt(n uint64) *big.Int
|
||||
// func Uint64FromBigInt(n *big.Int) (uint64, bool)
|
||||
package mathutil
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// Architecture and/or implementation specific integer limits and bit widths.
|
||||
const (
|
||||
MaxInt = 1<<(IntBits-1) - 1
|
||||
MinInt = -MaxInt - 1
|
||||
MaxUint = 1<<IntBits - 1
|
||||
IntBits = 1 << (^uint(0)>>32&1 + ^uint(0)>>16&1 + ^uint(0)>>8&1 + 3)
|
||||
UintPtrBits = 1 << (^uintptr(0)>>32&1 + ^uintptr(0)>>16&1 + ^uintptr(0)>>8&1 + 3)
|
||||
)
|
||||
|
||||
var (
|
||||
_1 = big.NewInt(1)
|
||||
_2 = big.NewInt(2)
|
||||
)
|
||||
|
||||
// GCDByte returns the greatest common divisor of a and b. Based on:
|
||||
// http://en.wikipedia.org/wiki/Euclidean_algorithm#Implementations
|
||||
func GCDByte(a, b byte) byte {
|
||||
for b != 0 {
|
||||
a, b = b, a%b
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
// GCDUint16 returns the greatest common divisor of a and b.
|
||||
func GCDUint16(a, b uint16) uint16 {
|
||||
for b != 0 {
|
||||
a, b = b, a%b
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
// GCDUint32 returns the greatest common divisor of a and b.
|
||||
func GCDUint32(a, b uint32) uint32 {
|
||||
for b != 0 {
|
||||
a, b = b, a%b
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
// GCDUint64 returns the greatest common divisor of a and b.
|
||||
func GCDUint64(a, b uint64) uint64 {
|
||||
for b != 0 {
|
||||
a, b = b, a%b
|
||||
}
|
||||
return a
|
||||
}
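// Illustrative only (assuming the github.com/cznic/mathutil import path):
// the Euclidean remainder loop above gives, for example,
//
//	mathutil.GCDUint32(12, 18) // 6
//	mathutil.GCDUint64(0, 5)   // 5 (gcd(0, b) == b)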
|
||||
|
||||
// ISqrt returns floor(sqrt(n)). Typical run time is a few hundred ns.
|
||||
func ISqrt(n uint32) (x uint32) {
|
||||
if n == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if n >= math.MaxUint16*math.MaxUint16 {
|
||||
return math.MaxUint16
|
||||
}
|
||||
|
||||
var px, nx uint32
|
||||
for x = n; ; px, x = x, nx {
|
||||
nx = (x + n/x) / 2
|
||||
if nx == x || nx == px {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
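// Illustrative only: the Newton iteration above converges to the integer
// floor of the square root, e.g.
//
//	mathutil.ISqrt(10) // 3
//	mathutil.ISqrt(16) // 4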
|
||||
|
||||
// SqrtUint64 returns floor(sqrt(n)). Typical run time is about 0.5 µs.
|
||||
func SqrtUint64(n uint64) (x uint64) {
|
||||
if n == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if n >= math.MaxUint32*math.MaxUint32 {
|
||||
return math.MaxUint32
|
||||
}
|
||||
|
||||
var px, nx uint64
|
||||
for x = n; ; px, x = x, nx {
|
||||
nx = (x + n/x) / 2
|
||||
if nx == x || nx == px {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SqrtBig returns floor(sqrt(n)). It panics on n < 0.
|
||||
func SqrtBig(n *big.Int) (x *big.Int) {
|
||||
switch n.Sign() {
|
||||
case -1:
|
||||
panic(-1)
|
||||
case 0:
|
||||
return big.NewInt(0)
|
||||
}
|
||||
|
||||
var px, nx big.Int
|
||||
x = big.NewInt(0)
|
||||
x.SetBit(x, n.BitLen()/2+1, 1)
|
||||
for {
|
||||
nx.Rsh(nx.Add(x, nx.Div(n, x)), 1)
|
||||
if nx.Cmp(x) == 0 || nx.Cmp(&px) == 0 {
|
||||
break
|
||||
}
|
||||
px.Set(x)
|
||||
x.Set(&nx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Log2Byte returns log base 2 of n. It's the same as index of the highest
|
||||
// bit set in n. For n == 0 -1 is returned.
|
||||
func Log2Byte(n byte) int {
|
||||
return log2[n]
|
||||
}
|
||||
|
||||
// Log2Uint16 returns log base 2 of n. It's the same as index of the highest
|
||||
// bit set in n. For n == 0 -1 is returned.
|
||||
func Log2Uint16(n uint16) int {
|
||||
if b := n >> 8; b != 0 {
|
||||
return log2[b] + 8
|
||||
}
|
||||
|
||||
return log2[n]
|
||||
}
|
||||
|
||||
// Log2Uint32 returns log base 2 of n. It's the same as index of the highest
|
||||
// bit set in n. For n == 0 -1 is returned.
|
||||
func Log2Uint32(n uint32) int {
|
||||
if b := n >> 24; b != 0 {
|
||||
return log2[b] + 24
|
||||
}
|
||||
|
||||
if b := n >> 16; b != 0 {
|
||||
return log2[b] + 16
|
||||
}
|
||||
|
||||
if b := n >> 8; b != 0 {
|
||||
return log2[b] + 8
|
||||
}
|
||||
|
||||
return log2[n]
|
||||
}
|
||||
|
||||
// Log2Uint64 returns log base 2 of n. It's the same as index of the highest
|
||||
// bit set in n. For n == 0 -1 is returned.
|
||||
func Log2Uint64(n uint64) int {
|
||||
if b := n >> 56; b != 0 {
|
||||
return log2[b] + 56
|
||||
}
|
||||
|
||||
if b := n >> 48; b != 0 {
|
||||
return log2[b] + 48
|
||||
}
|
||||
|
||||
if b := n >> 40; b != 0 {
|
||||
return log2[b] + 40
|
||||
}
|
||||
|
||||
if b := n >> 32; b != 0 {
|
||||
return log2[b] + 32
|
||||
}
|
||||
|
||||
if b := n >> 24; b != 0 {
|
||||
return log2[b] + 24
|
||||
}
|
||||
|
||||
if b := n >> 16; b != 0 {
|
||||
return log2[b] + 16
|
||||
}
|
||||
|
||||
if b := n >> 8; b != 0 {
|
||||
return log2[b] + 8
|
||||
}
|
||||
|
||||
return log2[n]
|
||||
}
|
||||
|
||||
// ModPowByte computes (b^e)%m. It panics for m == 0 || b == e == 0.
|
||||
//
|
||||
// See also: http://en.wikipedia.org/wiki/Modular_exponentiation#Right-to-left_binary_method
|
||||
func ModPowByte(b, e, m byte) byte {
|
||||
if b == 0 && e == 0 {
|
||||
panic(0)
|
||||
}
|
||||
|
||||
if m == 1 {
|
||||
return 0
|
||||
}
|
||||
|
||||
r := uint16(1)
|
||||
for b, m := uint16(b), uint16(m); e > 0; b, e = b*b%m, e>>1 {
|
||||
if e&1 == 1 {
|
||||
r = r * b % m
|
||||
}
|
||||
}
|
||||
return byte(r)
|
||||
}
|
||||
|
||||
// ModPowUint16 computes (b^e)%m. It panics for m == 0 || b == e == 0.
|
||||
func ModPowUint16(b, e, m uint16) uint16 {
|
||||
if b == 0 && e == 0 {
|
||||
panic(0)
|
||||
}
|
||||
|
||||
if m == 1 {
|
||||
return 0
|
||||
}
|
||||
|
||||
r := uint32(1)
|
||||
for b, m := uint32(b), uint32(m); e > 0; b, e = b*b%m, e>>1 {
|
||||
if e&1 == 1 {
|
||||
r = r * b % m
|
||||
}
|
||||
}
|
||||
return uint16(r)
|
||||
}
|
||||
|
||||
// ModPowUint32 computes (b^e)%m. It panics for m == 0 || b == e == 0.
|
||||
func ModPowUint32(b, e, m uint32) uint32 {
|
||||
if b == 0 && e == 0 {
|
||||
panic(0)
|
||||
}
|
||||
|
||||
if m == 1 {
|
||||
return 0
|
||||
}
|
||||
|
||||
r := uint64(1)
|
||||
for b, m := uint64(b), uint64(m); e > 0; b, e = b*b%m, e>>1 {
|
||||
if e&1 == 1 {
|
||||
r = r * b % m
|
||||
}
|
||||
}
|
||||
return uint32(r)
|
||||
}
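// Illustrative only: a quick check of the square-and-multiply loop above,
// assuming the github.com/cznic/mathutil import path.
//
//	mathutil.ModPowUint32(2, 10, 1000) // 24, i.e. 1024 % 1000
//	mathutil.ModPowUint32(3, 0, 7)     // 1, any non-zero b to the 0th power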
|
||||
|
||||
// ModPowUint64 computes (b^e)%m. It panics for m == 0 || b == e == 0.
|
||||
func ModPowUint64(b, e, m uint64) (r uint64) {
|
||||
if b == 0 && e == 0 {
|
||||
panic(0)
|
||||
}
|
||||
|
||||
if m == 1 {
|
||||
return 0
|
||||
}
|
||||
|
||||
return modPowBigInt(big.NewInt(0).SetUint64(b), big.NewInt(0).SetUint64(e), big.NewInt(0).SetUint64(m)).Uint64()
|
||||
}
|
||||
|
||||
func modPowBigInt(b, e, m *big.Int) (r *big.Int) {
|
||||
r = big.NewInt(1)
|
||||
for i, n := 0, e.BitLen(); i < n; i++ {
|
||||
if e.Bit(i) != 0 {
|
||||
r.Mod(r.Mul(r, b), m)
|
||||
}
|
||||
b.Mod(b.Mul(b, b), m)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ModPowBigInt computes (b^e)%m. Returns nil for e < 0. It panics for m == 0 || b == e == 0.
|
||||
func ModPowBigInt(b, e, m *big.Int) (r *big.Int) {
|
||||
if b.Sign() == 0 && e.Sign() == 0 {
|
||||
panic(0)
|
||||
}
|
||||
|
||||
if m.Cmp(_1) == 0 {
|
||||
return big.NewInt(0)
|
||||
}
|
||||
|
||||
if e.Sign() < 0 {
|
||||
return
|
||||
}
|
||||
|
||||
return modPowBigInt(big.NewInt(0).Set(b), big.NewInt(0).Set(e), m)
|
||||
}
|
||||
|
||||
var uint64ToBigIntDelta big.Int
|
||||
|
||||
func init() {
|
||||
uint64ToBigIntDelta.SetBit(&uint64ToBigIntDelta, 63, 1)
|
||||
}
|
||||
|
||||
var uintptrBits int
|
||||
|
||||
func init() {
|
||||
x := uint64(math.MaxUint64)
|
||||
uintptrBits = BitLenUintptr(uintptr(x))
|
||||
}
|
||||
|
||||
// UintptrBits returns the bit width of a uintptr on the executing machine.
|
||||
func UintptrBits() int {
|
||||
return uintptrBits
|
||||
}
|
||||
|
||||
// AddUint128_64 returns the uint128 sum of uint64 a and b.
|
||||
func AddUint128_64(a, b uint64) (hi uint64, lo uint64) {
|
||||
lo = a + b
|
||||
if lo < a {
|
||||
hi = 1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MulUint128_64 returns the uint128 bit product of uint64 a and b.
|
||||
func MulUint128_64(a, b uint64) (hi, lo uint64) {
|
||||
/*
|
||||
2^(2 W) ahi bhi + 2^W alo bhi + 2^W ahi blo + alo blo
|
||||
|
||||
FEDCBA98 76543210 FEDCBA98 76543210
|
||||
---- alo*blo ----
|
||||
---- alo*bhi ----
|
||||
---- ahi*blo ----
|
||||
---- ahi*bhi ----
|
||||
*/
|
||||
const w = 32
|
||||
const m = 1<<w - 1
|
||||
ahi, bhi, alo, blo := a>>w, b>>w, a&m, b&m
|
||||
lo = alo * blo
|
||||
mid1 := alo * bhi
|
||||
mid2 := ahi * blo
|
||||
c1, lo := AddUint128_64(lo, mid1<<w)
|
||||
c2, lo := AddUint128_64(lo, mid2<<w)
|
||||
_, hi = AddUint128_64(ahi*bhi, mid1>>w+mid2>>w+uint64(c1+c2))
|
||||
return
|
||||
}
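// Illustrative only: the carry handling above can be sanity checked against
// a product that overflows 64 bits.
//
//	hi, lo := mathutil.MulUint128_64(math.MaxUint64, 2)
//	// hi == 1, lo == math.MaxUint64 - 1, i.e. 2^65 - 2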
|
||||
|
||||
// PowerizeBigInt returns (e, p) such that e is the smallest number for which
// p == b^e is greater than or equal to n. For n < 0 or b < 2, (0, nil) is
// returned.
//
// NOTE: Run time for large values of n (above about 2^1e6 ~= 1e300000) can be
// significant and/or unacceptable. For any smaller value of n the function
// typically performs in sub-second time. For "small" values of n (circa below
// 2^1e3 ~= 1e300) the same can easily be below 10 µs.
//
// A special (and trivial) case of b == 2 is handled separately and performs
// much faster.
|
||||
func PowerizeBigInt(b, n *big.Int) (e uint32, p *big.Int) {
|
||||
switch {
|
||||
case b.Cmp(_2) < 0 || n.Sign() < 0:
|
||||
return
|
||||
case n.Sign() == 0 || n.Cmp(_1) == 0:
|
||||
return 0, big.NewInt(1)
|
||||
case b.Cmp(_2) == 0:
|
||||
p = big.NewInt(0)
|
||||
e = uint32(n.BitLen() - 1)
|
||||
p.SetBit(p, int(e), 1)
|
||||
if p.Cmp(n) < 0 {
|
||||
p.Mul(p, _2)
|
||||
e++
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
bw := b.BitLen()
|
||||
nw := n.BitLen()
|
||||
p = big.NewInt(1)
|
||||
var bb, r big.Int
|
||||
for {
|
||||
switch p.Cmp(n) {
|
||||
case -1:
|
||||
x := uint32((nw - p.BitLen()) / bw)
|
||||
if x == 0 {
|
||||
x = 1
|
||||
}
|
||||
e += x
|
||||
switch x {
|
||||
case 1:
|
||||
p.Mul(p, b)
|
||||
default:
|
||||
r.Set(_1)
|
||||
bb.Set(b)
|
||||
e := x
|
||||
for {
|
||||
if e&1 != 0 {
|
||||
r.Mul(&r, &bb)
|
||||
}
|
||||
if e >>= 1; e == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
bb.Mul(&bb, &bb)
|
||||
}
|
||||
p.Mul(p, &r)
|
||||
}
|
||||
case 0, 1:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
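// Illustrative only: per the contract documented above,
//
//	e, p := mathutil.PowerizeBigInt(big.NewInt(3), big.NewInt(10))
//	// e == 3, p == 27: 3^3 is the smallest power of 3 that is >= 10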
|
||||
|
||||
// PowerizeUint32BigInt returns (e, p) such that e is the smallest number for
// which p == b^e is greater than or equal to n. For n < 0 or b < 2, (0, nil)
// is returned.
//
// More info: see PowerizeBigInt.
|
||||
func PowerizeUint32BigInt(b uint32, n *big.Int) (e uint32, p *big.Int) {
|
||||
switch {
|
||||
case b < 2 || n.Sign() < 0:
|
||||
return
|
||||
case n.Sign() == 0 || n.Cmp(_1) == 0:
|
||||
return 0, big.NewInt(1)
|
||||
case b == 2:
|
||||
p = big.NewInt(0)
|
||||
e = uint32(n.BitLen() - 1)
|
||||
p.SetBit(p, int(e), 1)
|
||||
if p.Cmp(n) < 0 {
|
||||
p.Mul(p, _2)
|
||||
e++
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var bb big.Int
|
||||
bb.SetInt64(int64(b))
|
||||
return PowerizeBigInt(&bb, n)
|
||||
}
|
||||
|
||||
/*
|
||||
ProbablyPrimeUint32 returns true if n is prime or n is a pseudoprime to base a.
|
||||
It implements the Miller-Rabin primality test for one specific value of 'a' and
|
||||
k == 1.
|
||||
|
||||
Wrt pseudocode shown at
|
||||
http://en.wikipedia.org/wiki/Miller-Rabin_primality_test#Algorithm_and_running_time
|
||||
|
||||
Input: n > 3, an odd integer to be tested for primality;
|
||||
Input: k, a parameter that determines the accuracy of the test
|
||||
Output: composite if n is composite, otherwise probably prime
|
||||
write n − 1 as 2^s·d with d odd by factoring powers of 2 from n − 1
|
||||
LOOP: repeat k times:
|
||||
pick a random integer a in the range [2, n − 2]
|
||||
x ← a^d mod n
|
||||
if x = 1 or x = n − 1 then do next LOOP
|
||||
for r = 1 .. s − 1
|
||||
x ← x^2 mod n
|
||||
if x = 1 then return composite
|
||||
if x = n − 1 then do next LOOP
|
||||
return composite
|
||||
return probably prime
|
||||
|
||||
... this function behaves like passing 1 for 'k' and additionally a
|
||||
fixed/non-random 'a'. Otherwise it's the same algorithm.
|
||||
|
||||
See also: http://mathworld.wolfram.com/Rabin-MillerStrongPseudoprimeTest.html
|
||||
*/
|
||||
func ProbablyPrimeUint32(n, a uint32) bool {
|
||||
d, s := n-1, 0
|
||||
for ; d&1 == 0; d, s = d>>1, s+1 {
|
||||
}
|
||||
x := uint64(ModPowUint32(a, d, n))
|
||||
if x == 1 || uint32(x) == n-1 {
|
||||
return true
|
||||
}
|
||||
|
||||
for ; s > 1; s-- {
|
||||
if x = x * x % uint64(n); x == 1 {
|
||||
return false
|
||||
}
|
||||
|
||||
if uint32(x) == n-1 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
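// Illustrative only: as the documentation above notes, a single fixed base
// can be fooled by strong pseudoprimes, so a "true" result is only probable.
//
//	mathutil.ProbablyPrimeUint32(2147483647, 2) // true, 2^31-1 is prime
//	mathutil.ProbablyPrimeUint32(2047, 2)       // true, yet 2047 == 23*89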
|
||||
|
||||
// ProbablyPrimeUint64_32 returns true if n is prime or n is a pseudoprime to
|
||||
// base a. It implements the Miller-Rabin primality test for one specific value
|
||||
// of 'a' and k == 1. See also ProbablyPrimeUint32.
|
||||
func ProbablyPrimeUint64_32(n uint64, a uint32) bool {
|
||||
d, s := n-1, 0
|
||||
for ; d&1 == 0; d, s = d>>1, s+1 {
|
||||
}
|
||||
x := ModPowUint64(uint64(a), d, n)
|
||||
if x == 1 || x == n-1 {
|
||||
return true
|
||||
}
|
||||
|
||||
bx, bn := big.NewInt(0).SetUint64(x), big.NewInt(0).SetUint64(n)
|
||||
for ; s > 1; s-- {
|
||||
if x = bx.Mod(bx.Mul(bx, bx), bn).Uint64(); x == 1 {
|
||||
return false
|
||||
}
|
||||
|
||||
if x == n-1 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ProbablyPrimeBigInt_32 returns true if n is prime or n is a pseudoprime to
|
||||
// base a. It implements the Miller-Rabin primality test for one specific value
|
||||
// of 'a' and k == 1. See also ProbablyPrimeUint32.
|
||||
func ProbablyPrimeBigInt_32(n *big.Int, a uint32) bool {
|
||||
var d big.Int
|
||||
d.Set(n)
|
||||
d.Sub(&d, _1) // d <- n-1
|
||||
s := 0
|
||||
for ; d.Bit(s) == 0; s++ {
|
||||
}
|
||||
nMinus1 := big.NewInt(0).Set(&d)
|
||||
d.Rsh(&d, uint(s))
|
||||
|
||||
x := ModPowBigInt(big.NewInt(int64(a)), &d, n)
|
||||
if x.Cmp(_1) == 0 || x.Cmp(nMinus1) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
for ; s > 1; s-- {
|
||||
if x = x.Mod(x.Mul(x, x), n); x.Cmp(_1) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
if x.Cmp(nMinus1) == 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ProbablyPrimeBigInt returns true if n is prime or n is a pseudoprime to base
|
||||
// a. It implements the Miller-Rabin primality test for one specific value of
|
||||
// 'a' and k == 1. See also ProbablyPrimeUint32.
|
||||
func ProbablyPrimeBigInt(n, a *big.Int) bool {
|
||||
var d big.Int
|
||||
d.Set(n)
|
||||
d.Sub(&d, _1) // d <- n-1
|
||||
s := 0
|
||||
for ; d.Bit(s) == 0; s++ {
|
||||
}
|
||||
nMinus1 := big.NewInt(0).Set(&d)
|
||||
d.Rsh(&d, uint(s))
|
||||
|
||||
x := ModPowBigInt(a, &d, n)
|
||||
if x.Cmp(_1) == 0 || x.Cmp(nMinus1) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
for ; s > 1; s-- {
|
||||
if x = x.Mod(x.Mul(x, x), n); x.Cmp(_1) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
if x.Cmp(nMinus1) == 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Max returns the larger of a and b.
|
||||
func Max(a, b int) int {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// Min returns the smaller of a and b.
|
||||
func Min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// UMax returns the larger of a and b.
|
||||
func UMax(a, b uint) uint {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// UMin returns the smaller of a and b.
|
||||
func UMin(a, b uint) uint {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// MaxByte returns the larger of a and b.
|
||||
func MaxByte(a, b byte) byte {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// MinByte returns the smaller of a and b.
|
||||
func MinByte(a, b byte) byte {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// MaxInt8 returns the larger of a and b.
|
||||
func MaxInt8(a, b int8) int8 {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// MinInt8 returns the smaller of a and b.
|
||||
func MinInt8(a, b int8) int8 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// MaxUint16 returns the larger of a and b.
|
||||
func MaxUint16(a, b uint16) uint16 {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// MinUint16 returns the smaller of a and b.
|
||||
func MinUint16(a, b uint16) uint16 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// MaxInt16 returns the larger of a and b.
|
||||
func MaxInt16(a, b int16) int16 {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// MinInt16 returns the smaller of a and b.
|
||||
func MinInt16(a, b int16) int16 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// MaxUint32 returns the larger of a and b.
|
||||
func MaxUint32(a, b uint32) uint32 {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// MinUint32 returns the smaller of a and b.
|
||||
func MinUint32(a, b uint32) uint32 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// MaxInt32 returns the larger of a and b.
|
||||
func MaxInt32(a, b int32) int32 {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// MinInt32 returns the smaller of a and b.
|
||||
func MinInt32(a, b int32) int32 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// MaxUint64 returns the larger of a and b.
|
||||
func MaxUint64(a, b uint64) uint64 {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// MinUint64 returns the smaller of a and b.
|
||||
func MinUint64(a, b uint64) uint64 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// MaxInt64 returns the larger of a and b.
|
||||
func MaxInt64(a, b int64) int64 {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// MinInt64 returns the smaller of a and b.
|
||||
func MinInt64(a, b int64) int64 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// ToBase produces n in base b. For example
|
||||
//
|
||||
// ToBase(2047, 22) -> [1, 5, 4]
|
||||
//
|
||||
// 1 * 22^0 1
|
||||
// 5 * 22^1 110
|
||||
// 4 * 22^2 1936
|
||||
// ----
|
||||
// 2047
|
||||
//
|
||||
// ToBase panics for bases < 2.
|
||||
func ToBase(n *big.Int, b int) []int {
|
||||
var nn big.Int
|
||||
nn.Set(n)
|
||||
if b < 2 {
|
||||
panic("invalid base")
|
||||
}
|
||||
|
||||
k := 1
|
||||
switch nn.Sign() {
|
||||
case -1:
|
||||
nn.Neg(&nn)
|
||||
k = -1
|
||||
case 0:
|
||||
return []int{0}
|
||||
}
|
||||
|
||||
bb := big.NewInt(int64(b))
|
||||
var r []int
|
||||
rem := big.NewInt(0)
|
||||
for nn.Sign() != 0 {
|
||||
nn.QuoRem(&nn, bb, rem)
|
||||
r = append(r, k*int(rem.Int64()))
|
||||
}
|
||||
return r
|
||||
}
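// Illustrative only: the digits come out least significant first, matching
// the worked example in the comment above.
//
//	mathutil.ToBase(big.NewInt(2047), 22) // [1 5 4]
//	mathutil.ToBase(big.NewInt(255), 16)  // [15 15]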
|
297
vendor/github.com/cznic/mathutil/mersenne/mersenne.go
generated
vendored
Normal file
@ -0,0 +1,297 @@
|
||||
// Copyright (c) 2014 The mersenne Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package mersenne collects utilities related to Mersenne numbers[1] and/or some
|
||||
of their properties.
|
||||
|
||||
Exponent
|
||||
|
||||
In this documentation the term 'exponent' refers to 'n' of a Mersenne number Mn
|
||||
equal to 2^n-1. This package supports only uint32 sized exponents. New()
|
||||
currently supports exponents only up to math.MaxInt32 (31 bits, up to 256 MB
|
||||
required to represent such Mn in memory as a big.Int).
|
||||
|
||||
Links
|
||||
|
||||
Referenced from above:
|
||||
[1] http://en.wikipedia.org/wiki/Mersenne_number
|
||||
*/
|
||||
package mersenne
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/big"
|
||||
|
||||
"github.com/cznic/mathutil"
|
||||
"github.com/remyoudompheng/bigfft"
|
||||
)
|
||||
|
||||
var (
|
||||
_0 = big.NewInt(0)
|
||||
_1 = big.NewInt(1)
|
||||
_2 = big.NewInt(2)
|
||||
)
|
||||
|
||||
// Knowns lists the exponents of the currently (March 2012) known Mersenne
// primes in order. See also: http://oeis.org/A000043 for a partial list.
|
||||
var Knowns = []uint32{
|
||||
2, // #1
|
||||
3, // #2
|
||||
5, // #3
|
||||
7, // #4
|
||||
13, // #5
|
||||
17, // #6
|
||||
19, // #7
|
||||
31, // #8
|
||||
61, // #9
|
||||
89, // #10
|
||||
|
||||
107, // #11
|
||||
127, // #12
|
||||
521, // #13
|
||||
607, // #14
|
||||
1279, // #15
|
||||
2203, // #16
|
||||
2281, // #17
|
||||
3217, // #18
|
||||
4253, // #19
|
||||
4423, // #20
|
||||
|
||||
9689, // #21
|
||||
9941, // #22
|
||||
11213, // #23
|
||||
19937, // #24
|
||||
21701, // #25
|
||||
23209, // #26
|
||||
44497, // #27
|
||||
86243, // #28
|
||||
110503, // #29
|
||||
132049, // #30
|
||||
|
||||
216091, // #31
|
||||
756839, // #32
|
||||
859433, // #33
|
||||
1257787, // #34
|
||||
1398269, // #35
|
||||
2976221, // #36
|
||||
3021377, // #37
|
||||
6972593, // #38
|
||||
13466917, // #39
|
||||
20996011, // #40
|
||||
|
||||
24036583, // #41
|
||||
25964951, // #42
|
||||
30402457, // #43
|
||||
32582657, // #44
|
||||
37156667, // #45
|
||||
42643801, // #46
|
||||
43112609, // #47
|
||||
57885161, // #48
|
||||
74207281, // #49
|
||||
}
|
||||
|
||||
// Known maps the exponent of a known Mersenne prime to its ordinal number/rank.
|
||||
// Ranks > 41 are currently provisional.
|
||||
var Known map[uint32]int
|
||||
|
||||
func init() {
|
||||
Known = map[uint32]int{}
|
||||
for i, v := range Knowns {
|
||||
Known[v] = i + 1
|
||||
}
|
||||
}
|
||||
|
||||
// New returns Mn == 2^n-1 for n <= math.MaxInt32 or nil otherwise.
|
||||
func New(n uint32) (m *big.Int) {
|
||||
if n > math.MaxInt32 {
|
||||
return
|
||||
}
|
||||
|
||||
m = big.NewInt(0)
|
||||
return m.Sub(m.SetBit(m, int(n), 1), _1)
|
||||
}
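// Illustrative only, assuming the github.com/cznic/mathutil/mersenne import
// path used in this diff:
//
//	mersenne.New(7)    // *big.Int 127, i.e. M7 == 2^7 - 1
//	mersenne.Known[13] // 5: M13 is the 5th known Mersenne prime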
|
||||
|
||||
// HasFactorUint32 returns true if d | Mn. Typical run time for a 32 bit factor
|
||||
// and a 32 bit exponent is < 1 µs.
|
||||
func HasFactorUint32(d, n uint32) bool {
|
||||
return d == 1 || d&1 != 0 && mathutil.ModPowUint32(2, n, d) == 1
|
||||
}
|
||||
|
||||
// HasFactorUint64 returns true if d | Mn. Typical run time for a 64 bit factor
|
||||
// and a 32 bit exponent is < 30 µs.
|
||||
func HasFactorUint64(d uint64, n uint32) bool {
|
||||
return d == 1 || d&1 != 0 && mathutil.ModPowUint64(2, uint64(n), d) == 1
|
||||
}
|
||||
|
||||
// HasFactorBigInt returns true if d | Mn, d > 0. Typical run time for a 128
|
||||
// bit factor and a 32 bit exponent is < 75 µs.
|
||||
func HasFactorBigInt(d *big.Int, n uint32) bool {
|
||||
return d.Cmp(_1) == 0 || d.Sign() > 0 && d.Bit(0) == 1 &&
|
||||
mathutil.ModPowBigInt(_2, big.NewInt(int64(n)), d).Cmp(_1) == 0
|
||||
}
|
||||
|
||||
// HasFactorBigInt2 returns true if d | Mn, d > 0
|
||||
func HasFactorBigInt2(d, n *big.Int) bool {
|
||||
return d.Cmp(_1) == 0 || d.Sign() > 0 && d.Bit(0) == 1 &&
|
||||
mathutil.ModPowBigInt(_2, n, d).Cmp(_1) == 0
|
||||
}
|
||||
|
||||
/*
|
||||
FromFactorBigInt returns n such that d | Mn if n <= max and d is odd. In other
|
||||
cases zero is returned.
|
||||
|
||||
It is conjectured that every odd d ∊ N divides infinitely many Mersenne numbers.
|
||||
The returned n should be the exponent of smallest such Mn.
|
||||
|
||||
NOTE: The computation of n from a given d performs roughly in O(n). It is
|
||||
thus highly recommended to use the 'max' argument to limit the "searched"
|
||||
exponent upper bound as appropriate. Otherwise the computation can take a long
|
||||
time as a large factor can be a divisor of a Mn with exponent above the uint32
|
||||
limits.
|
||||
|
||||
The FromFactorBigInt function is a modification of the original Will
|
||||
Edgington's "reverse method", discussed here:
|
||||
http://tech.groups.yahoo.com/group/primenumbers/message/15061
|
||||
*/
|
||||
func FromFactorBigInt(d *big.Int, max uint32) (n uint32) {
|
||||
if d.Bit(0) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
var m big.Int
|
||||
for n < max {
|
||||
m.Add(&m, d)
|
||||
i := 0
|
||||
for ; m.Bit(i) == 1; i++ {
|
||||
if n == math.MaxUint32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
n++
|
||||
}
|
||||
m.Rsh(&m, uint(i))
|
||||
if m.Sign() == 0 {
|
||||
if n > max {
|
||||
n = 0
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Mod sets mod to n % Mexp and returns mod. It panics for exp == 0 || exp >=
|
||||
// math.MaxInt32 || n < 0.
|
||||
func Mod(mod, n *big.Int, exp uint32) *big.Int {
|
||||
if exp == 0 || exp >= math.MaxInt32 || n.Sign() < 0 {
|
||||
panic(0)
|
||||
}
|
||||
|
||||
m := New(exp)
|
||||
mod.Set(n)
|
||||
var x big.Int
|
||||
for mod.BitLen() > int(exp) {
|
||||
x.Set(mod)
|
||||
x.Rsh(&x, uint(exp))
|
||||
mod.And(mod, m)
|
||||
mod.Add(mod, &x)
|
||||
}
|
||||
if mod.BitLen() == int(exp) && mod.Cmp(m) == 0 {
|
||||
mod.SetInt64(0)
|
||||
}
|
||||
return mod
|
||||
}
|
||||
|
||||
// ModPow2 returns x such that 2^Me % Mm == 2^x. It panics for m < 2. Typical
|
||||
// run time is < 1 µs. Use instead of ModPow(2, e, m) wherever possible.
|
||||
func ModPow2(e, m uint32) (x uint32) {
|
||||
/*
|
||||
m < 2 -> panic
|
||||
e == 0 -> x == 0
|
||||
e == 1 -> x == 1
|
||||
|
||||
2^M1 % M2 == 2^1 % 3 == 2^1 10 // 2^1, 3, 5, 7 ... +2k
|
||||
2^M1 % M3 == 2^1 % 7 == 2^1 010 // 2^1, 4, 7, ... +3k
|
||||
2^M1 % M4 == 2^1 % 15 == 2^1 0010 // 2^1, 5, 9, 13... +4k
|
||||
2^M1 % M5 == 2^1 % 31 == 2^1 00010 // 2^1, 6, 11, 16... +5k
|
||||
|
||||
2^M2 % M2 == 2^3 % 3 == 2^1 10.. // 2^3, 5, 7, 9, 11, ... +2k
|
||||
2^M2 % M3 == 2^3 % 7 == 2^0 001... // 2^3, 6, 9, 12, 15, ... +3k
|
||||
2^M2 % M4 == 2^3 % 15 == 2^3 1000 // 2^3, 7, 11, 15, 19, ... +4k
|
||||
2^M2 % M5 == 2^3 % 31 == 2^3 01000 // 2^3, 8, 13, 18, 23, ... +5k
|
||||
|
||||
2^M3 % M2 == 2^7 % 3 == 2^1 10..--.. // 2^3, 5, 7... +2k
|
||||
2^M3 % M3 == 2^7 % 7 == 2^1 010...--- // 2^1, 4, 7... +3k
|
||||
2^M3 % M4 == 2^7 % 15 == 2^3 1000.... // +4k
|
||||
2^M3 % M5 == 2^7 % 31 == 2^2 00100..... // +5k
|
||||
2^M3 % M6 == 2^7 % 63 == 2^1 000010...... // +6k
|
||||
2^M3 % M7 == 2^7 % 127 == 2^0 0000001.......
|
||||
2^M3 % M8 == 2^7 % 255 == 2^7 10000000
|
||||
2^M3 % M9 == 2^7 % 511 == 2^7 010000000
|
||||
|
||||
2^M4 % M2 == 2^15 % 3 == 2^1 10..--..--..--..
|
||||
2^M4 % M3 == 2^15 % 7 == 2^0 1...---...---...
|
||||
2^M4 % M4 == 2^15 % 15 == 2^3 1000....----....
|
||||
2^M4 % M5 == 2^15 % 31 == 2^0 1.....-----.....
|
||||
2^M4 % M6 == 2^15 % 63 == 2^3 1000......------
|
||||
2^M4 % M7 == 2^15 % 127 == 2^1 10.......-------
|
||||
2^M4 % M8 == 2^15 % 255 == 2^7 10000000........
|
||||
2^M4 % M9 == 2^15 % 511 == 2^6 1000000.........
|
||||
*/
|
||||
switch {
|
||||
case m < 2:
|
||||
panic(0)
|
||||
case e < 2:
|
||||
return e
|
||||
}
|
||||
|
||||
if x = mathutil.ModPowUint32(2, e, m); x == 0 {
|
||||
return m - 1
|
||||
}
|
||||
|
||||
return x - 1
|
||||
}
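// Illustrative only: this matches the third pattern row in the comment above,
// 2^M3 % M5 == 2^7 % 31 == 128 % 31 == 4 == 2^2.
//
//	mersenne.ModPow2(3, 5) // 2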
|
||||
|
||||
// ModPow returns b^Me % Mm. Run time grows quickly with 'e' and/or 'm' when b
|
||||
// != 2 (then ModPow2 is used).
|
||||
func ModPow(b, e, m uint32) (r *big.Int) {
|
||||
if m == 1 {
|
||||
return big.NewInt(0)
|
||||
}
|
||||
|
||||
if b == 2 {
|
||||
x := ModPow2(e, m)
|
||||
r = big.NewInt(0)
|
||||
r.SetBit(r, int(x), 1)
|
||||
return
|
||||
}
|
||||
|
||||
bb := big.NewInt(int64(b))
|
||||
r = big.NewInt(1)
|
||||
for ; e != 0; e-- {
|
||||
r = bigfft.Mul(r, bb)
|
||||
Mod(r, r, m)
|
||||
bb = bigfft.Mul(bb, bb)
|
||||
Mod(bb, bb, m)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ProbablyPrime returns true if Mn is prime or is a pseudoprime to base a.
|
||||
// Note: Every Mp, prime p, is a prime or is a pseudoprime to base 2, actually
|
||||
// to every base 2^i, i ∊ [1, p). In contrast - it is conjectured (w/o any
|
||||
// known counterexamples) that no composite Mp, prime p, is a pseudoprime to
|
||||
// base 3.
|
||||
func ProbablyPrime(n, a uint32) bool {
|
||||
//TODO +test, +bench
|
||||
if a == 2 {
|
||||
return ModPow2(n-1, n) == 0
|
||||
}
|
||||
|
||||
nMinus1 := New(n)
|
||||
nMinus1.Sub(nMinus1, _1)
|
||||
x := ModPow(a, n-1, n)
|
||||
return x.Cmp(_1) == 0 || x.Cmp(nMinus1) == 0
|
||||
}
|
39
vendor/github.com/cznic/mathutil/permute.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
|
||||
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mathutil
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Generate the first permutation of data.
|
||||
func PermutationFirst(data sort.Interface) {
|
||||
sort.Sort(data)
|
||||
}
|
||||
|
||||
// Generate the next permutation of data if possible and return true.
|
||||
// Return false if there are no more permutations left.
|
||||
// Based on the algorithm described here:
|
||||
// http://en.wikipedia.org/wiki/Permutation#Generation_in_lexicographic_order
|
||||
func PermutationNext(data sort.Interface) bool {
|
||||
var k, l int
|
||||
for k = data.Len() - 2; ; k-- { // 1.
|
||||
if k < 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
if data.Less(k, k+1) {
|
||||
break
|
||||
}
|
||||
}
|
||||
for l = data.Len() - 1; !data.Less(k, l); l-- { // 2.
|
||||
}
|
||||
data.Swap(k, l) // 3.
|
||||
for i, j := k+1, data.Len()-1; i < j; i++ { // 4.
|
||||
data.Swap(i, j)
|
||||
j--
|
||||
}
|
||||
return true
|
||||
}
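// Illustrative only: stepping the lexicographic order once, assuming the
// github.com/cznic/mathutil import path.
//
//	s := sort.IntSlice{1, 2, 3}
//	mathutil.PermutationNext(s) // true; s is now [1 3 2]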
|
335
vendor/github.com/cznic/mathutil/primes.go
generated
vendored
Normal file
@ -0,0 +1,335 @@
|
||||
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mathutil
|
||||
|
||||
import (
|
||||
"math"
|
||||
)
|
||||
|
||||
// IsPrimeUint16 returns true if n is prime. Typical run time is a few ns.
|
||||
func IsPrimeUint16(n uint16) bool {
|
||||
return n > 0 && primes16[n-1] == 1
|
||||
}
|
||||
|
||||
// NextPrimeUint16 returns first prime > n and true if successful or an
|
||||
// undefined value and false if there is no next prime in the uint16 limits.
|
||||
// Typical run time is a few ns.
|
||||
func NextPrimeUint16(n uint16) (p uint16, ok bool) {
|
||||
return n + uint16(primes16[n]), n < 65521
|
||||
}
|
||||
|
||||
// IsPrime returns true if n is prime. Typical run time is about 100 ns.
|
||||
//
|
||||
//TODO rename to IsPrimeUint32
|
||||
func IsPrime(n uint32) bool {
|
||||
switch {
|
||||
case n&1 == 0:
|
||||
return n == 2
|
||||
case n%3 == 0:
|
||||
return n == 3
|
||||
case n%5 == 0:
|
||||
return n == 5
|
||||
case n%7 == 0:
|
||||
return n == 7
|
||||
case n%11 == 0:
|
||||
return n == 11
|
||||
case n%13 == 0:
|
||||
return n == 13
|
||||
case n%17 == 0:
|
||||
return n == 17
|
||||
case n%19 == 0:
|
||||
return n == 19
|
||||
case n%23 == 0:
|
||||
return n == 23
|
||||
case n%29 == 0:
|
||||
return n == 29
|
||||
case n%31 == 0:
|
||||
return n == 31
|
||||
case n%37 == 0:
|
||||
return n == 37
|
||||
case n%41 == 0:
|
||||
return n == 41
|
||||
case n%43 == 0:
|
||||
return n == 43
|
||||
case n%47 == 0:
|
||||
return n == 47
|
||||
case n%53 == 0:
|
||||
return n == 53 // Benchmarked optimum
|
||||
case n < 65536:
|
||||
// use table data
|
||||
return IsPrimeUint16(uint16(n))
|
||||
default:
|
||||
mod := ModPowUint32(2, (n+1)/2, n)
|
||||
if mod != 2 && mod != n-2 {
|
||||
return false
|
||||
}
|
||||
blk := &lohi[n>>24]
|
||||
lo, hi := blk.lo, blk.hi
|
||||
for lo <= hi {
|
||||
index := (lo + hi) >> 1
|
||||
liar := liars[index]
|
||||
switch {
|
||||
case n > liar:
|
||||
lo = index + 1
|
||||
case n < liar:
|
||||
hi = index - 1
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// IsPrimeUint64 returns true if n is prime. Typical run time is a few tens of µs.
|
||||
//
|
||||
// SPRP bases: http://miller-rabin.appspot.com
|
||||
func IsPrimeUint64(n uint64) bool {
|
||||
switch {
|
||||
case n%2 == 0:
|
||||
return n == 2
|
||||
case n%3 == 0:
|
||||
return n == 3
|
||||
case n%5 == 0:
|
||||
return n == 5
|
||||
case n%7 == 0:
|
||||
return n == 7
|
||||
case n%11 == 0:
|
||||
return n == 11
|
||||
case n%13 == 0:
|
||||
return n == 13
|
||||
case n%17 == 0:
|
||||
return n == 17
|
||||
case n%19 == 0:
|
||||
return n == 19
|
||||
case n%23 == 0:
|
||||
return n == 23
|
||||
case n%29 == 0:
|
||||
return n == 29
|
||||
case n%31 == 0:
|
||||
return n == 31
|
||||
case n%37 == 0:
|
||||
return n == 37
|
||||
case n%41 == 0:
|
||||
return n == 41
|
||||
case n%43 == 0:
|
||||
return n == 43
|
||||
case n%47 == 0:
|
||||
return n == 47
|
||||
case n%53 == 0:
|
||||
return n == 53
|
||||
case n%59 == 0:
|
||||
return n == 59
|
||||
case n%61 == 0:
|
||||
return n == 61
|
||||
case n%67 == 0:
|
||||
return n == 67
|
||||
case n%71 == 0:
|
||||
return n == 71
|
||||
case n%73 == 0:
|
||||
return n == 73
|
||||
case n%79 == 0:
|
||||
return n == 79
|
||||
case n%83 == 0:
|
||||
return n == 83
|
||||
case n%89 == 0:
|
||||
return n == 89 // Benchmarked optimum
|
||||
case n <= math.MaxUint16:
|
||||
return IsPrimeUint16(uint16(n))
|
||||
case n <= math.MaxUint32:
|
||||
return ProbablyPrimeUint32(uint32(n), 11000544) &&
|
||||
ProbablyPrimeUint32(uint32(n), 31481107)
|
||||
case n < 105936894253:
|
||||
return ProbablyPrimeUint64_32(n, 2) &&
|
||||
ProbablyPrimeUint64_32(n, 1005905886) &&
|
||||
ProbablyPrimeUint64_32(n, 1340600841)
|
||||
case n < 31858317218647:
|
||||
return ProbablyPrimeUint64_32(n, 2) &&
|
||||
ProbablyPrimeUint64_32(n, 642735) &&
|
||||
ProbablyPrimeUint64_32(n, 553174392) &&
|
||||
ProbablyPrimeUint64_32(n, 3046413974)
|
||||
case n < 3071837692357849:
|
||||
return ProbablyPrimeUint64_32(n, 2) &&
|
||||
ProbablyPrimeUint64_32(n, 75088) &&
|
||||
ProbablyPrimeUint64_32(n, 642735) &&
|
||||
ProbablyPrimeUint64_32(n, 203659041) &&
|
||||
ProbablyPrimeUint64_32(n, 3613982119)
|
||||
default:
|
||||
return ProbablyPrimeUint64_32(n, 2) &&
|
||||
ProbablyPrimeUint64_32(n, 325) &&
|
||||
ProbablyPrimeUint64_32(n, 9375) &&
|
||||
ProbablyPrimeUint64_32(n, 28178) &&
|
||||
ProbablyPrimeUint64_32(n, 450775) &&
|
||||
ProbablyPrimeUint64_32(n, 9780504) &&
|
||||
ProbablyPrimeUint64_32(n, 1795265022)
|
||||
}
|
||||
}
|
||||
|
||||
// NextPrime returns first prime > n and true if successful or an undefined value and false if there
|
||||
// is no next prime in the uint32 limits. Typical run time is about 2 µs.
|
||||
//
|
||||
//TODO rename to NextPrimeUint32
|
||||
func NextPrime(n uint32) (p uint32, ok bool) {
|
||||
switch {
|
||||
case n < 65521:
|
||||
p16, _ := NextPrimeUint16(uint16(n))
|
||||
return uint32(p16), true
|
||||
case n >= math.MaxUint32-4:
|
||||
return
|
||||
}
|
||||
|
||||
n++
|
||||
var d0, d uint32
|
||||
switch mod := n % 6; mod {
|
||||
case 0:
|
||||
d0, d = 1, 4
|
||||
case 1:
|
||||
d = 4
|
||||
case 2, 3, 4:
|
||||
d0, d = 5-mod, 2
|
||||
case 5:
|
||||
d = 2
|
||||
}
|
||||
|
||||
p = n + d0
|
||||
if p < n { // overflow
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
if IsPrime(p) {
|
||||
return p, true
|
||||
}
|
||||
|
||||
p0 := p
|
||||
p += d
|
||||
if p < p0 { // overflow
|
||||
break
|
||||
}
|
||||
|
||||
d ^= 6
|
||||
}
|
||||
return
|
||||
}
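// Illustrative only:
//
//	mathutil.NextPrime(100)   // 101, true (via the 16 bit table)
//	mathutil.NextPrime(65521) // 65537, true (via the mod-6 stepping above)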
|
||||
|
||||
// NextPrimeUint64 returns first prime > n and true if successful or an undefined value and false if there
|
||||
// is no next prime in the uint64 limits. Typical run time is in hundreds of µs.
|
||||
func NextPrimeUint64(n uint64) (p uint64, ok bool) {
|
||||
switch {
|
||||
case n < 65521:
|
||||
p16, _ := NextPrimeUint16(uint16(n))
|
||||
return uint64(p16), true
|
||||
case n >= 18446744073709551557: // last uint64 prime
|
||||
return
|
||||
}
|
||||
|
||||
n++
|
||||
var d0, d uint64
|
||||
switch mod := n % 6; mod {
|
||||
case 0:
|
||||
d0, d = 1, 4
|
||||
case 1:
|
||||
d = 4
|
||||
case 2, 3, 4:
|
||||
d0, d = 5-mod, 2
|
||||
case 5:
|
||||
d = 2
|
||||
}
|
||||
|
||||
p = n + d0
|
||||
if p < n { // overflow
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
if ok = IsPrimeUint64(p); ok {
|
||||
break
|
||||
}
|
||||
|
||||
p0 := p
|
||||
p += d
|
||||
if p < p0 { // overflow
|
||||
break
|
||||
}
|
||||
|
||||
d ^= 6
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// FactorTerm is one term of an integer factorization.
|
||||
type FactorTerm struct {
|
||||
Prime uint32 // The divisor
|
||||
Power uint32 // Term == Prime^Power
|
||||
}
|
||||
|
||||
// FactorTerms represent a factorization of an integer
|
||||
type FactorTerms []FactorTerm
|
||||
|
||||
// FactorInt returns prime factorization of n > 1 or nil otherwise.
|
||||
// Resulting factors are ordered by Prime. Typical run time is a few µs.
|
||||
func FactorInt(n uint32) (f FactorTerms) {
|
||||
switch {
|
||||
case n < 2:
|
||||
return
|
||||
case IsPrime(n):
|
||||
return []FactorTerm{{n, 1}}
|
||||
}
|
||||
|
||||
f, w := make([]FactorTerm, 9), 0
|
||||
for p := 2; p < len(primes16); p += int(primes16[p]) {
|
||||
if uint(p*p) > uint(n) {
|
||||
break
|
||||
}
|
||||
|
||||
power := uint32(0)
|
||||
for n%uint32(p) == 0 {
|
||||
n /= uint32(p)
|
||||
power++
|
||||
}
|
||||
if power != 0 {
|
||||
f[w] = FactorTerm{uint32(p), power}
|
||||
w++
|
||||
}
|
||||
if n == 1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if n != 1 {
|
||||
f[w] = FactorTerm{n, 1}
|
||||
w++
|
||||
}
|
||||
return f[:w]
|
||||
}
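// Illustrative only: trial division by the tabulated 16 bit primes yields,
// for example,
//
//	mathutil.FactorInt(360) // [{2 3} {3 2} {5 1}], i.e. 2^3 * 3^2 * 5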
|
||||
|
||||
// PrimorialProductsUint32 returns a slice of numbers in [lo, hi] which are a
|
||||
// product of max 'max' primorials. The slice is not sorted.
|
||||
//
|
||||
// See also: http://en.wikipedia.org/wiki/Primorial
|
||||
func PrimorialProductsUint32(lo, hi, max uint32) (r []uint32) {
|
||||
lo64, hi64 := int64(lo), int64(hi)
|
||||
if max > 31 { // N/A
|
||||
max = 31
|
||||
}
|
||||
|
||||
var f func(int64, int64, uint32)
|
||||
f = func(n, p int64, emax uint32) {
|
||||
e := uint32(1)
|
||||
for n <= hi64 && e <= emax {
|
||||
n *= p
|
||||
if n >= lo64 && n <= hi64 {
|
||||
r = append(r, uint32(n))
|
||||
}
|
||||
if n < hi64 {
|
||||
p, _ := NextPrime(uint32(p))
|
||||
f(n, int64(p), e)
|
||||
}
|
||||
e++
|
||||
}
|
||||
}
|
||||
|
||||
f(1, 2, max)
|
||||
return
|
||||
}
|
27
vendor/github.com/cznic/mathutil/rat.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mathutil
|
||||
|
||||
// QCmpUint32 compares a/b and c/d and returns:
|
||||
//
|
||||
// -1 if a/b < c/d
|
||||
// 0 if a/b == c/d
|
||||
// +1 if a/b > c/d
|
||||
//
|
||||
func QCmpUint32(a, b, c, d uint32) int {
|
||||
switch x, y := uint64(a)*uint64(d), uint64(b)*uint64(c); {
|
||||
case x < y:
|
||||
return -1
|
||||
case x == y:
|
||||
return 0
|
||||
default: // x > y
|
||||
return 1
|
||||
}
|
||||
}
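// Illustrative only: the cross multiplication above avoids intermediate
// floating point, e.g.
//
//	mathutil.QCmpUint32(1, 2, 2, 3) // -1, 1/2 < 2/3
//	mathutil.QCmpUint32(3, 4, 6, 8) // 0,  3/4 == 6/8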
|
||||
|
||||
// QScaleUint32 returns a such that a/b >= c/d.
|
||||
func QScaleUint32(b, c, d uint32) (a uint64) {
|
||||
return 1 + (uint64(b)*uint64(c))/uint64(d)
|
||||
}
|
383
vendor/github.com/cznic/mathutil/rnd.go
generated
vendored
Normal file
@ -0,0 +1,383 @@
|
||||
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mathutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// FC32 is a full cycle PRNG covering the 32 bit signed integer range.
|
||||
// In contrast to full cycle generators shown at e.g. http://en.wikipedia.org/wiki/Full_cycle,
|
||||
// this code doesn't produce values at constant delta (mod cycle length).
|
||||
// The 32 bit limit is per this implementation, the algorithm used has no intrinsic limit on the cycle size.
|
||||
// Properties include:
|
||||
// - Adjustable limits on creation (hi, lo).
|
||||
// - Positionable/randomly accessible (Pos, Seek).
|
||||
// - Repeatable (deterministic).
|
||||
// - Can run forward or backward (Next, Prev).
|
||||
// - For a one-billion-number cycle the Next/Prev PRN can be produced in circa 100-150 ns.
//   That is roughly 5-10 times slower than PRNs generated by the (non-FC) rand package.
|
||||
type FC32 struct {
|
||||
cycle int64 // On average: 3 * delta / 2, (HQ: 2 * delta)
|
||||
delta int64 // hi - lo
|
||||
factors [][]int64 // This trades some space for hopefully a bit of speed (multiple adding vs multiplying).
|
||||
lo int
|
||||
mods []int // pos % set
|
||||
pos int64 // Within cycle.
|
||||
primes []int64 // Ordered. ∏ primes == cycle.
|
||||
set []int64 // Reordered primes (magnitude order bases) according to seed.
|
||||
}
|
||||
|
||||
// NewFC32 returns a newly created FC32 adjusted for the closed interval [lo, hi] or an Error if any.
|
||||
// If hq == true then trade some generation time for improved (pseudo)randomness.
|
||||
func NewFC32(lo, hi int, hq bool) (r *FC32, err error) {
|
||||
if lo > hi {
|
||||
return nil, fmt.Errorf("invalid range %d > %d", lo, hi)
|
||||
}
|
||||
|
||||
if uint64(hi)-uint64(lo) > math.MaxUint32 {
|
||||
return nil, fmt.Errorf("range out of int32 limits %d, %d", lo, hi)
|
||||
}
|
||||
|
||||
delta := int64(hi) - int64(lo)
|
||||
// Find the primorial covering whole delta
|
||||
n, set, p := int64(1), []int64{}, uint32(2)
|
||||
if hq {
|
||||
p++
|
||||
}
|
||||
for {
|
||||
set = append(set, int64(p))
|
||||
n *= int64(p)
|
||||
if n > delta {
|
||||
break
|
||||
}
|
||||
p, _ = NextPrime(p)
|
||||
}
|
||||
|
||||
// Adjust the set so n ∊ [delta, 2 * delta] (HQ: [delta, 3 * delta])
|
||||
// while keeping the cardinality of the set (correlates with the statistic "randomness quality")
|
||||
// at max, i.e. discard at most one member.
|
||||
i := -1 // no candidate prime
|
||||
if n > 2*(delta+1) {
|
||||
for j, p := range set {
|
||||
q := n / p
|
||||
if q < delta+1 {
|
||||
break
|
||||
}
|
||||
|
||||
i = j // mark the highest candidate prime set index
|
||||
}
|
||||
}
|
||||
if i >= 0 { // shrink the inner cycle
|
||||
n = n / set[i]
|
||||
set = delete(set, i)
|
||||
}
|
||||
r = &FC32{
|
||||
cycle: n,
|
||||
delta: delta,
|
||||
factors: make([][]int64, len(set)),
|
||||
lo: lo,
|
||||
mods: make([]int, len(set)),
|
||||
primes: set,
|
||||
}
|
||||
r.Seed(1) // the default seed should be always non zero
|
||||
return
|
||||
}
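// Illustrative only: per the full cycle property described above, the first
// period's worth of draws covers every value in [lo, hi] exactly once.
//
//	r, err := mathutil.NewFC32(0, 9, false)
//	if err != nil {
//		log.Fatal(err)
//	}
//	for i := 0; i < 10; i++ {
//		fmt.Print(r.Next(), " ") // a permutation of 0..9
//	}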
|
||||
|
||||
// Cycle reports the length of the inner FCPRNG cycle.
|
||||
// Cycle is at most double (HQ: triple) the generator period (hi - lo + 1).
|
||||
func (r *FC32) Cycle() int64 {
|
||||
return r.cycle
|
||||
}
|
||||
|
||||
// Next returns the first PRN after Pos.
|
||||
func (r *FC32) Next() int {
|
||||
return r.step(1)
|
||||
}
|
||||
|
||||
// Pos reports the current position within the inner cycle.
|
||||
func (r *FC32) Pos() int64 {
|
||||
return r.pos
|
||||
}
|
||||
|
||||
// Prev returns the first PRN before Pos.
|
||||
func (r *FC32) Prev() int {
|
||||
return r.step(-1)
|
||||
}
|
||||
|
||||
// Seed uses the provided seed value to initialize the generator to a deterministic state.
|
||||
// A zero seed produces a "canonical" generator with worse randomness than for most non zero seeds.
|
||||
// Still, the FC property holds for any seed value.
|
||||
func (r *FC32) Seed(seed int64) {
|
||||
u := uint64(seed)
|
||||
r.set = mix(r.primes, &u)
|
||||
n := int64(1)
|
||||
for i, p := range r.set {
|
||||
k := make([]int64, p)
|
||||
v := int64(0)
|
||||
for j := range k {
|
||||
k[j] = v
|
||||
v += n
|
||||
}
|
||||
n *= p
|
||||
r.factors[i] = mix(k, &u)
|
||||
}
|
||||
}
|
||||
|
||||
// Seek sets Pos to |pos| % Cycle.
|
||||
func (r *FC32) Seek(pos int64) { //vet:ignore
|
||||
if pos < 0 {
|
||||
pos = -pos
|
||||
}
|
||||
pos %= r.cycle
|
||||
r.pos = pos
|
||||
for i, p := range r.set {
|
||||
r.mods[i] = int(pos % p)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *FC32) step(dir int) int {
|
||||
for { // avg loops per step: 3/2 (HQ: 2)
|
||||
y := int64(0)
|
||||
pos := r.pos
|
||||
pos += int64(dir)
|
||||
switch {
|
||||
case pos < 0:
|
||||
pos = r.cycle - 1
|
||||
case pos >= r.cycle:
|
||||
pos = 0
|
||||
}
|
||||
r.pos = pos
|
||||
for i, mod := range r.mods {
|
||||
mod += dir
|
||||
p := int(r.set[i])
|
||||
switch {
|
||||
case mod < 0:
|
||||
mod = p - 1
|
||||
case mod >= p:
|
||||
mod = 0
|
||||
}
|
||||
r.mods[i] = mod
|
||||
y += r.factors[i][mod]
|
||||
}
|
||||
if y <= r.delta {
|
||||
return int(y) + r.lo
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func delete(set []int64, i int) (y []int64) {
|
||||
for j, v := range set {
|
||||
if j != i {
|
||||
y = append(y, v)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func mix(set []int64, seed *uint64) (y []int64) {
|
||||
for len(set) != 0 {
|
||||
*seed = rol(*seed)
|
||||
i := int(*seed % uint64(len(set)))
|
||||
y = append(y, set[i])
|
||||
set = delete(set, i)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func rol(u uint64) (y uint64) {
|
||||
y = u << 1
|
||||
if int64(u) < 0 {
|
||||
y |= 1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// FCBig is a full cycle PRNG covering ranges outside of the int32 limits.
|
||||
// For more info see the FC32 docs.
|
||||
// Next/Prev PRN on a 1e15 cycle can be produced in about 2 µsec.
|
||||
type FCBig struct {
|
||||
cycle *big.Int // On average: 3 * delta / 2, (HQ: 2 * delta)
|
||||
delta *big.Int // hi - lo
|
||||
factors [][]*big.Int // This trades some space for hopefully a bit of speed (multiple adding vs multiplying).
|
||||
lo *big.Int
|
||||
mods []int // pos % set
|
||||
pos *big.Int // Within cycle.
|
||||
primes []int64 // Ordered. ∏ primes == cycle.
|
||||
set []int64 // Reordered primes (magnitude order bases) according to seed.
|
||||
}
|
||||
|
||||
// NewFCBig returns a newly created FCBig adjusted for the closed interval [lo, hi] or an Error if any.
|
||||
// If hq == true then trade some generation time for improved (pseudo)randomness.
|
||||
func NewFCBig(lo, hi *big.Int, hq bool) (r *FCBig, err error) {
|
||||
if lo.Cmp(hi) > 0 {
|
||||
return nil, fmt.Errorf("invalid range %d > %d", lo, hi)
|
||||
}
|
||||
|
||||
delta := big.NewInt(0)
|
||||
delta.Add(delta, hi).Sub(delta, lo)
|
||||
|
||||
// Find the primorial covering whole delta
|
||||
n, set, pp, p := big.NewInt(1), []int64{}, big.NewInt(0), uint32(2)
|
||||
if hq {
|
||||
p++
|
||||
}
|
||||
for {
|
||||
set = append(set, int64(p))
|
||||
pp.SetInt64(int64(p))
|
||||
n.Mul(n, pp)
|
||||
if n.Cmp(delta) > 0 {
|
||||
break
|
||||
}
|
||||
p, _ = NextPrime(p)
|
||||
}
|
||||
|
||||
// Adjust the set so n ∊ [delta, 2 * delta] (HQ: [delta, 3 * delta])
|
||||
// while keeping the cardinality of the set (correlates with the statistic "randomness quality")
|
||||
// at max, i.e. discard atmost one member.
|
||||
dd1 := big.NewInt(1)
|
||||
dd1.Add(dd1, delta)
|
||||
dd2 := big.NewInt(0)
|
||||
dd2.Lsh(dd1, 1)
|
||||
i := -1 // no candidate prime
|
||||
if n.Cmp(dd2) > 0 {
|
||||
q := big.NewInt(0)
|
||||
for j, p := range set {
|
||||
pp.SetInt64(p)
|
||||
q.Set(n)
|
||||
q.Div(q, pp)
|
||||
if q.Cmp(dd1) < 0 {
|
||||
break
|
||||
}
|
||||
|
||||
i = j // mark the highest candidate prime set index
|
||||
}
|
||||
}
|
||||
if i >= 0 { // shrink the inner cycle
|
||||
pp.SetInt64(set[i])
|
||||
n.Div(n, pp)
|
||||
set = delete(set, i)
|
||||
}
|
||||
r = &FCBig{
|
||||
cycle: n,
|
||||
delta: delta,
|
||||
factors: make([][]*big.Int, len(set)),
|
||||
lo: lo,
|
||||
mods: make([]int, len(set)),
|
||||
pos: big.NewInt(0),
|
||||
primes: set,
|
||||
}
|
||||
r.Seed(1) // the default seed should be always non zero
|
||||
return
|
||||
}
|
||||
|
||||
// Cycle reports the length of the inner FCPRNG cycle.
|
||||
// Cycle is atmost the double (HQ: triple) of the generator period (hi - lo + 1).
|
||||
func (r *FCBig) Cycle() *big.Int {
|
||||
return r.cycle
|
||||
}
|
||||
|
||||
// Next returns the first PRN after Pos.
|
||||
func (r *FCBig) Next() *big.Int {
|
||||
return r.step(1)
|
||||
}
|
||||
|
||||
// Pos reports the current position within the inner cycle.
|
||||
func (r *FCBig) Pos() *big.Int {
|
||||
return r.pos
|
||||
}
|
||||
|
||||
// Prev return the first PRN before Pos.
|
||||
func (r *FCBig) Prev() *big.Int {
|
||||
return r.step(-1)
|
||||
}
|
||||
|
||||
// Seed uses the provided seed value to initialize the generator to a deterministic state.
|
||||
// A zero seed produces a "canonical" generator with worse randomness than for most non zero seeds.
|
||||
// Still, the FC property holds for any seed value.
|
||||
func (r *FCBig) Seed(seed int64) {
|
||||
u := uint64(seed)
|
||||
r.set = mix(r.primes, &u)
|
||||
n := big.NewInt(1)
|
||||
v := big.NewInt(0)
|
||||
pp := big.NewInt(0)
|
||||
for i, p := range r.set {
|
||||
k := make([]*big.Int, p)
|
||||
v.SetInt64(0)
|
||||
for j := range k {
|
||||
k[j] = big.NewInt(0)
|
||||
k[j].Set(v)
|
||||
v.Add(v, n)
|
||||
}
|
||||
pp.SetInt64(p)
|
||||
n.Mul(n, pp)
|
||||
r.factors[i] = mixBig(k, &u)
|
||||
}
|
||||
}
|
||||
|
||||
// Seek sets Pos to |pos| % Cycle.
|
||||
func (r *FCBig) Seek(pos *big.Int) {
|
||||
r.pos.Set(pos)
|
||||
r.pos.Abs(r.pos)
|
||||
r.pos.Mod(r.pos, r.cycle)
|
||||
mod := big.NewInt(0)
|
||||
pp := big.NewInt(0)
|
||||
for i, p := range r.set {
|
||||
pp.SetInt64(p)
|
||||
r.mods[i] = int(mod.Mod(r.pos, pp).Int64())
|
||||
}
|
||||
}
|
||||
|
||||
func (r *FCBig) step(dir int) (y *big.Int) {
|
||||
y = big.NewInt(0)
|
||||
d := big.NewInt(int64(dir))
|
||||
for { // avg loops per step: 3/2 (HQ: 2)
|
||||
r.pos.Add(r.pos, d)
|
||||
switch {
|
||||
case r.pos.Sign() < 0:
|
||||
r.pos.Add(r.pos, r.cycle)
|
||||
case r.pos.Cmp(r.cycle) >= 0:
|
||||
r.pos.SetInt64(0)
|
||||
}
|
||||
for i, mod := range r.mods {
|
||||
mod += dir
|
||||
p := int(r.set[i])
|
||||
switch {
|
||||
case mod < 0:
|
||||
mod = p - 1
|
||||
case mod >= p:
|
||||
mod = 0
|
||||
}
|
||||
r.mods[i] = mod
|
||||
y.Add(y, r.factors[i][mod])
|
||||
}
|
||||
if y.Cmp(r.delta) <= 0 {
|
||||
y.Add(y, r.lo)
|
||||
return
|
||||
}
|
||||
y.SetInt64(0)
|
||||
}
|
||||
}
|
||||
|
||||
func deleteBig(set []*big.Int, i int) (y []*big.Int) {
|
||||
for j, v := range set {
|
||||
if j != i {
|
||||
y = append(y, v)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func mixBig(set []*big.Int, seed *uint64) (y []*big.Int) {
|
||||
for len(set) != 0 {
|
||||
*seed = rol(*seed)
|
||||
i := int(*seed % uint64(len(set)))
|
||||
y = append(y, set[i])
|
||||
set = deleteBig(set, i)
|
||||
}
|
||||
return
|
||||
}
|
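For orientation, a minimal driver for the FCBig API above might look as follows. This sketch is illustrative only and not part of the vendored sources; it assumes the package is imported as github.com/cznic/mathutil (the vendor path used in this commit) and that the package name is mathutil.

// Illustrative sketch, not part of the vendored sources.
package main

import (
	"fmt"
	"math/big"

	"github.com/cznic/mathutil" // assumed import path, matching the vendor directory
)

func main() {
	lo, hi := big.NewInt(10), big.NewInt(20)
	r, err := mathutil.NewFCBig(lo, hi, true) // hq: trade some speed for quality
	if err != nil {
		panic(err)
	}
	r.Seed(42) // any non-zero seed selects a different full-cycle permutation
	// Over one generator period (hi - lo + 1 values), each value in [lo, hi]
	// should appear exactly once, in a shuffled order.
	for i := 0; i < 11; i++ {
		fmt.Println(r.Next())
	}
}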
6995
vendor/github.com/cznic/mathutil/tables.go
generated
vendored
Normal file
File diff suppressed because it is too large
11
vendor/github.com/cznic/mathutil/test_deps.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package mathutil

// Pull test dependencies too.
// Enables easy 'go test X' after 'go get X'
import (
	// nothing yet
)
27
vendor/github.com/cznic/ql/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
Copyright (c) 2014 The ql Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
155
vendor/github.com/cznic/ql/blob.go
generated
vendored
Normal file
@ -0,0 +1,155 @@
// Copyright 2014 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ql

import (
	"bytes"
	"encoding/gob"
	"math/big"
	"sync"
	"time"
)

const shortBlob = 256 // bytes

var (
	gobInitDuration = time.Duration(278)
	gobInitInt      = big.NewInt(42)
	gobInitRat      = big.NewRat(355, 113)
	gobInitTime     time.Time
)

func init() {
	var err error
	if gobInitTime, err = time.ParseInLocation(
		"Jan 2, 2006 at 3:04pm (MST)",
		"Jul 9, 2012 at 5:02am (CEST)",
		time.FixedZone("XYZ", 1234),
	); err != nil {
		panic(err)
	}
	newGobCoder()
}

type gobCoder struct {
	buf bytes.Buffer
	dec *gob.Decoder
	enc *gob.Encoder
	mu  sync.Mutex
}

func newGobCoder() (g *gobCoder) {
	g = &gobCoder{}
	g.enc = gob.NewEncoder(&g.buf)
	if err := g.enc.Encode(gobInitInt); err != nil {
		panic(err)
	}

	if err := g.enc.Encode(gobInitRat); err != nil {
		panic(err)
	}

	if err := g.enc.Encode(gobInitTime); err != nil {
		panic(err)
	}

	if err := g.enc.Encode(gobInitDuration); err != nil {
		panic(err)
	}

	g.dec = gob.NewDecoder(&g.buf)
	i := big.NewInt(0)
	if err := g.dec.Decode(i); err != nil {
		panic(err)
	}

	r := big.NewRat(3, 5)
	if err := g.dec.Decode(r); err != nil {
		panic(err)
	}

	t := time.Now()
	if err := g.dec.Decode(&t); err != nil {
		panic(err)
	}

	var d time.Duration
	if err := g.dec.Decode(&d); err != nil {
		panic(err)
	}

	return
}

func isBlobType(v interface{}) (bool, Type) {
	switch v.(type) {
	case []byte:
		return true, Blob
	case *big.Int:
		return true, BigInt
	case *big.Rat:
		return true, BigRat
	case time.Time:
		return true, Time
	case time.Duration:
		return true, Duration
	default:
		return false, -1
	}
}

func (g *gobCoder) encode(v interface{}) (b []byte, err error) {
	g.mu.Lock()
	defer g.mu.Unlock()

	g.buf.Reset()
	switch x := v.(type) {
	case []byte:
		return x, nil
	case *big.Int:
		err = g.enc.Encode(x)
	case *big.Rat:
		err = g.enc.Encode(x)
	case time.Time:
		err = g.enc.Encode(x)
	case time.Duration:
		err = g.enc.Encode(int64(x))
	default:
		panic("internal error 002")
	}
	b = g.buf.Bytes()
	return
}

func (g *gobCoder) decode(b []byte, typ int) (v interface{}, err error) {
	g.mu.Lock()
	defer g.mu.Unlock()

	g.buf.Reset()
	g.buf.Write(b)
	switch typ {
	case qBlob:
		return b, nil
	case qBigInt:
		x := big.NewInt(0)
		err = g.dec.Decode(&x)
		v = x
	case qBigRat:
		x := big.NewRat(1, 1)
		err = g.dec.Decode(&x)
		v = x
	case qTime:
		var x time.Time
		err = g.dec.Decode(&x)
		v = x
	case qDuration:
		var x int64
		err = g.dec.Decode(&x)
		v = time.Duration(x)
	default:
		panic("internal error 003")
	}
	return
}
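The newGobCoder constructor above primes the shared encoder/decoder pair by round-tripping one value of each supported type through the common buffer, so gob's one-time type descriptors are written and consumed up front and later per-value encodes carry only data. The following is a standalone sketch of that priming effect, written against plain encoding/gob; it is not code from the vendored package.

// Standalone sketch of the gob "priming" idea, not part of the vendored package.
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"math/big"
)

func main() {
	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)

	// The first encode of a type also writes its type descriptor to the stream.
	if err := enc.Encode(big.NewInt(42)); err != nil {
		panic(err)
	}
	primed := buf.Len()

	// Later encodes of the same type reuse the descriptor and are smaller.
	if err := enc.Encode(big.NewInt(1234567)); err != nil {
		panic(err)
	}
	fmt.Println("first:", primed, "bytes; second:", buf.Len()-primed, "bytes")

	// A decoder reading the same stream consumes the descriptor once, too.
	dec := gob.NewDecoder(&buf)
	x := big.NewInt(0)
	for i := 0; i < 2; i++ {
		if err := dec.Decode(x); err != nil {
			panic(err)
		}
		fmt.Println(x)
	}
}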
725
vendor/github.com/cznic/ql/btree.go
generated
vendored
Normal file
@ -0,0 +1,725 @@
|
||||
// Copyright 2014 The ql Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ql
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
|
||||
kx = 128 //DONE benchmark tune this number if using custom key/value type(s).
|
||||
kd = 64 //DONE benchmark tune this number if using custom key/value type(s).
|
||||
)
|
||||
|
||||
type (
|
||||
// cmp compares a and b. Return value is:
|
||||
//
|
||||
// < 0 if a < b
|
||||
// 0 if a == b
|
||||
// > 0 if a > b
|
||||
//
|
||||
cmp func(a, b []interface{}) int
|
||||
|
||||
d struct { // data page
|
||||
c int
|
||||
d [2*kd + 1]de
|
||||
n *d
|
||||
p *d
|
||||
}
|
||||
|
||||
de struct { // d element
|
||||
k []interface{}
|
||||
v []interface{}
|
||||
}
|
||||
|
||||
enumerator struct {
|
||||
err error
|
||||
hit bool
|
||||
i int
|
||||
k []interface{}
|
||||
q *d
|
||||
t *tree
|
||||
ver int64
|
||||
}
|
||||
|
||||
// tree is a B+tree.
|
||||
tree struct {
|
||||
c int
|
||||
cmp cmp
|
||||
first *d
|
||||
last *d
|
||||
r interface{}
|
||||
ver int64
|
||||
}
|
||||
|
||||
xe struct { // x element
|
||||
ch interface{}
|
||||
sep *d
|
||||
}
|
||||
|
||||
x struct { // index page
|
||||
c int
|
||||
x [2*kx + 2]xe
|
||||
}
|
||||
)
|
||||
|
||||
var ( // R/O zero values
|
||||
zd d
|
||||
zde de
|
||||
zx x
|
||||
zxe xe
|
||||
)
|
||||
|
||||
func clr(q interface{}) {
|
||||
switch z := q.(type) {
|
||||
case *x:
|
||||
for i := 0; i <= z.c; i++ { // Ch0 Sep0 ... Chn-1 Sepn-1 Chn
|
||||
clr(z.x[i].ch)
|
||||
}
|
||||
*z = zx // GC
|
||||
case *d:
|
||||
*z = zd // GC
|
||||
}
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------- x
|
||||
|
||||
func newX(ch0 interface{}) *x {
|
||||
r := &x{}
|
||||
r.x[0].ch = ch0
|
||||
return r
|
||||
}
|
||||
|
||||
func (q *x) extract(i int) {
|
||||
q.c--
|
||||
if i < q.c {
|
||||
copy(q.x[i:], q.x[i+1:q.c+1])
|
||||
q.x[q.c].ch = q.x[q.c+1].ch
|
||||
q.x[q.c].sep = nil // GC
|
||||
q.x[q.c+1] = zxe // GC
|
||||
}
|
||||
}
|
||||
|
||||
func (q *x) insert(i int, d *d, ch interface{}) *x {
|
||||
c := q.c
|
||||
if i < c {
|
||||
q.x[c+1].ch = q.x[c].ch
|
||||
copy(q.x[i+2:], q.x[i+1:c])
|
||||
q.x[i+1].sep = q.x[i].sep
|
||||
}
|
||||
c++
|
||||
q.c = c
|
||||
q.x[i].sep = d
|
||||
q.x[i+1].ch = ch
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *x) siblings(i int) (l, r *d) {
|
||||
if i >= 0 {
|
||||
if i > 0 {
|
||||
l = q.x[i-1].ch.(*d)
|
||||
}
|
||||
if i < q.c {
|
||||
r = q.x[i+1].ch.(*d)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------- d
|
||||
|
||||
func (l *d) mvL(r *d, c int) {
|
||||
copy(l.d[l.c:], r.d[:c])
|
||||
copy(r.d[:], r.d[c:r.c])
|
||||
l.c += c
|
||||
r.c -= c
|
||||
}
|
||||
|
||||
func (l *d) mvR(r *d, c int) {
|
||||
copy(r.d[c:], r.d[:r.c])
|
||||
copy(r.d[:c], l.d[l.c-c:])
|
||||
r.c += c
|
||||
l.c -= c
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------- tree
|
||||
|
||||
// treeNew returns a newly created, empty tree. The compare function is used
|
||||
// for key collation.
|
||||
func treeNew(cmp cmp) *tree {
|
||||
return &tree{cmp: cmp}
|
||||
}
|
||||
|
||||
// Clear removes all K/V pairs from the tree.
|
||||
func (t *tree) Clear() {
|
||||
if t.r == nil {
|
||||
return
|
||||
}
|
||||
|
||||
clr(t.r)
|
||||
t.c, t.first, t.last, t.r = 0, nil, nil, nil
|
||||
t.ver++
|
||||
}
|
||||
|
||||
func (t *tree) cat(p *x, q, r *d, pi int) {
|
||||
t.ver++
|
||||
q.mvL(r, r.c)
|
||||
if r.n != nil {
|
||||
r.n.p = q
|
||||
} else {
|
||||
t.last = q
|
||||
}
|
||||
q.n = r.n
|
||||
if p.c > 1 {
|
||||
p.extract(pi)
|
||||
p.x[pi].ch = q
|
||||
} else {
|
||||
t.r = q
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tree) catX(p, q, r *x, pi int) {
|
||||
t.ver++
|
||||
q.x[q.c].sep = p.x[pi].sep
|
||||
copy(q.x[q.c+1:], r.x[:r.c])
|
||||
q.c += r.c + 1
|
||||
q.x[q.c].ch = r.x[r.c].ch
|
||||
if p.c > 1 {
|
||||
p.c--
|
||||
pc := p.c
|
||||
if pi < pc {
|
||||
p.x[pi].sep = p.x[pi+1].sep
|
||||
copy(p.x[pi+1:], p.x[pi+2:pc+1])
|
||||
p.x[pc].ch = p.x[pc+1].ch
|
||||
p.x[pc].sep = nil // GC
|
||||
p.x[pc+1].ch = nil // GC
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
t.r = q
|
||||
}
|
||||
|
||||
// Delete removes k's KV pair, if it exists, in which case Delete returns
// true.
|
||||
func (t *tree) Delete(k []interface{}) (ok bool) {
|
||||
pi := -1
|
||||
var p *x
|
||||
q := t.r
|
||||
if q == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
var i int
|
||||
i, ok = t.find(q, k)
|
||||
if ok {
|
||||
switch z := q.(type) {
|
||||
case *x:
|
||||
dp := z.x[i].sep
|
||||
switch {
|
||||
case dp.c > kd:
|
||||
t.extract(dp, 0)
|
||||
default:
|
||||
if z.c < kx && q != t.r {
|
||||
t.underflowX(p, &z, pi, &i)
|
||||
}
|
||||
pi = i + 1
|
||||
p = z
|
||||
q = z.x[pi].ch
|
||||
ok = false
|
||||
continue
|
||||
}
|
||||
case *d:
|
||||
t.extract(z, i)
|
||||
if z.c >= kd {
|
||||
return
|
||||
}
|
||||
|
||||
if q != t.r {
|
||||
t.underflow(p, z, pi)
|
||||
} else if t.c == 0 {
|
||||
t.Clear()
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
switch z := q.(type) {
|
||||
case *x:
|
||||
if z.c < kx && q != t.r {
|
||||
t.underflowX(p, &z, pi, &i)
|
||||
}
|
||||
pi = i
|
||||
p = z
|
||||
q = z.x[i].ch
|
||||
case *d:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tree) extract(q *d, i int) { // (r []interface{}) {
|
||||
t.ver++
|
||||
//r = q.d[i].v // prepared for Extract
|
||||
q.c--
|
||||
if i < q.c {
|
||||
copy(q.d[i:], q.d[i+1:q.c+1])
|
||||
}
|
||||
q.d[q.c] = zde // GC
|
||||
t.c--
|
||||
return
|
||||
}
|
||||
|
||||
func (t *tree) find(q interface{}, k []interface{}) (i int, ok bool) {
|
||||
var mk []interface{}
|
||||
l := 0
|
||||
switch z := q.(type) {
|
||||
case *x:
|
||||
h := z.c - 1
|
||||
for l <= h {
|
||||
m := (l + h) >> 1
|
||||
mk = z.x[m].sep.d[0].k
|
||||
switch cmp := t.cmp(k, mk); {
|
||||
case cmp > 0:
|
||||
l = m + 1
|
||||
case cmp == 0:
|
||||
return m, true
|
||||
default:
|
||||
h = m - 1
|
||||
}
|
||||
}
|
||||
case *d:
|
||||
h := z.c - 1
|
||||
for l <= h {
|
||||
m := (l + h) >> 1
|
||||
mk = z.d[m].k
|
||||
switch cmp := t.cmp(k, mk); {
|
||||
case cmp > 0:
|
||||
l = m + 1
|
||||
case cmp == 0:
|
||||
return m, true
|
||||
default:
|
||||
h = m - 1
|
||||
}
|
||||
}
|
||||
}
|
||||
return l, false
|
||||
}
|
||||
|
||||
// First returns the first item of the tree in the key collating order, or
|
||||
// (nil, nil) if the tree is empty.
|
||||
func (t *tree) First() (k []interface{}, v []interface{}) {
|
||||
if q := t.first; q != nil {
|
||||
q := &q.d[0]
|
||||
k, v = q.k, q.v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Get returns the value associated with k and true if it exists. Otherwise Get
|
||||
// returns (nil, false).
|
||||
func (t *tree) Get(k []interface{}) (v []interface{}, ok bool) {
|
||||
q := t.r
|
||||
if q == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
var i int
|
||||
if i, ok = t.find(q, k); ok {
|
||||
switch z := q.(type) {
|
||||
case *x:
|
||||
return z.x[i].sep.d[0].v, true
|
||||
case *d:
|
||||
return z.d[i].v, true
|
||||
}
|
||||
}
|
||||
switch z := q.(type) {
|
||||
case *x:
|
||||
q = z.x[i].ch
|
||||
default:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tree) insert(q *d, i int, k []interface{}, v []interface{}) *d {
|
||||
t.ver++
|
||||
c := q.c
|
||||
if i < c {
|
||||
copy(q.d[i+1:], q.d[i:c])
|
||||
}
|
||||
c++
|
||||
q.c = c
|
||||
q.d[i].k, q.d[i].v = k, v
|
||||
t.c++
|
||||
return q
|
||||
}
|
||||
|
||||
// Last returns the last item of the tree in the key collating order, or (nil,
|
||||
// nil) if the tree is empty.
|
||||
func (t *tree) Last() (k []interface{}, v []interface{}) {
|
||||
if q := t.last; q != nil {
|
||||
q := &q.d[q.c-1]
|
||||
k, v = q.k, q.v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Len returns the number of items in the tree.
|
||||
func (t *tree) Len() int {
|
||||
return t.c
|
||||
}
|
||||
|
||||
func (t *tree) overflow(p *x, q *d, pi, i int, k []interface{}, v []interface{}) {
|
||||
t.ver++
|
||||
l, r := p.siblings(pi)
|
||||
|
||||
if l != nil && l.c < 2*kd && i > 0 {
|
||||
l.mvL(q, 1)
|
||||
t.insert(q, i-1, k, v)
|
||||
return
|
||||
}
|
||||
|
||||
if r != nil && r.c < 2*kd {
|
||||
if i < 2*kd {
|
||||
q.mvR(r, 1)
|
||||
t.insert(q, i, k, v)
|
||||
} else {
|
||||
t.insert(r, 0, k, v)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
t.split(p, q, pi, i, k, v)
|
||||
}
|
||||
|
||||
// Seek returns an enumerator positioned on an item such that k >= item's
// key. ok reports whether k == item.key. The enumerator's position is
// possibly after the last item in the tree.
|
||||
func (t *tree) Seek(k []interface{}) (e *enumerator, ok bool) {
|
||||
q := t.r
|
||||
if q == nil {
|
||||
e = &enumerator{nil, false, 0, k, nil, t, t.ver}
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
var i int
|
||||
if i, ok = t.find(q, k); ok {
|
||||
switch z := q.(type) {
|
||||
case *x:
|
||||
e = &enumerator{nil, ok, 0, k, z.x[i].sep, t, t.ver}
|
||||
return
|
||||
case *d:
|
||||
e = &enumerator{nil, ok, i, k, z, t, t.ver}
|
||||
return
|
||||
}
|
||||
}
|
||||
switch z := q.(type) {
|
||||
case *x:
|
||||
q = z.x[i].ch
|
||||
case *d:
|
||||
e = &enumerator{nil, ok, i, k, z, t, t.ver}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SeekFirst returns an enumerator positioned on the first KV pair in the tree,
|
||||
// if any. For an empty tree, err == io.EOF is returned and e will be nil.
|
||||
func (t *tree) SeekFirst() (e *enumerator, err error) {
|
||||
q := t.first
|
||||
if q == nil {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
return &enumerator{nil, true, 0, q.d[0].k, q, t, t.ver}, nil
|
||||
}
|
||||
|
||||
// SeekLast returns an enumerator positioned on the last KV pair in the tree,
|
||||
// if any. For an empty tree, err == io.EOF is returned and e will be nil.
|
||||
func (t *tree) SeekLast() (e *enumerator, err error) {
|
||||
q := t.last
|
||||
if q == nil {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
return &enumerator{nil, true, q.c - 1, q.d[q.c-1].k, q, t, t.ver}, nil
|
||||
}
|
||||
|
||||
// Set sets the value associated with k.
|
||||
func (t *tree) Set(k []interface{}, v []interface{}) {
|
||||
pi := -1
|
||||
var p *x
|
||||
q := t.r
|
||||
if q != nil {
|
||||
for {
|
||||
i, ok := t.find(q, k)
|
||||
if ok {
|
||||
switch z := q.(type) {
|
||||
case *x:
|
||||
z.x[i].sep.d[0].v = v
|
||||
case *d:
|
||||
z.d[i].v = v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
switch z := q.(type) {
|
||||
case *x:
|
||||
if z.c > 2*kx {
|
||||
t.splitX(p, &z, pi, &i)
|
||||
}
|
||||
pi = i
|
||||
p = z
|
||||
q = z.x[i].ch
|
||||
case *d:
|
||||
switch {
|
||||
case z.c < 2*kd:
|
||||
t.insert(z, i, k, v)
|
||||
default:
|
||||
t.overflow(p, z, pi, i, k, v)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
z := t.insert(&d{}, 0, k, v)
|
||||
t.r, t.first, t.last = z, z, z
|
||||
return
|
||||
}
|
||||
|
||||
func (t *tree) split(p *x, q *d, pi, i int, k []interface{}, v []interface{}) {
|
||||
t.ver++
|
||||
r := &d{}
|
||||
if q.n != nil {
|
||||
r.n = q.n
|
||||
r.n.p = r
|
||||
} else {
|
||||
t.last = r
|
||||
}
|
||||
q.n = r
|
||||
r.p = q
|
||||
|
||||
copy(r.d[:], q.d[kd:2*kd])
|
||||
for i := range q.d[kd:] {
|
||||
q.d[kd+i] = zde
|
||||
}
|
||||
q.c = kd
|
||||
r.c = kd
|
||||
if pi >= 0 {
|
||||
p.insert(pi, r, r)
|
||||
} else {
|
||||
t.r = newX(q).insert(0, r, r)
|
||||
}
|
||||
if i > kd {
|
||||
t.insert(r, i-kd, k, v)
|
||||
return
|
||||
}
|
||||
|
||||
t.insert(q, i, k, v)
|
||||
}
|
||||
|
||||
func (t *tree) splitX(p *x, pp **x, pi int, i *int) {
|
||||
t.ver++
|
||||
q := *pp
|
||||
r := &x{}
|
||||
copy(r.x[:], q.x[kx+1:])
|
||||
q.c = kx
|
||||
r.c = kx
|
||||
if pi >= 0 {
|
||||
p.insert(pi, q.x[kx].sep, r)
|
||||
} else {
|
||||
t.r = newX(q).insert(0, q.x[kx].sep, r)
|
||||
}
|
||||
q.x[kx].sep = nil
|
||||
for i := range q.x[kx+1:] {
|
||||
q.x[kx+i+1] = zxe
|
||||
}
|
||||
if *i > kx {
|
||||
*pp = r
|
||||
*i -= kx + 1
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tree) underflow(p *x, q *d, pi int) {
|
||||
t.ver++
|
||||
l, r := p.siblings(pi)
|
||||
|
||||
if l != nil && l.c+q.c >= 2*kd {
|
||||
l.mvR(q, 1)
|
||||
} else if r != nil && q.c+r.c >= 2*kd {
|
||||
q.mvL(r, 1)
|
||||
r.d[r.c] = zde // GC
|
||||
} else if l != nil {
|
||||
t.cat(p, l, q, pi-1)
|
||||
} else {
|
||||
t.cat(p, q, r, pi)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tree) underflowX(p *x, pp **x, pi int, i *int) {
|
||||
t.ver++
|
||||
var l, r *x
|
||||
q := *pp
|
||||
|
||||
if pi >= 0 {
|
||||
if pi > 0 {
|
||||
l = p.x[pi-1].ch.(*x)
|
||||
}
|
||||
if pi < p.c {
|
||||
r = p.x[pi+1].ch.(*x)
|
||||
}
|
||||
}
|
||||
|
||||
if l != nil && l.c > kx {
|
||||
q.x[q.c+1].ch = q.x[q.c].ch
|
||||
copy(q.x[1:], q.x[:q.c])
|
||||
q.x[0].ch = l.x[l.c].ch
|
||||
q.x[0].sep = p.x[pi-1].sep
|
||||
q.c++
|
||||
*i++
|
||||
l.c--
|
||||
p.x[pi-1].sep = l.x[l.c].sep
|
||||
return
|
||||
}
|
||||
|
||||
if r != nil && r.c > kx {
|
||||
q.x[q.c].sep = p.x[pi].sep
|
||||
q.c++
|
||||
q.x[q.c].ch = r.x[0].ch
|
||||
p.x[pi].sep = r.x[0].sep
|
||||
copy(r.x[:], r.x[1:r.c])
|
||||
r.c--
|
||||
rc := r.c
|
||||
r.x[rc].ch = r.x[rc+1].ch
|
||||
r.x[rc].sep = nil
|
||||
r.x[rc+1].ch = nil
|
||||
return
|
||||
}
|
||||
|
||||
if l != nil {
|
||||
*i += l.c + 1
|
||||
t.catX(p, l, q, pi-1)
|
||||
*pp = l
|
||||
return
|
||||
}
|
||||
|
||||
t.catX(p, q, r, pi)
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------- enumerator
|
||||
|
||||
// Next returns the currently enumerated item, if it exists, and moves to the
// next item in the key collation order. If there is no item to return, err ==
// io.EOF is returned.
|
||||
func (e *enumerator) Next() (k []interface{}, v []interface{}, err error) {
|
||||
if err = e.err; err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if e.ver != e.t.ver {
|
||||
f, hit := e.t.Seek(e.k)
|
||||
if !e.hit && hit {
|
||||
if err = f.next(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
*e = *f
|
||||
}
|
||||
if e.q == nil {
|
||||
e.err, err = io.EOF, io.EOF
|
||||
return
|
||||
}
|
||||
|
||||
if e.i >= e.q.c {
|
||||
if err = e.next(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
i := e.q.d[e.i]
|
||||
k, v = i.k, i.v
|
||||
e.k, e.hit = k, false
|
||||
e.next()
|
||||
return
|
||||
}
|
||||
|
||||
func (e *enumerator) next() error {
|
||||
if e.q == nil {
|
||||
e.err = io.EOF
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
switch {
|
||||
case e.i < e.q.c-1:
|
||||
e.i++
|
||||
default:
|
||||
if e.q, e.i = e.q.n, 0; e.q == nil {
|
||||
e.err = io.EOF
|
||||
}
|
||||
}
|
||||
return e.err
|
||||
}
|
||||
|
||||
// Prev returns the currently enumerated item, if it exists, and moves to the
// previous item in the key collation order. If there is no item to return,
// err == io.EOF is returned.
|
||||
func (e *enumerator) Prev() (k []interface{}, v []interface{}, err error) {
|
||||
if err = e.err; err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if e.ver != e.t.ver {
|
||||
f, hit := e.t.Seek(e.k)
|
||||
if !e.hit && hit {
|
||||
if err = f.prev(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
*e = *f
|
||||
}
|
||||
if e.q == nil {
|
||||
e.err, err = io.EOF, io.EOF
|
||||
return
|
||||
}
|
||||
|
||||
if e.i >= e.q.c {
|
||||
if err = e.next(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
i := e.q.d[e.i]
|
||||
k, v = i.k, i.v
|
||||
e.k, e.hit = k, false
|
||||
e.prev()
|
||||
return
|
||||
}
|
||||
|
||||
func (e *enumerator) prev() error {
|
||||
if e.q == nil {
|
||||
e.err = io.EOF
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
switch {
|
||||
case e.i > 0:
|
||||
e.i--
|
||||
default:
|
||||
if e.q = e.q.p; e.q == nil {
|
||||
e.err = io.EOF
|
||||
break
|
||||
}
|
||||
|
||||
e.i = e.q.c - 1
|
||||
}
|
||||
return e.err
|
||||
}
|
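The B+ tree above is keyed by []interface{} tuples and collated by a cmp callback with the contract documented near the top of the file (negative, zero, positive). As a rough illustration of that contract, a comparator for a hypothetical single string-column key could look like the sketch below; this is an assumption for illustration, not the comparator ql actually constructs.

// Illustrative only: a collation function satisfying the documented cmp
// contract (< 0, 0, > 0) for keys that are single-column string tuples.
func cmpStringKeys(a, b []interface{}) int {
	x, y := a[0].(string), b[0].(string)
	switch {
	case x < y:
		return -1
	case x > y:
		return 1
	default:
		return 0
	}
}

A tree created with treeNew(cmpStringKeys) would then keep its KV pairs ordered by that single string column.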
991
vendor/github.com/cznic/ql/builtin.go
generated
vendored
Normal file
@ -0,0 +1,991 @@
|
||||
// Copyright 2014 The ql Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
//TODO agg bigint, bigrat, time, duration
|
||||
|
||||
var builtin = map[string]struct {
|
||||
f func([]interface{}, map[interface{}]interface{}) (interface{}, error)
|
||||
minArgs int
|
||||
maxArgs int
|
||||
isStatic bool
|
||||
isAggregate bool
|
||||
}{
|
||||
"__testBlob": {builtinTestBlob, 1, 1, true, false},
|
||||
"__testString": {builtinTestString, 1, 1, true, false},
|
||||
"avg": {builtinAvg, 1, 1, false, true},
|
||||
"complex": {builtinComplex, 2, 2, true, false},
|
||||
"contains": {builtinContains, 2, 2, true, false},
|
||||
"count": {builtinCount, 0, 1, false, true},
|
||||
"date": {builtinDate, 8, 8, true, false},
|
||||
"day": {builtinDay, 1, 1, true, false},
|
||||
"formatTime": {builtinFormatTime, 2, 2, true, false},
|
||||
"formatFloat": {builtinFormatFloat, 1, 4, true, false},
|
||||
"formatInt": {builtinFormatInt, 1, 2, true, false},
|
||||
"hasPrefix": {builtinHasPrefix, 2, 2, true, false},
|
||||
"hasSuffix": {builtinHasSuffix, 2, 2, true, false},
|
||||
"hour": {builtinHour, 1, 1, true, false},
|
||||
"hours": {builtinHours, 1, 1, true, false},
|
||||
"id": {builtinID, 0, 1, false, false},
|
||||
"imag": {builtinImag, 1, 1, true, false},
|
||||
"len": {builtinLen, 1, 1, true, false},
|
||||
"max": {builtinMax, 1, 1, false, true},
|
||||
"min": {builtinMin, 1, 1, false, true},
|
||||
"minute": {builtinMinute, 1, 1, true, false},
|
||||
"minutes": {builtinMinutes, 1, 1, true, false},
|
||||
"month": {builtinMonth, 1, 1, true, false},
|
||||
"nanosecond": {builtinNanosecond, 1, 1, true, false},
|
||||
"nanoseconds": {builtinNanoseconds, 1, 1, true, false},
|
||||
"now": {builtinNow, 0, 0, false, false},
|
||||
"parseTime": {builtinParseTime, 2, 2, true, false},
|
||||
"real": {builtinReal, 1, 1, true, false},
|
||||
"second": {builtinSecond, 1, 1, true, false},
|
||||
"seconds": {builtinSeconds, 1, 1, true, false},
|
||||
"since": {builtinSince, 1, 1, false, false},
|
||||
"sum": {builtinSum, 1, 1, false, true},
|
||||
"timeIn": {builtinTimeIn, 2, 2, true, false},
|
||||
"weekday": {builtinWeekday, 1, 1, true, false},
|
||||
"year": {builtinYear, 1, 1, true, false},
|
||||
"yearDay": {builtinYearday, 1, 1, true, false},
|
||||
}
|
||||
|
||||
func badNArgs(min int, s string, arg []interface{}) error {
|
||||
a := []string{}
|
||||
for _, v := range arg {
|
||||
a = append(a, fmt.Sprintf("%v", v))
|
||||
}
|
||||
switch len(arg) < min {
|
||||
case true:
|
||||
return fmt.Errorf("missing argument to %s(%s)", s, strings.Join(a, ", "))
|
||||
default: //case false:
|
||||
return fmt.Errorf("too many arguments to %s(%s)", s, strings.Join(a, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
func invArg(arg interface{}, s string) error {
|
||||
return fmt.Errorf("invalid argument %v (type %T) for %s", arg, arg, s)
|
||||
}
|
||||
|
||||
func builtinTestBlob(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
n, err := intExpr(arg[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rng := rand.New(rand.NewSource(n))
|
||||
b := make([]byte, n)
|
||||
for i := range b {
|
||||
b[i] = byte(rng.Int())
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func builtinTestString(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
n, err := intExpr(arg[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rng := rand.New(rand.NewSource(n))
|
||||
b := make([]byte, n)
|
||||
for i := range b {
|
||||
b[i] = byte(rng.Int())
|
||||
}
|
||||
return string(b), nil
|
||||
}
|
||||
|
||||
func builtinAvg(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
type avg struct {
|
||||
sum interface{}
|
||||
n uint64
|
||||
}
|
||||
|
||||
if _, ok := ctx["$agg0"]; ok {
|
||||
return
|
||||
}
|
||||
|
||||
fn := ctx["$fn"]
|
||||
if _, ok := ctx["$agg"]; ok {
|
||||
data, ok := ctx[fn].(avg)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
switch x := data.sum.(type) {
|
||||
case complex64:
|
||||
return complex64(complex128(x) / complex(float64(data.n), 0)), nil
|
||||
case complex128:
|
||||
return complex64(complex128(x) / complex(float64(data.n), 0)), nil
|
||||
case float32:
|
||||
return float32(float64(x) / float64(data.n)), nil
|
||||
case float64:
|
||||
return float64(x) / float64(data.n), nil
|
||||
case int8:
|
||||
return int8(int64(x) / int64(data.n)), nil
|
||||
case int16:
|
||||
return int16(int64(x) / int64(data.n)), nil
|
||||
case int32:
|
||||
return int32(int64(x) / int64(data.n)), nil
|
||||
case int64:
|
||||
return int64(int64(x) / int64(data.n)), nil
|
||||
case uint8:
|
||||
return uint8(uint64(x) / data.n), nil
|
||||
case uint16:
|
||||
return uint16(uint64(x) / data.n), nil
|
||||
case uint32:
|
||||
return uint32(uint64(x) / data.n), nil
|
||||
case uint64:
|
||||
return uint64(uint64(x) / data.n), nil
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
data, _ := ctx[fn].(avg)
|
||||
y := arg[0]
|
||||
if y == nil {
|
||||
return
|
||||
}
|
||||
|
||||
switch x := data.sum.(type) {
|
||||
case nil:
|
||||
switch y := y.(type) {
|
||||
case float32, float64, int8, int16, int32, int64, uint8, uint16, uint32, uint64:
|
||||
data = avg{y, 0}
|
||||
default:
|
||||
return nil, fmt.Errorf("avg: cannot accept %v (value if type %T)", y, y)
|
||||
}
|
||||
case complex64:
|
||||
data.sum = x + y.(complex64)
|
||||
case complex128:
|
||||
data.sum = x + y.(complex128)
|
||||
case float32:
|
||||
data.sum = x + y.(float32)
|
||||
case float64:
|
||||
data.sum = x + y.(float64)
|
||||
case int8:
|
||||
data.sum = x + y.(int8)
|
||||
case int16:
|
||||
data.sum = x + y.(int16)
|
||||
case int32:
|
||||
data.sum = x + y.(int32)
|
||||
case int64:
|
||||
data.sum = x + y.(int64)
|
||||
case uint8:
|
||||
data.sum = x + y.(uint8)
|
||||
case uint16:
|
||||
data.sum = x + y.(uint16)
|
||||
case uint32:
|
||||
data.sum = x + y.(uint32)
|
||||
case uint64:
|
||||
data.sum = x + y.(uint64)
|
||||
}
|
||||
data.n++
|
||||
ctx[fn] = data
|
||||
return
|
||||
}
|
||||
|
||||
func builtinComplex(arg []interface{}, _ map[interface{}]interface{}) (v interface{}, err error) {
|
||||
re, im := arg[0], arg[1]
|
||||
if re == nil || im == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
re, im = coerce(re, im)
|
||||
if reflect.TypeOf(re) != reflect.TypeOf(im) {
|
||||
return nil, fmt.Errorf("complex(%T(%#v), %T(%#v)): invalid types", re, re, im, im)
|
||||
}
|
||||
|
||||
switch re := re.(type) {
|
||||
case idealFloat:
|
||||
return idealComplex(complex(float64(re), float64(im.(idealFloat)))), nil
|
||||
case idealInt:
|
||||
return idealComplex(complex(float64(re), float64(im.(idealInt)))), nil
|
||||
case idealRune:
|
||||
return idealComplex(complex(float64(re), float64(im.(idealRune)))), nil
|
||||
case idealUint:
|
||||
return idealComplex(complex(float64(re), float64(im.(idealUint)))), nil
|
||||
case float32:
|
||||
return complex(float32(re), im.(float32)), nil
|
||||
case float64:
|
||||
return complex(float64(re), im.(float64)), nil
|
||||
case int8:
|
||||
return complex(float64(re), float64(im.(int8))), nil
|
||||
case int16:
|
||||
return complex(float64(re), float64(im.(int16))), nil
|
||||
case int32:
|
||||
return complex(float64(re), float64(im.(int32))), nil
|
||||
case int64:
|
||||
return complex(float64(re), float64(im.(int64))), nil
|
||||
case uint8:
|
||||
return complex(float64(re), float64(im.(uint8))), nil
|
||||
case uint16:
|
||||
return complex(float64(re), float64(im.(uint16))), nil
|
||||
case uint32:
|
||||
return complex(float64(re), float64(im.(uint32))), nil
|
||||
case uint64:
|
||||
return complex(float64(re), float64(im.(uint64))), nil
|
||||
default:
|
||||
return nil, invArg(re, "complex")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinContains(arg []interface{}, _ map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch s := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case string:
|
||||
switch chars := arg[1].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case string:
|
||||
return strings.Contains(s, chars), nil
|
||||
default:
|
||||
return nil, invArg(chars, "string")
|
||||
}
|
||||
default:
|
||||
return nil, invArg(s, "string")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinCount(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
if _, ok := ctx["$agg0"]; ok {
|
||||
return int64(0), nil
|
||||
}
|
||||
|
||||
fn := ctx["$fn"]
|
||||
if _, ok := ctx["$agg"]; ok {
|
||||
return ctx[fn].(int64), nil
|
||||
}
|
||||
|
||||
n, _ := ctx[fn].(int64)
|
||||
switch len(arg) {
|
||||
case 0:
|
||||
n++
|
||||
case 1:
|
||||
if arg[0] != nil {
|
||||
n++
|
||||
}
|
||||
default:
|
||||
panic("internal error 067")
|
||||
}
|
||||
ctx[fn] = n
|
||||
return
|
||||
}
|
||||
|
||||
func builtinDate(arg []interface{}, _ map[interface{}]interface{}) (v interface{}, err error) {
|
||||
for i, v := range arg {
|
||||
switch i {
|
||||
case 7:
|
||||
switch x := v.(type) {
|
||||
case string:
|
||||
default:
|
||||
return nil, invArg(x, "date")
|
||||
}
|
||||
default:
|
||||
switch x := v.(type) {
|
||||
case int64:
|
||||
case idealInt:
|
||||
arg[i] = int64(x)
|
||||
default:
|
||||
return nil, invArg(x, "date")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sloc := arg[7].(string)
|
||||
loc := time.Local
|
||||
switch sloc {
|
||||
case "local":
|
||||
default:
|
||||
loc, err = time.LoadLocation(sloc)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return time.Date(
|
||||
int(arg[0].(int64)),
|
||||
time.Month(arg[1].(int64)),
|
||||
int(arg[2].(int64)),
|
||||
int(arg[3].(int64)),
|
||||
int(arg[4].(int64)),
|
||||
int(arg[5].(int64)),
|
||||
int(arg[6].(int64)),
|
||||
loc,
|
||||
), nil
|
||||
}
|
||||
|
||||
func builtinLen(arg []interface{}, _ map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case string:
|
||||
return int64(len(x)), nil
|
||||
default:
|
||||
return nil, invArg(x, "len")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinDay(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case time.Time:
|
||||
return int64(x.Day()), nil
|
||||
default:
|
||||
return nil, invArg(x, "day")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinFormatTime(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case time.Time:
|
||||
switch y := arg[1].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case string:
|
||||
return x.Format(y), nil
|
||||
default:
|
||||
return nil, invArg(y, "formatTime")
|
||||
}
|
||||
default:
|
||||
return nil, invArg(x, "formatTime")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinFormatFloat(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
var val float64
|
||||
var fmt byte = 'g'
|
||||
|
||||
prec := -1
|
||||
bitSize := 64
|
||||
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case float32:
|
||||
val = float64(x)
|
||||
bitSize = 32
|
||||
case float64:
|
||||
val = x
|
||||
default:
|
||||
return nil, invArg(x, "formatFloat")
|
||||
}
|
||||
|
||||
switch len(arg) {
|
||||
case 4:
|
||||
arg3 := coerce1(arg[3], int64(0))
|
||||
switch y := arg3.(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case int64:
|
||||
bitSize = int(y)
|
||||
default:
|
||||
return nil, invArg(y, "formatFloat")
|
||||
}
|
||||
fallthrough
|
||||
case 3:
|
||||
arg2 := coerce1(arg[2], int64(0))
|
||||
switch y := arg2.(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case int64:
|
||||
prec = int(y)
|
||||
default:
|
||||
return nil, invArg(y, "formatFloat")
|
||||
}
|
||||
fallthrough
|
||||
case 2:
|
||||
arg1 := coerce1(arg[1], byte(0))
|
||||
switch y := arg1.(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case byte:
|
||||
fmt = y
|
||||
default:
|
||||
return nil, invArg(y, "formatFloat")
|
||||
}
|
||||
}
|
||||
|
||||
return strconv.FormatFloat(val, fmt, prec, bitSize), nil
|
||||
}
|
||||
|
||||
func builtinFormatInt(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
var intVal int64
|
||||
var uintVal uint64
|
||||
|
||||
uintType := false
|
||||
base := 10
|
||||
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case int8:
|
||||
intVal = int64(x)
|
||||
case int16:
|
||||
intVal = int64(x)
|
||||
case int32:
|
||||
intVal = int64(x)
|
||||
case int64:
|
||||
intVal = x
|
||||
case uint8:
|
||||
uintType = true
|
||||
uintVal = uint64(x)
|
||||
case uint16:
|
||||
uintType = true
|
||||
uintVal = uint64(x)
|
||||
case uint32:
|
||||
uintType = true
|
||||
uintVal = uint64(x)
|
||||
case uint64:
|
||||
uintType = true
|
||||
uintVal = x
|
||||
default:
|
||||
return nil, invArg(x, "formatInt")
|
||||
}
|
||||
|
||||
switch len(arg) {
|
||||
case 2:
|
||||
arg1 := coerce1(arg[1], int64(0))
|
||||
switch y := arg1.(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case int64:
|
||||
base = int(y)
|
||||
default:
|
||||
return nil, invArg(y, "formatInt")
|
||||
}
|
||||
}
|
||||
|
||||
if uintType {
|
||||
return strconv.FormatUint(uintVal, base), nil
|
||||
}
|
||||
|
||||
return strconv.FormatInt(intVal, base), nil
|
||||
}
|
||||
|
||||
func builtinHasPrefix(arg []interface{}, _ map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch s := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case string:
|
||||
switch prefix := arg[1].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case string:
|
||||
return strings.HasPrefix(s, prefix), nil
|
||||
default:
|
||||
return nil, invArg(prefix, "string")
|
||||
}
|
||||
default:
|
||||
return nil, invArg(s, "string")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinHasSuffix(arg []interface{}, _ map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch s := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case string:
|
||||
switch suffix := arg[1].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case string:
|
||||
return strings.HasSuffix(s, suffix), nil
|
||||
default:
|
||||
return nil, invArg(suffix, "string")
|
||||
}
|
||||
default:
|
||||
return nil, invArg(s, "string")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinHour(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case time.Time:
|
||||
return int64(x.Hour()), nil
|
||||
default:
|
||||
return nil, invArg(x, "hour")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinHours(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case time.Duration:
|
||||
return x.Hours(), nil
|
||||
default:
|
||||
return nil, invArg(x, "hours")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinID(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := ctx["$id"].(type) {
|
||||
case map[string]interface{}:
|
||||
if len(arg) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
tab := arg[0].(*ident)
|
||||
id, ok := x[tab.s]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("value not available: id(%s)", tab)
|
||||
}
|
||||
|
||||
if _, ok := id.(int64); ok {
|
||||
return id, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("value not available: id(%s)", tab)
|
||||
case int64:
|
||||
return x, nil
|
||||
default:
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
func builtinImag(arg []interface{}, _ map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case idealComplex:
|
||||
return imag(x), nil
|
||||
case complex64:
|
||||
return imag(x), nil
|
||||
case complex128:
|
||||
return imag(x), nil
|
||||
default:
|
||||
return nil, invArg(x, "imag")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinMax(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
if _, ok := ctx["$agg0"]; ok {
|
||||
return
|
||||
}
|
||||
|
||||
fn := ctx["$fn"]
|
||||
if _, ok := ctx["$agg"]; ok {
|
||||
if v, ok = ctx[fn]; ok {
|
||||
return
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
max := ctx[fn]
|
||||
y := arg[0]
|
||||
if y == nil {
|
||||
return
|
||||
}
|
||||
switch x := max.(type) {
|
||||
case nil:
|
||||
switch y := y.(type) {
|
||||
case float32, float64, string, int8, int16, int32, int64, uint8, uint16, uint32, uint64, time.Time:
|
||||
max = y
|
||||
default:
|
||||
return nil, fmt.Errorf("max: cannot accept %v (value if type %T)", y, y)
|
||||
}
|
||||
case float32:
|
||||
if y := y.(float32); y > x {
|
||||
max = y
|
||||
}
|
||||
case float64:
|
||||
if y := y.(float64); y > x {
|
||||
max = y
|
||||
}
|
||||
case string:
|
||||
if y := y.(string); y > x {
|
||||
max = y
|
||||
}
|
||||
case int8:
|
||||
if y := y.(int8); y > x {
|
||||
max = y
|
||||
}
|
||||
case int16:
|
||||
if y := y.(int16); y > x {
|
||||
max = y
|
||||
}
|
||||
case int32:
|
||||
if y := y.(int32); y > x {
|
||||
max = y
|
||||
}
|
||||
case int64:
|
||||
if y := y.(int64); y > x {
|
||||
max = y
|
||||
}
|
||||
case uint8:
|
||||
if y := y.(uint8); y > x {
|
||||
max = y
|
||||
}
|
||||
case uint16:
|
||||
if y := y.(uint16); y > x {
|
||||
max = y
|
||||
}
|
||||
case uint32:
|
||||
if y := y.(uint32); y > x {
|
||||
max = y
|
||||
}
|
||||
case uint64:
|
||||
if y := y.(uint64); y > x {
|
||||
max = y
|
||||
}
|
||||
case time.Time:
|
||||
if y := y.(time.Time); y.After(x) {
|
||||
max = y
|
||||
}
|
||||
}
|
||||
ctx[fn] = max
|
||||
return
|
||||
}
|
||||
|
||||
func builtinMin(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
if _, ok := ctx["$agg0"]; ok {
|
||||
return
|
||||
}
|
||||
|
||||
fn := ctx["$fn"]
|
||||
if _, ok := ctx["$agg"]; ok {
|
||||
if v, ok = ctx[fn]; ok {
|
||||
return
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
min := ctx[fn]
|
||||
y := arg[0]
|
||||
if y == nil {
|
||||
return
|
||||
}
|
||||
switch x := min.(type) {
|
||||
case nil:
|
||||
switch y := y.(type) {
|
||||
case float32, float64, string, int8, int16, int32, int64, uint8, uint16, uint32, uint64, time.Time:
|
||||
min = y
|
||||
default:
|
||||
return nil, fmt.Errorf("min: cannot accept %v (value if type %T)", y, y)
|
||||
}
|
||||
case float32:
|
||||
if y := y.(float32); y < x {
|
||||
min = y
|
||||
}
|
||||
case float64:
|
||||
if y := y.(float64); y < x {
|
||||
min = y
|
||||
}
|
||||
case string:
|
||||
if y := y.(string); y < x {
|
||||
min = y
|
||||
}
|
||||
case int8:
|
||||
if y := y.(int8); y < x {
|
||||
min = y
|
||||
}
|
||||
case int16:
|
||||
if y := y.(int16); y < x {
|
||||
min = y
|
||||
}
|
||||
case int32:
|
||||
if y := y.(int32); y < x {
|
||||
min = y
|
||||
}
|
||||
case int64:
|
||||
if y := y.(int64); y < x {
|
||||
min = y
|
||||
}
|
||||
case uint8:
|
||||
if y := y.(uint8); y < x {
|
||||
min = y
|
||||
}
|
||||
case uint16:
|
||||
if y := y.(uint16); y < x {
|
||||
min = y
|
||||
}
|
||||
case uint32:
|
||||
if y := y.(uint32); y < x {
|
||||
min = y
|
||||
}
|
||||
case uint64:
|
||||
if y := y.(uint64); y < x {
|
||||
min = y
|
||||
}
|
||||
case time.Time:
|
||||
if y := y.(time.Time); y.Before(x) {
|
||||
min = y
|
||||
}
|
||||
}
|
||||
ctx[fn] = min
|
||||
return
|
||||
}
|
||||
|
||||
func builtinMinute(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case time.Time:
|
||||
return int64(x.Minute()), nil
|
||||
default:
|
||||
return nil, invArg(x, "minute")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinMinutes(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case time.Duration:
|
||||
return x.Minutes(), nil
|
||||
default:
|
||||
return nil, invArg(x, "minutes")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinMonth(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case time.Time:
|
||||
return int64(x.Month()), nil
|
||||
default:
|
||||
return nil, invArg(x, "month")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinNanosecond(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case time.Time:
|
||||
return int64(x.Nanosecond()), nil
|
||||
default:
|
||||
return nil, invArg(x, "nanosecond")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinNanoseconds(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case time.Duration:
|
||||
return x.Nanoseconds(), nil
|
||||
default:
|
||||
return nil, invArg(x, "nanoseconds")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinNow(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
return time.Now(), nil
|
||||
}
|
||||
|
||||
func builtinParseTime(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
var a [2]string
|
||||
for i, v := range arg {
|
||||
switch x := v.(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case string:
|
||||
a[i] = x
|
||||
default:
|
||||
return nil, invArg(x, "parseTime")
|
||||
}
|
||||
}
|
||||
|
||||
t, err := time.Parse(a[0], a[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ls := t.Location().String()
|
||||
if ls == "UTC" {
|
||||
return t, nil
|
||||
}
|
||||
|
||||
l, err := time.LoadLocation(ls)
|
||||
if err != nil {
|
||||
return t, nil
|
||||
}
|
||||
|
||||
return time.ParseInLocation(a[0], a[1], l)
|
||||
}
|
||||
|
||||
func builtinReal(arg []interface{}, _ map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case idealComplex:
|
||||
return real(x), nil
|
||||
case complex64:
|
||||
return real(x), nil
|
||||
case complex128:
|
||||
return real(x), nil
|
||||
default:
|
||||
return nil, invArg(x, "real")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinSecond(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case time.Time:
|
||||
return int64(x.Second()), nil
|
||||
default:
|
||||
return nil, invArg(x, "second")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinSeconds(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case time.Duration:
|
||||
return x.Seconds(), nil
|
||||
default:
|
||||
return nil, invArg(x, "seconds")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinSince(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case time.Time:
|
||||
return time.Since(x), nil
|
||||
default:
|
||||
return nil, invArg(x, "since")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinSum(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
if _, ok := ctx["$agg0"]; ok {
|
||||
return
|
||||
}
|
||||
|
||||
fn := ctx["$fn"]
|
||||
if _, ok := ctx["$agg"]; ok {
|
||||
if v, ok = ctx[fn]; ok {
|
||||
return
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
sum := ctx[fn]
|
||||
y := arg[0]
|
||||
if y == nil {
|
||||
return
|
||||
}
|
||||
switch x := sum.(type) {
|
||||
case nil:
|
||||
switch y := y.(type) {
|
||||
case complex64, complex128, float32, float64, int8, int16, int32, int64, uint8, uint16, uint32, uint64:
|
||||
sum = y
|
||||
default:
|
||||
return nil, fmt.Errorf("sum: cannot accept %v (value if type %T)", y, y)
|
||||
}
|
||||
case complex64:
|
||||
sum = x + y.(complex64)
|
||||
case complex128:
|
||||
sum = x + y.(complex128)
|
||||
case float32:
|
||||
sum = x + y.(float32)
|
||||
case float64:
|
||||
sum = x + y.(float64)
|
||||
case int8:
|
||||
sum = x + y.(int8)
|
||||
case int16:
|
||||
sum = x + y.(int16)
|
||||
case int32:
|
||||
sum = x + y.(int32)
|
||||
case int64:
|
||||
sum = x + y.(int64)
|
||||
case uint8:
|
||||
sum = x + y.(uint8)
|
||||
case uint16:
|
||||
sum = x + y.(uint16)
|
||||
case uint32:
|
||||
sum = x + y.(uint32)
|
||||
case uint64:
|
||||
sum = x + y.(uint64)
|
||||
}
|
||||
ctx[fn] = sum
|
||||
return
|
||||
}
|
||||
|
||||
func builtinTimeIn(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case time.Time:
|
||||
switch y := arg[1].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case string:
|
||||
loc := time.Local
|
||||
switch y {
|
||||
case "local":
|
||||
default:
|
||||
loc, err = time.LoadLocation(y)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return x.In(loc), nil
|
||||
default:
|
||||
return nil, invArg(x, "timeIn")
|
||||
}
|
||||
default:
|
||||
return nil, invArg(x, "timeIn")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinWeekday(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case time.Time:
|
||||
return int64(x.Weekday()), nil
|
||||
default:
|
||||
return nil, invArg(x, "weekday")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinYear(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case time.Time:
|
||||
return int64(x.Year()), nil
|
||||
default:
|
||||
return nil, invArg(x, "year")
|
||||
}
|
||||
}
|
||||
|
||||
func builtinYearday(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
|
||||
switch x := arg[0].(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case time.Time:
|
||||
return int64(x.YearDay()), nil
|
||||
default:
|
||||
return nil, invArg(x, "yearDay")
|
||||
}
|
||||
}
|
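The builtin table above maps each function name to its implementation together with its arity bounds and the static/aggregate flags. The sketch below shows how such an entry is typically looked up, arity-checked and invoked; callBuiltin is a hypothetical helper for illustration, not ql's actual evaluation path.

// Illustrative only: dispatching through a table like `builtin` above.
// callBuiltin is a hypothetical helper, not part of the vendored package's API.
func callBuiltin(name string, args []interface{}, ctx map[interface{}]interface{}) (interface{}, error) {
	b, ok := builtin[name]
	if !ok {
		return nil, fmt.Errorf("unknown function %s", name)
	}
	// badNArgs (defined above) reports whether there were too few or too many arguments.
	if len(args) < b.minArgs || len(args) > b.maxArgs {
		return nil, badNArgs(b.minArgs, name, args)
	}
	return b.f(args, ctx)
}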
290
vendor/github.com/cznic/ql/coerce.go
generated
vendored
Normal file
@ -0,0 +1,290 @@
|
||||
// Copyright 2013 The ql Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// CAUTION: This file was generated automatically by
|
||||
//
|
||||
// $ go run helper/helper.go -o coerce.go
|
||||
//
|
||||
// DO NOT EDIT!
|
||||
|
||||
package ql
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
func coerce(a, b interface{}) (x, y interface{}) {
|
||||
if reflect.TypeOf(a) == reflect.TypeOf(b) {
|
||||
return a, b
|
||||
}
|
||||
|
||||
switch a.(type) {
|
||||
case idealComplex, idealFloat, idealInt, idealRune, idealUint:
|
||||
switch b.(type) {
|
||||
case idealComplex, idealFloat, idealInt, idealRune, idealUint:
|
||||
x, y = coerce1(a, b), b
|
||||
if reflect.TypeOf(x) == reflect.TypeOf(y) {
|
||||
return
|
||||
}
|
||||
|
||||
return a, coerce1(b, a)
|
||||
default:
|
||||
return coerce1(a, b), b
|
||||
}
|
||||
default:
|
||||
switch b.(type) {
|
||||
case idealComplex, idealFloat, idealInt, idealRune, idealUint:
|
||||
return a, coerce1(b, a)
|
||||
default:
|
||||
return a, b
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func coerce1(inVal, otherVal interface{}) (coercedInVal interface{}) {
|
||||
coercedInVal = inVal
|
||||
if otherVal == nil {
|
||||
return
|
||||
}
|
||||
|
||||
switch x := inVal.(type) {
|
||||
case nil:
|
||||
return
|
||||
case idealComplex:
|
||||
switch otherVal.(type) {
|
||||
//case idealComplex:
|
||||
//case idealFloat:
|
||||
//case idealInt:
|
||||
//case idealRune:
|
||||
//case idealUint:
|
||||
//case bool:
|
||||
case complex64:
|
||||
return complex64(x)
|
||||
case complex128:
|
||||
return complex128(x)
|
||||
//case float32:
|
||||
//case float64:
|
||||
//case int8:
|
||||
//case int16:
|
||||
//case int32:
|
||||
//case int64:
|
||||
//case string:
|
||||
//case uint8:
|
||||
//case uint16:
|
||||
//case uint32:
|
||||
//case uint64:
|
||||
//case *big.Int:
|
||||
//case *big.Rat:
|
||||
//case time.Time:
|
||||
//case time.Duration:
|
||||
}
|
||||
case idealFloat:
|
||||
switch otherVal.(type) {
|
||||
case idealComplex:
|
||||
return idealComplex(complex(float64(x), 0))
|
||||
case idealFloat:
|
||||
return idealFloat(float64(x))
|
||||
//case idealInt:
|
||||
//case idealRune:
|
||||
//case idealUint:
|
||||
//case bool:
|
||||
case complex64:
|
||||
return complex64(complex(float32(x), 0))
|
||||
case complex128:
|
||||
return complex128(complex(float64(x), 0))
|
||||
case float32:
|
||||
return float32(float64(x))
|
||||
case float64:
|
||||
return float64(float64(x))
|
||||
//case int8:
|
||||
//case int16:
|
||||
//case int32:
|
||||
//case int64:
|
||||
//case string:
|
||||
//case uint8:
|
||||
//case uint16:
|
||||
//case uint32:
|
||||
//case uint64:
|
||||
//case *big.Int:
|
||||
case *big.Rat:
|
||||
return big.NewRat(1, 1).SetFloat64(float64(x))
|
||||
//case time.Time:
|
||||
//case time.Duration:
|
||||
}
|
||||
case idealInt:
|
||||
switch otherVal.(type) {
|
||||
case idealComplex:
|
||||
return idealComplex(complex(float64(x), 0))
|
||||
case idealFloat:
|
||||
return idealFloat(int64(x))
|
||||
case idealInt:
|
||||
return idealInt(int64(x))
|
||||
//case idealRune:
|
||||
case idealUint:
|
||||
if x >= 0 {
|
||||
return idealUint(int64(x))
|
||||
}
|
||||
//case bool:
|
||||
case complex64:
|
||||
return complex64(complex(float32(x), 0))
|
||||
case complex128:
|
||||
return complex128(complex(float64(x), 0))
|
||||
case float32:
|
||||
return float32(int64(x))
|
||||
case float64:
|
||||
return float64(int64(x))
|
||||
case int8:
|
||||
if x >= math.MinInt8 && x <= math.MaxInt8 {
|
||||
return int8(int64(x))
|
||||
}
|
||||
case int16:
|
||||
if x >= math.MinInt16 && x <= math.MaxInt16 {
|
||||
return int16(int64(x))
|
||||
}
|
||||
case int32:
|
||||
if x >= math.MinInt32 && x <= math.MaxInt32 {
|
||||
return int32(int64(x))
|
||||
}
|
||||
case int64:
|
||||
return int64(int64(x))
|
||||
//case string:
|
||||
case uint8:
|
||||
if x >= 0 && x <= math.MaxUint8 {
|
||||
return uint8(int64(x))
|
||||
}
|
||||
case uint16:
|
||||
if x >= 0 && x <= math.MaxUint16 {
|
||||
return uint16(int64(x))
|
||||
}
|
||||
case uint32:
|
||||
if x >= 0 && x <= math.MaxUint32 {
|
||||
return uint32(int64(x))
|
||||
}
|
||||
case uint64:
|
||||
if x >= 0 {
|
||||
return uint64(int64(x))
|
||||
}
|
||||
case *big.Int:
|
||||
return big.NewInt(int64(x))
|
||||
case *big.Rat:
|
||||
return big.NewRat(1, 1).SetInt64(int64(x))
|
||||
//case time.Time:
|
||||
case time.Duration:
|
||||
return time.Duration(int64(x))
|
||||
}
|
||||
case idealRune:
|
||||
switch otherVal.(type) {
|
||||
case idealComplex:
|
||||
return idealComplex(complex(float64(x), 0))
|
||||
case idealFloat:
|
||||
return idealFloat(int64(x))
|
||||
case idealInt:
|
||||
return idealInt(int64(x))
|
||||
case idealRune:
|
||||
return idealRune(int64(x))
|
||||
case idealUint:
|
||||
return idealUint(int64(x))
|
||||
//case bool:
|
||||
case complex64:
|
||||
return complex64(complex(float32(x), 0))
|
||||
case complex128:
|
||||
return complex128(complex(float64(x), 0))
|
||||
case float32:
|
||||
return float32(int64(x))
|
||||
case float64:
|
||||
return float64(int64(x))
|
||||
case int8:
|
||||
return int8(int64(x))
|
||||
case int16:
|
||||
return int16(int64(x))
|
||||
case int32:
|
||||
return int32(int64(x))
|
||||
case int64:
|
||||
return int64(int64(x))
|
||||
//case string:
|
||||
case uint8:
|
||||
return uint8(int64(x))
|
||||
case uint16:
|
||||
return uint16(int64(x))
|
||||
case uint32:
|
||||
return uint32(int64(x))
|
||||
case uint64:
|
||||
return uint64(int64(x))
|
||||
case *big.Int:
|
||||
return big.NewInt(int64(x))
|
||||
case *big.Rat:
|
||||
return big.NewRat(1, 1).SetInt64(int64(x))
|
||||
//case time.Time:
|
||||
case time.Duration:
|
||||
return time.Duration(int64(x))
|
||||
}
|
||||
case idealUint:
|
||||
switch otherVal.(type) {
|
||||
case idealComplex:
|
||||
return idealComplex(complex(float64(x), 0))
|
||||
case idealFloat:
|
||||
return idealFloat(uint64(x))
|
||||
case idealInt:
|
||||
if x <= math.MaxInt64 {
|
||||
return idealInt(int64(x))
|
||||
}
|
||||
//case idealRune:
|
||||
case idealUint:
|
||||
return idealUint(uint64(x))
|
||||
//case bool:
|
||||
case complex64:
|
||||
return complex64(complex(float32(x), 0))
|
||||
case complex128:
|
||||
return complex128(complex(float64(x), 0))
|
||||
case float32:
|
||||
return float32(uint64(x))
|
||||
case float64:
|
||||
return float64(uint64(x))
|
||||
case int8:
|
||||
if x <= math.MaxInt8 {
|
||||
return int8(int64(x))
|
||||
}
|
||||
case int16:
|
||||
if x <= math.MaxInt16 {
|
||||
return int16(int64(x))
|
||||
}
|
||||
case int32:
|
||||
if x <= math.MaxInt32 {
|
||||
return int32(int64(x))
|
||||
}
|
||||
case int64:
|
||||
if x <= math.MaxInt64 {
|
||||
return int64(int64(x))
|
||||
}
|
||||
//case string:
|
||||
case uint8:
|
||||
if x >= 0 && x <= math.MaxUint8 {
|
||||
return uint8(int64(x))
|
||||
}
|
||||
case uint16:
|
||||
if x >= 0 && x <= math.MaxUint16 {
|
||||
return uint16(int64(x))
|
||||
}
|
||||
case uint32:
|
||||
if x >= 0 && x <= math.MaxUint32 {
|
||||
return uint32(int64(x))
|
||||
}
|
||||
case uint64:
|
||||
return uint64(uint64(x))
|
||||
case *big.Int:
|
||||
return big.NewInt(0).SetUint64(uint64(x))
|
||||
case *big.Rat:
|
||||
return big.NewRat(1, 1).SetInt(big.NewInt(0).SetUint64(uint64(x)))
|
||||
//case time.Time:
|
||||
case time.Duration:
|
||||
if x <= math.MaxInt64 {
|
||||
return time.Duration(int64(x))
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
298
vendor/github.com/cznic/ql/design/doc.go
generated
vendored
Normal file
@ -0,0 +1,298 @@
|
||||
// Copyright 2014 The ql Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
|
||||
Package design describes some of the data structures used in QL.
|
||||
|
||||
Handles
|
||||
|
||||
A handle is a 7 byte "pointer" to a block in the DB[0].
|
||||
|
||||
Scalar encoding
|
||||
|
||||
Encoding of so called "scalars" provided by [1]. Unless specified otherwise,
|
||||
all values discussed below are scalars, encoded scalars or encoding of scalar
|
||||
arrays.
|
||||
|
||||
Database root
|
||||
|
||||
DB root is a 1-scalar found at a fixed handle (#1).
|
||||
|
||||
+---+------+--------+-----------------------+
|
||||
| # | Name | Type | Description |
|
||||
+---+------+--------+-----------------------+
|
||||
| 0 | head | handle | First table meta data |
|
||||
+---+------+--------+-----------------------+
|
||||
|
||||
Head is the head of a singly linked list of table meta data. It's zero if
|
||||
there are no tables in the DB.
|
||||
|
||||
Table meta data
|
||||
|
||||
Table meta data are a 6-scalar.
|
||||
|
||||
+---+---------+--------+--------------------------+
|
||||
| # | Name | Type | Description |
|
||||
+---+---------+--------+--------------------------+
|
||||
| 0 | next | handle | Next table meta data. |
|
||||
| 1 | scols | string | Column defintitions |
|
||||
| 2 | hhead | handle | -> head -> first record |
|
||||
| 3 | name | string | Table name |
|
||||
| 4 | indices | string | Index definitions |
|
||||
| 5 | hxroots | handle | Index B+Trees roots list |
|
||||
+---+---------+--------+--------------------------+
|
||||
|
||||
Fields #4 and #5 are optional for backward compatibility with existing
|
||||
databases. OTOH, forward compatibility will not work. Once any indices are
|
||||
created using a newer QL version, the older versions of QL, expecting only 4
|
||||
fields of meta data, will not be able to use the DB. That's the intended
|
||||
behavior because the older versions of QL cannot update the indexes, which can
|
||||
break queries run by the newer QL version, which expects indices to be always
|
||||
actualized on any table-with-indices mutation.
|
||||
|
||||
The handle of the next table meta data is in the field #0 (next). If there is
|
||||
no next table meta data, the field is zero. Names and types of table columns
|
||||
are stored in field #1 (scols). A single field is described by concatenating a
|
||||
type tag and the column name. The type tags are
|
||||
|
||||
bool 'b'
|
||||
complex64 'c'
|
||||
complex128 'd'
|
||||
float32 'f'
|
||||
float64 'g', alias float
|
||||
int8 'i'
|
||||
int16 'j'
|
||||
int32 'k'
|
||||
int64 'l', alias int
|
||||
string 's'
|
||||
uint8 'u', alias byte
|
||||
uint16 'v'
|
||||
uint32 'w'
|
||||
uint64 'x', alias uint
|
||||
bigInt 'I'
|
||||
bigRat 'R'
|
||||
blob 'B'
|
||||
duration 'D'
|
||||
time 'T'
|
||||
|
||||
The scols value is the above described encoded fields joined using "|". For
|
||||
example
|
||||
|
||||
CREATE TABLE t (Foo bool, Bar string, Baz float);
|
||||
|
||||
This statement adds a table meta data with scols
|
||||
|
||||
"bFool|sBar|gBaz"
|
||||
|
||||
Columns can be dropped from a table
|
||||
|
||||
ALTER TABLE t DROP COLUMN Bar;
|
||||
|
||||
This "erases" the field info in scols, so the value becomes
|
||||
|
||||
"bFool||gBaz"
|
||||
|
||||
Columns can be added to a table
|
||||
|
||||
ALTER TABLE t ADD Count uint;
|
||||
|
||||
New fields are always added to the end of scols
|
||||
|
||||
"bFool||gBaz|xCount"
|
||||
|
||||
Index of a field in strings.Split(scols, "|") is the index of the field in a
|
||||
table record. The above discussed rules for column dropping and column adding
|
||||
allow for schema evolution without a need to reshape any existing table data.
|
||||
Dropped columns are left where they are and new records insert nil in their
|
||||
place. The encoded nil is one byte. Added columns, when not present in
|
||||
preexisting records are returned as nil values. If the overhead of dropped
|
||||
columns becomes an issue and there's time/space and memory enough to move the
|
||||
records of a table around:
|
||||
|
||||
BEGIN TRANSACTION;
|
||||
CREATE TABLE new (column definitions);
|
||||
INSERT INTO new SELECT * FROM old;
|
||||
DROP TABLE old;
|
||||
CREATE TABLE old (column definitions);
|
||||
INSERT INTO old SELECT * FROM new;
|
||||
DROP TABLE new;
|
||||
END TRANSACTION;
|
||||
|
||||
This is not very time/space effective and for Big Data it can cause an OOM
|
||||
because transactions are limited by memory resources available to the process.
|
||||
Perhaps a method and/or QL statement to do this in-place should be added
|
||||
(MAYBE consider adopting MySQL's OPTIMIZE TABLE syntax).
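A minimal decoding sketch (an illustration, not code from QL itself) splits an scols value into per-column type tags and names, keeping empty entries as placeholders for dropped columns:

	package main

	import (
		"fmt"
		"strings"
	)

	// column is one decoded scols entry: a one-byte type tag and the column
	// name. A zero column stands for a dropped column's empty entry.
	type column struct {
		tag  byte
		name string
	}

	func parseScols(scols string) []column {
		var cols []column
		for _, s := range strings.Split(scols, "|") {
			if s == "" { // dropped column keeps its slot
				cols = append(cols, column{})
				continue
			}
			cols = append(cols, column{tag: s[0], name: s[1:]})
		}
		return cols
	}

	func main() {
		// The example scols value from the text above.
		fmt.Printf("%+v\n", parseScols("bFool||gBaz|xCount"))
	}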
|
||||
|
||||
Field #2 (hhead) is a handle to a head of table records, i.e. not a handle to
|
||||
the first record in the table. It is thus always non zero even for a table
|
||||
having no records. The reason for this "double pointer" schema is to enable
|
||||
adding (linking) a new record by updating a single value of the (hhead pointing
|
||||
to) head.
|
||||
|
||||
tableMeta.hhead -> head -> firstTableRecord
|
||||
|
||||
The table name is stored in field #3 (name).
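A rough in-memory analogy of this double-pointer scheme (not the actual lldb-backed code) shows why linking a new record rewrites only the head value:

	package sketch

	// record mirrors the table record layout: field #0 is the link to the
	// next record.
	type record struct {
		next *record
		id   int64
	}

	// prepend links a new record by rewriting only the head cell, which is
	// what updating the value behind hhead corresponds to in the DB.
	func prepend(head **record, r *record) {
		r.next = *head
		*head = r
	}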
|
||||
|
||||
Indices
|
||||
|
||||
Consider an index named N, indexing column named C. The encoding of this
|
||||
particular index is a string "<tag>N". <tag> is a string "n" for non unique
|
||||
indices and "u" for unique indices. There is this index information for the
|
||||
index possibly indexing the record id() and for all other columns of scols.
|
||||
Where the column is not indexed, the index info is an empty string. Infos for
|
||||
all indexes are joined with "|". For example
|
||||
|
||||
BEGIN TRANSACTION;
|
||||
CREATE TABLE t (Foo int, Bar bool, Baz string);
|
||||
CREATE INDEX X ON t (Baz);
|
||||
CREATE UNIQUE INDEX Y ON t (Foo);
|
||||
COMMIT;
|
||||
|
||||
The values of fields #1 and #4 for the above are
|
||||
|
||||
scols: "lFoo|bBar|sBaz"
|
||||
indices: "|uY||nX"
|
||||
|
||||
Aligning properly the "|" split parts
|
||||
|
||||
id col #0 col#1 col#2
|
||||
+----------+----+--------+--------+--------+
|
||||
| scols: | | "lFoo" | "bBar" | "sBaz" |
|
||||
+----------+----+--------+--------+--------+
|
||||
| indices: | "" | "uY" | "" | "nX" |
|
||||
+----------+----+--------+--------+--------+
|
||||
|
||||
shows that the record id() is not indexed for this table while the columns Foo
|
||||
and Baz are.
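A hedged decoding sketch for the indices string (again illustrative, not QL's own code); entry 0 describes an index on id() and entry i>0 describes an index on column i-1 of scols:

	package sketch

	import "strings"

	// indexInfo is one decoded indices entry: "" means no index, "uY" a
	// unique index named Y, "nX" a non unique index named X.
	type indexInfo struct {
		unique bool
		name   string
	}

	func parseIndices(indices string) []indexInfo {
		var r []indexInfo
		for _, s := range strings.Split(indices, "|") {
			if s == "" { // column (or id()) is not indexed
				r = append(r, indexInfo{})
				continue
			}
			r = append(r, indexInfo{unique: s[0] == 'u', name: s[1:]})
		}
		return r
	}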
|
||||
|
||||
Note that there cannot be two differently named indexes for the same column and
|
||||
that is intentional. The indices are B+Trees[2]. The list of handles to their roots
|
||||
is pointed to by hxroots with zeros for non indexed columns. For the previous
|
||||
example
|
||||
|
||||
tableMeta.hxroots -> {0, y, 0, x}
|
||||
|
||||
where x is the root of the B+Tree for the X index and y is the root of the
|
||||
B+Tree for the Y index. If there were an index for id(), its B+Tree root
|
||||
would be present where the first zero is. Similarly to hhead, hxroots is never
|
||||
zero, even when there are no indices for a table.
|
||||
|
||||
Table record
|
||||
|
||||
A table record is an N-scalar.
|
||||
|
||||
+-----+------------+--------+-------------------------------+
|
||||
| # | Name | Type | Description |
|
||||
+-----+------------+--------+-------------------------------+
|
||||
| 0 | next | handle | Next record or zero. |
|
||||
| 1 | id | int64 | Automatically assigned unique |
|
||||
| | | | value obtainable by id(). |
|
||||
| 2 | field #0 | scalar | First field of the record. |
|
||||
| 3 | field #1 | scalar | Second field of the record. |
|
||||
...
|
||||
| N-1 | field #N-2 | scalar | Last field of the record. |
|
||||
+-----+------------+--------+-------------------------------+
|
||||
|
||||
The linked "ordering" of table records has no semantics and it doesn't have to
|
||||
correlate to the order of how the records were added to the table. In fact, an
|
||||
efficient way of linking leads to an "ordering" which is actually reversed wrt
|
||||
the insertion order.
|
||||
|
||||
Non unique index
|
||||
|
||||
The composite key of the B+Tree is {indexed values, record handle}. The B+Tree
|
||||
value is not used.
|
||||
|
||||
B+Tree key B+Tree value
|
||||
+----------------+---------------+ +--------------+
|
||||
| Indexed Values | Record Handle | -> | not used |
|
||||
+----------------+---------------+ +--------------+
|
||||
|
||||
Unique index
|
||||
|
||||
If the indexed values are all NULL then the composite B+Tree key is {nil,
|
||||
record handle} and the B+Tree value is not used.
|
||||
|
||||
B+Tree key B+Tree value
|
||||
+------+-----------------+ +--------------+
|
||||
| NULL | Record Handle | -> | not used |
|
||||
+------+-----------------+ +--------------+
|
||||
|
||||
If the indexed values are not all NULL then the B+Tree key is the indexed
|
||||
values and the B+Tree value is the record handle.
|
||||
|
||||
B+Tree key B+Tree value
|
||||
+----------------+ +---------------+
|
||||
| Indexed Values | -> | Record Handle |
|
||||
+----------------+ +---------------+
|
||||
|
||||
Non scalar types
|
||||
|
||||
Scalar types of [1] are bool, complex*, float*, int*, uint*, string and []byte
|
||||
types. All other types are "blob-like".
|
||||
|
||||
QL type Go type
|
||||
-----------------------------
|
||||
blob []byte
|
||||
bigint big.Int
|
||||
bigrat big.Rat
|
||||
time time.Time
|
||||
duration time.Duration
|
||||
|
||||
Memory back-end stores the Go type directly. File back-end must resort to
|
||||
encoding all of the above as (tagged) []byte due to the lack of more types
|
||||
supported natively by lldb. NULL values of blob-like types are encoded as nil
|
||||
(gbNull in lldb/gb.go), exactly the same as the already existing QL types are.
|
||||
|
||||
Blob encoding
|
||||
|
||||
The values of the blob-like types are first encoded into a []byte slice:
|
||||
|
||||
+-----------------------+-------------------+
|
||||
| blob | raw |
|
||||
| bigint, bigrat, time | gob encoded |
|
||||
| duration | gob encoded int64 |
|
||||
+-----------------------+-------------------+
|
||||
|
||||
The gob encoding is "differential" wrt an initial encoding of all of the
|
||||
blob-like type. IOW, the initial type descriptors which gob encoding must write
|
||||
out are stripped off and "resupplied" on decoding transparently. See also
|
||||
blob.go. If the length of the resulting slice is <= shortBlob, the first and
|
||||
only chunk is the scalar encoding of
|
||||
|
||||
|
||||
[]interface{}{typeTag, slice}. // initial (and last) chunk
|
||||
|
||||
The length of slice can be zero (for blob("")). If the resulting slice is long
|
||||
(> shortBlob), the first chunk comes from encoding
|
||||
|
||||
[]interface{}{typeTag, nextHandle, firstPart}. // initial, but not final chunk
|
||||
|
||||
In this case len(firstPart) <= shortBlob. Second and other chunks: If the chunk
|
||||
is the last one, src is
|
||||
|
||||
[]interface{}{lastPart}. // overflow chunk (last)
|
||||
|
||||
In this case len(lastPart) <= 64kB. If the chunk is not the last one, src is
|
||||
|
||||
[]interface{}{nextHandle, part}. // overflow chunk (not last)
|
||||
|
||||
In this case len(part) == 64kB.
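The chunking rule can be sketched as follows; shortBlob is an assumed constant here and the handle links between chunks are left out:

	package sketch

	const (
		shortBlob = 256      // assumed value for illustration; QL defines its own
		chunkMax  = 64 << 10 // the 64kB overflow chunk size from the text above
	)

	// chunk splits an encoded blob into a first part of at most shortBlob
	// bytes and 64kB overflow parts; only the last overflow part may be
	// shorter than 64kB.
	func chunk(b []byte) (first []byte, overflow [][]byte) {
		if len(b) <= shortBlob {
			return b, nil
		}
		first, b = b[:shortBlob], b[shortBlob:]
		for len(b) > chunkMax {
			overflow = append(overflow, b[:chunkMax])
			b = b[chunkMax:]
		}
		return first, append(overflow, b)
	}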
|
||||
|
||||
Links
|
||||
|
||||
Referenced from above:
|
||||
|
||||
[0]: http://godoc.org/github.com/cznic/exp/lldb#hdr-Block_handles
|
||||
[1]: http://godoc.org/github.com/cznic/exp/lldb#EncodeScalars
|
||||
[2]: http://godoc.org/github.com/cznic/exp/lldb#BTree
|
||||
|
||||
Rationale
|
||||
|
||||
While these notes might be useful to anyone looking at QL sources, the
|
||||
specifically intended reader is my future self.
|
||||
|
||||
*/
|
||||
package design
|
2619
vendor/github.com/cznic/ql/doc.go
generated
vendored
Normal file
File diff suppressed because it is too large
523
vendor/github.com/cznic/ql/driver.go
generated
vendored
Normal file
@ -0,0 +1,523 @@
|
||||
// Copyright 2014 The ql Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// database/sql/driver
|
||||
|
||||
package ql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
_ driver.Conn = (*driverConn)(nil)
|
||||
_ driver.Driver = (*sqlDriver)(nil)
|
||||
_ driver.Execer = (*driverConn)(nil)
|
||||
_ driver.Queryer = (*driverConn)(nil)
|
||||
_ driver.Result = (*driverResult)(nil)
|
||||
_ driver.Rows = (*driverRows)(nil)
|
||||
_ driver.Stmt = (*driverStmt)(nil)
|
||||
_ driver.Tx = (*driverConn)(nil)
|
||||
|
||||
txBegin = MustCompile("BEGIN TRANSACTION;")
|
||||
txCommit = MustCompile("COMMIT;")
|
||||
txRollback = MustCompile("ROLLBACK;")
|
||||
|
||||
errNoResult = errors.New("query statement does not produce a result set (no top level SELECT)")
|
||||
)
|
||||
|
||||
type errList []error
|
||||
|
||||
func (e *errList) append(err error) {
|
||||
if err != nil {
|
||||
*e = append(*e, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (e errList) error() error {
|
||||
if len(e) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return e
|
||||
}
|
||||
|
||||
func (e errList) Error() string {
|
||||
a := make([]string, len(e))
|
||||
for i, v := range e {
|
||||
a[i] = v.Error()
|
||||
}
|
||||
return strings.Join(a, "\n")
|
||||
}
|
||||
|
||||
func params(args []driver.Value) []interface{} {
|
||||
r := make([]interface{}, len(args))
|
||||
for i, v := range args {
|
||||
r[i] = interface{}(v)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
var (
|
||||
fileDriver = &sqlDriver{dbs: map[string]*driverDB{}}
|
||||
fileDriverOnce sync.Once
|
||||
memDriver = &sqlDriver{isMem: true, dbs: map[string]*driverDB{}}
|
||||
memDriverOnce sync.Once
|
||||
)
|
||||
|
||||
// RegisterDriver registers a QL database/sql/driver[0] named "ql". The name
|
||||
// parameter of
|
||||
//
|
||||
// sql.Open("ql", name)
|
||||
//
|
||||
// is interpreted as a path name to a named DB file which will be created if
|
||||
// not present. The underlying QL database data are persisted on db.Close().
|
||||
// RegisterDriver can be safely called multiple times, it'll register the
|
||||
// driver only once.
|
||||
//
|
||||
// The name argument can be optionally prefixed by "file://". In that case the
|
||||
// prefix is stripped before interpreting it as a file name.
|
||||
//
|
||||
// The name argument can be optionally prefixed by "memory://". In that case
|
||||
// the prefix is stripped before interpreting it as a name of a memory-only,
|
||||
// volatile DB.
|
||||
//
|
||||
// [0]: http://golang.org/pkg/database/sql/driver/
|
||||
func RegisterDriver() {
|
||||
fileDriverOnce.Do(func() { sql.Register("ql", fileDriver) })
|
||||
}
|
||||
|
||||
// RegisterMemDriver registers a QL memory database/sql/driver[0] named
|
||||
// "ql-mem". The name parameter of
|
||||
//
|
||||
// sql.Open("ql-mem", name)
|
||||
//
|
||||
// is interpreted as a unique memory DB name which will be created if not
|
||||
// present. The underlying QL memory database data are not persisted on
|
||||
// db.Close(). RegisterMemDriver can be safely called multiple times, it'll
|
||||
// register the driver only once.
|
||||
//
|
||||
// [0]: http://golang.org/pkg/database/sql/driver/
|
||||
func RegisterMemDriver() {
|
||||
memDriverOnce.Do(func() { sql.Register("ql-mem", memDriver) })
|
||||
}
|
||||
|
||||
type driverDB struct {
|
||||
db *DB
|
||||
name string
|
||||
refcount int
|
||||
}
|
||||
|
||||
func newDriverDB(db *DB, name string) *driverDB {
|
||||
return &driverDB{db: db, name: name, refcount: 1}
|
||||
}
|
||||
|
||||
// sqlDriver implements the interface required by database/sql/driver.
|
||||
type sqlDriver struct {
|
||||
dbs map[string]*driverDB
|
||||
isMem bool
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func (d *sqlDriver) lock() func() {
|
||||
d.mu.Lock()
|
||||
return d.mu.Unlock
|
||||
}
|
||||
|
||||
// Open returns a new connection to the database. The name is a string in a
|
||||
// driver-specific format.
|
||||
//
|
||||
// Open may return a cached connection (one previously closed), but doing so is
|
||||
// unnecessary; the sql package maintains a pool of idle connections for
|
||||
// efficient re-use.
|
||||
//
|
||||
// The returned connection is only used by one goroutine at a time.
|
||||
func (d *sqlDriver) Open(name string) (driver.Conn, error) {
|
||||
if d != fileDriver && d != memDriver {
|
||||
return nil, fmt.Errorf("open: unexpected/unsupported instance of driver.Driver: %p", d)
|
||||
}
|
||||
|
||||
switch {
|
||||
case d == fileDriver && strings.HasPrefix(name, "file://"):
|
||||
name = name[len("file://"):]
|
||||
case d == fileDriver && strings.HasPrefix(name, "memory://"):
|
||||
d = memDriver
|
||||
name = name[len("memory://"):]
|
||||
}
|
||||
name = filepath.Clean(name)
|
||||
if name == "" || name == "." || name == string(os.PathSeparator) {
|
||||
return nil, fmt.Errorf("invalid DB name %q", name)
|
||||
}
|
||||
|
||||
defer d.lock()()
|
||||
db := d.dbs[name]
|
||||
if db == nil {
|
||||
var err error
|
||||
var db0 *DB
|
||||
switch d.isMem {
|
||||
case true:
|
||||
db0, err = OpenMem()
|
||||
default:
|
||||
db0, err = OpenFile(name, &Options{CanCreate: true})
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
db = newDriverDB(db0, name)
|
||||
d.dbs[name] = db
|
||||
return newDriverConn(d, db), nil
|
||||
}
|
||||
|
||||
db.refcount++
|
||||
return newDriverConn(d, db), nil
|
||||
}
|
||||
|
||||
// driverConn is a connection to a database. It is not used concurrently by
|
||||
// multiple goroutines.
|
||||
//
|
||||
// Conn is assumed to be stateful.
|
||||
type driverConn struct {
|
||||
ctx *TCtx
|
||||
db *driverDB
|
||||
driver *sqlDriver
|
||||
stop map[*driverStmt]struct{}
|
||||
tnl int
|
||||
}
|
||||
|
||||
func newDriverConn(d *sqlDriver, ddb *driverDB) driver.Conn {
|
||||
r := &driverConn{
|
||||
db: ddb,
|
||||
driver: d,
|
||||
stop: map[*driverStmt]struct{}{},
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// Prepare returns a prepared statement, bound to this connection.
|
||||
func (c *driverConn) Prepare(query string) (driver.Stmt, error) {
|
||||
list, err := Compile(query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := &driverStmt{conn: c, stmt: list}
|
||||
c.stop[s] = struct{}{}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Close invalidates and potentially stops any current prepared statements and
|
||||
// transactions, marking this connection as no longer in use.
|
||||
//
|
||||
// Because the sql package maintains a free pool of connections and only calls
|
||||
// Close when there's a surplus of idle connections, it shouldn't be necessary
|
||||
// for drivers to do their own connection caching.
|
||||
func (c *driverConn) Close() error {
|
||||
var err errList
|
||||
for s := range c.stop {
|
||||
err.append(s.Close())
|
||||
}
|
||||
defer c.driver.lock()()
|
||||
dbs, name := c.driver.dbs, c.db.name
|
||||
v := dbs[name]
|
||||
v.refcount--
|
||||
if v.refcount == 0 {
|
||||
err.append(c.db.db.Close())
|
||||
delete(dbs, name)
|
||||
}
|
||||
return err.error()
|
||||
}
|
||||
|
||||
// Begin starts and returns a new transaction.
|
||||
func (c *driverConn) Begin() (driver.Tx, error) {
|
||||
if c.ctx == nil {
|
||||
c.ctx = NewRWCtx()
|
||||
}
|
||||
|
||||
if _, _, err := c.db.db.Execute(c.ctx, txBegin); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.tnl++
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *driverConn) Commit() error {
|
||||
if c.tnl == 0 || c.ctx == nil {
|
||||
return errCommitNotInTransaction
|
||||
}
|
||||
|
||||
if _, _, err := c.db.db.Execute(c.ctx, txCommit); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.tnl--
|
||||
if c.tnl == 0 {
|
||||
c.ctx = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *driverConn) Rollback() error {
|
||||
if c.tnl == 0 || c.ctx == nil {
|
||||
return errRollbackNotInTransaction
|
||||
}
|
||||
|
||||
if _, _, err := c.db.db.Execute(c.ctx, txRollback); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.tnl--
|
||||
if c.tnl == 0 {
|
||||
c.ctx = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Execer is an optional interface that may be implemented by a Conn.
|
||||
//
|
||||
// If a Conn does not implement Execer, the sql package's DB.Exec will first
|
||||
// prepare a query, execute the statement, and then close the statement.
|
||||
//
|
||||
// Exec may return driver.ErrSkip.
|
||||
func (c *driverConn) Exec(query string, args []driver.Value) (driver.Result, error) {
|
||||
list, err := Compile(query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return driverExec(c.db, c.ctx, list, args)
|
||||
}
|
||||
|
||||
func driverExec(db *driverDB, ctx *TCtx, list List, args []driver.Value) (driver.Result, error) {
|
||||
if _, _, err := db.db.Execute(ctx, list, params(args)...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(list.l) == 1 {
|
||||
switch list.l[0].(type) {
|
||||
case *createTableStmt, *dropTableStmt, *alterTableAddStmt,
|
||||
*alterTableDropColumnStmt, *truncateTableStmt:
|
||||
return driver.ResultNoRows, nil
|
||||
}
|
||||
}
|
||||
|
||||
r := &driverResult{}
|
||||
if ctx != nil {
|
||||
r.lastInsertID, r.rowsAffected = ctx.LastInsertID, ctx.RowsAffected
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// Queryer is an optional interface that may be implemented by a Conn.
|
||||
//
|
||||
// If a Conn does not implement Queryer, the sql package's DB.Query will first
|
||||
// prepare a query, execute the statement, and then close the statement.
|
||||
//
|
||||
// Query may return driver.ErrSkip.
|
||||
func (c *driverConn) Query(query string, args []driver.Value) (driver.Rows, error) {
|
||||
list, err := Compile(query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return driverQuery(c.db, c.ctx, list, args)
|
||||
}
|
||||
|
||||
func driverQuery(db *driverDB, ctx *TCtx, list List, args []driver.Value) (driver.Rows, error) {
|
||||
rss, _, err := db.db.Execute(ctx, list, params(args)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch n := len(rss); n {
|
||||
case 0:
|
||||
return nil, errNoResult
|
||||
case 1:
|
||||
return newdriverRows(rss[len(rss)-1]), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("query produced %d result sets, expected only one", n)
|
||||
}
|
||||
}
|
||||
|
||||
// driverResult is the result of a query execution.
|
||||
type driverResult struct {
|
||||
lastInsertID int64
|
||||
rowsAffected int64
|
||||
}
|
||||
|
||||
// LastInsertId returns the database's auto-generated ID after, for example, an
|
||||
// INSERT into a table with primary key.
|
||||
func (r *driverResult) LastInsertId() (int64, error) { // -golint
|
||||
return r.lastInsertID, nil
|
||||
}
|
||||
|
||||
// RowsAffected returns the number of rows affected by the query.
|
||||
func (r *driverResult) RowsAffected() (int64, error) {
|
||||
return r.rowsAffected, nil
|
||||
}
|
||||
|
||||
// driverRows is an iterator over an executed query's results.
|
||||
type driverRows struct {
|
||||
rs Recordset
|
||||
done chan int
|
||||
rows chan interface{}
|
||||
}
|
||||
|
||||
func newdriverRows(rs Recordset) *driverRows {
|
||||
r := &driverRows{
|
||||
rs: rs,
|
||||
done: make(chan int),
|
||||
rows: make(chan interface{}, 500),
|
||||
}
|
||||
go func() {
|
||||
err := io.EOF
|
||||
if e := r.rs.Do(false, func(data []interface{}) (bool, error) {
|
||||
select {
|
||||
case r.rows <- data:
|
||||
return true, nil
|
||||
case <-r.done:
|
||||
return false, nil
|
||||
}
|
||||
}); e != nil {
|
||||
err = e
|
||||
}
|
||||
|
||||
select {
|
||||
case r.rows <- err:
|
||||
case <-r.done:
|
||||
}
|
||||
}()
|
||||
return r
|
||||
}
|
||||
|
||||
// Columns returns the names of the columns. The number of columns of the
|
||||
// result is inferred from the length of the slice. If a particular column
|
||||
// name isn't known, an empty string should be returned for that entry.
|
||||
func (r *driverRows) Columns() []string {
|
||||
f, _ := r.rs.Fields()
|
||||
return f
|
||||
}
|
||||
|
||||
// Close closes the rows iterator.
|
||||
func (r *driverRows) Close() error {
|
||||
close(r.done)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Next is called to populate the next row of data into the provided slice. The
|
||||
// provided slice will be the same size as the Columns() are wide.
|
||||
//
|
||||
// The dest slice may be populated only with a driver Value type, but excluding
|
||||
// string. All string values must be converted to []byte.
|
||||
//
|
||||
// Next should return io.EOF when there are no more rows.
|
||||
func (r *driverRows) Next(dest []driver.Value) error {
|
||||
select {
|
||||
case rx := <-r.rows:
|
||||
switch x := rx.(type) {
|
||||
case error:
|
||||
return x
|
||||
case []interface{}:
|
||||
if g, e := len(x), len(dest); g != e {
|
||||
return fmt.Errorf("field count mismatch: got %d, need %d", g, e)
|
||||
}
|
||||
|
||||
for i, xi := range x {
|
||||
switch v := xi.(type) {
|
||||
case nil, int64, float64, bool, []byte, time.Time:
|
||||
dest[i] = v
|
||||
case complex64, complex128, *big.Int, *big.Rat:
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprintf(&buf, "%v", v)
|
||||
dest[i] = buf.Bytes()
|
||||
case int8:
|
||||
dest[i] = int64(v)
|
||||
case int16:
|
||||
dest[i] = int64(v)
|
||||
case int32:
|
||||
dest[i] = int64(v)
|
||||
case int:
|
||||
dest[i] = int64(v)
|
||||
case uint8:
|
||||
dest[i] = int64(v)
|
||||
case uint16:
|
||||
dest[i] = int64(v)
|
||||
case uint32:
|
||||
dest[i] = int64(v)
|
||||
case uint64:
|
||||
dest[i] = int64(v)
|
||||
case uint:
|
||||
dest[i] = int64(v)
|
||||
case time.Duration:
|
||||
dest[i] = int64(v)
|
||||
case string:
|
||||
dest[i] = []byte(v)
|
||||
default:
|
||||
return fmt.Errorf("internal error 004")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("internal error 005")
|
||||
}
|
||||
case <-r.done:
|
||||
return io.EOF
|
||||
}
|
||||
}
|
||||
|
||||
// driverStmt is a prepared statement. It is bound to a driverConn and not used
|
||||
// by multiple goroutines concurrently.
|
||||
type driverStmt struct {
|
||||
conn *driverConn
|
||||
stmt List
|
||||
}
|
||||
|
||||
// Close closes the statement.
|
||||
//
|
||||
// As of Go 1.1, a Stmt will not be closed if it's in use by any queries.
|
||||
func (s *driverStmt) Close() error {
|
||||
delete(s.conn.stop, s)
|
||||
return nil
|
||||
}
|
||||
|
||||
// NumInput returns the number of placeholder parameters.
|
||||
//
|
||||
// If NumInput returns >= 0, the sql package will sanity check argument counts
|
||||
// from callers and return errors to the caller before the statement's Exec or
|
||||
// Query methods are called.
|
||||
//
|
||||
// NumInput may also return -1, if the driver doesn't know its number of
|
||||
// placeholders. In that case, the sql package will not sanity check Exec or
|
||||
// Query argument counts.
|
||||
func (s *driverStmt) NumInput() int {
|
||||
if x := s.stmt; len(x.l) == 1 {
|
||||
return x.params
|
||||
}
|
||||
|
||||
return -1
|
||||
}
|
||||
|
||||
// Exec executes a query that doesn't return rows, such as an INSERT or UPDATE.
|
||||
func (s *driverStmt) Exec(args []driver.Value) (driver.Result, error) {
|
||||
c := s.conn
|
||||
return driverExec(c.db, c.ctx, s.stmt, args)
|
||||
}
|
||||
|
||||
// Query executes a query that may return rows, such as a SELECT.
|
||||
func (s *driverStmt) Query(args []driver.Value) (driver.Rows, error) {
|
||||
c := s.conn
|
||||
return driverQuery(c.db, c.ctx, s.stmt, args)
|
||||
}
|
61
vendor/github.com/cznic/ql/driver/driver.go
generated
vendored
Normal file
@ -0,0 +1,61 @@
|
||||
// Copyright 2014 The ql Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package driver registers a QL sql/driver named "ql" and a memory driver named "ql-mem".
|
||||
|
||||
See also [0], [1] and [3].
|
||||
|
||||
Usage
|
||||
|
||||
A skeleton program using ql/driver.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
_ "github.com/cznic/ql/driver"
|
||||
)
|
||||
|
||||
func main() {
|
||||
...
|
||||
// Disk file DB
|
||||
db, err := sql.Open("ql", "ql.db") // [2]
|
||||
// alternatively
|
||||
db, err := sql.Open("ql", "file://ql.db")
|
||||
|
||||
// and/or
|
||||
|
||||
// RAM DB
|
||||
mdb, err := sql.Open("ql-mem", "mem.db")
|
||||
// alternatively
|
||||
mdb, err := sql.Open("ql", "memory://mem.db")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Use db/mdb here
|
||||
...
|
||||
}
|
||||
|
||||
This package exports nothing.
|
||||
|
||||
Links
|
||||
|
||||
Referenced from above:
|
||||
|
||||
[0]: http://godoc.org/github.com/cznic/ql
|
||||
[1]: http://golang.org/pkg/database/sql/
|
||||
[2]: http://golang.org/pkg/database/sql/#Open
|
||||
[3]: http://golang.org/pkg/database/sql/driver
|
||||
*/
|
||||
package driver
|
||||
|
||||
import "github.com/cznic/ql"
|
||||
|
||||
func init() {
|
||||
ql.RegisterDriver()
|
||||
ql.RegisterMemDriver()
|
||||
}
|
18
vendor/github.com/cznic/ql/errors.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
// Copyright 2014 The ql Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ql
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
var (
|
||||
errBeginTransNoCtx = errors.New("BEGIN TRANSACTION: Must use R/W context, have nil")
|
||||
errCommitNotInTransaction = errors.New("COMMIT: Not in transaction")
|
||||
errDivByZero = errors.New("division by zero")
|
||||
errIncompatibleDBFormat = errors.New("incompatible DB format")
|
||||
errNoDataForHandle = errors.New("read: no data for handle")
|
||||
errRollbackNotInTransaction = errors.New("ROLLBACK: Not in transaction")
|
||||
)
|
2805
vendor/github.com/cznic/ql/etc.go
generated
vendored
Normal file
File diff suppressed because it is too large
4025
vendor/github.com/cznic/ql/expr.go
generated
vendored
Normal file
File diff suppressed because it is too large
1323
vendor/github.com/cznic/ql/file.go
generated
vendored
Normal file
File diff suppressed because it is too large
338
vendor/github.com/cznic/ql/helper/helper.go
generated
vendored
Normal file
@ -0,0 +1,338 @@
|
||||
// +build ignore
|
||||
|
||||
// Copyright 2014 The ql Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
type t int
|
||||
|
||||
const (
|
||||
qNil t = iota
|
||||
idealComplex
|
||||
idealFloat
|
||||
idealInt
|
||||
idealRune
|
||||
idealUint
|
||||
qBool
|
||||
qComplex64
|
||||
qComplex128
|
||||
qFloat32
|
||||
qFloat64
|
||||
qInt8
|
||||
qInt16
|
||||
qInt32
|
||||
qInt64
|
||||
qString
|
||||
qUint8
|
||||
qUint16
|
||||
qUint32
|
||||
qUint64
|
||||
qBigInt
|
||||
qBigRat
|
||||
qTime
|
||||
qDuration
|
||||
|
||||
qEnd
|
||||
)
|
||||
|
||||
func (n t) String() string {
|
||||
switch n {
|
||||
case qNil:
|
||||
return "nil"
|
||||
case idealComplex:
|
||||
return "idealComplex"
|
||||
case idealFloat:
|
||||
return "idealFloat"
|
||||
case idealInt:
|
||||
return "idealInt"
|
||||
case idealRune:
|
||||
return "idealRune"
|
||||
case idealUint:
|
||||
return "idealUint"
|
||||
case qBool:
|
||||
return "bool"
|
||||
case qComplex64:
|
||||
return "complex64"
|
||||
case qComplex128:
|
||||
return "complex128"
|
||||
case qFloat32:
|
||||
return "float32"
|
||||
case qFloat64:
|
||||
return "float64"
|
||||
case qInt8:
|
||||
return "int8"
|
||||
case qInt16:
|
||||
return "int16"
|
||||
case qInt32:
|
||||
return "int32"
|
||||
case qInt64:
|
||||
return "int64"
|
||||
case qString:
|
||||
return "string"
|
||||
case qUint8:
|
||||
return "uint8"
|
||||
case qUint16:
|
||||
return "uint16"
|
||||
case qUint32:
|
||||
return "uint32"
|
||||
case qUint64:
|
||||
return "uint64"
|
||||
case qBigInt:
|
||||
return "*big.Int"
|
||||
case qBigRat:
|
||||
return "*big.Rat"
|
||||
case qTime:
|
||||
return "time.Time"
|
||||
case qDuration:
|
||||
return "time.Duration"
|
||||
default:
|
||||
panic("internal error 046")
|
||||
}
|
||||
}
|
||||
|
||||
func coerceIdealComplex(typ t) string {
|
||||
switch typ {
|
||||
case qComplex64, qComplex128:
|
||||
return fmt.Sprintf("return %s(x)\n", typ)
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func coerceIdealFloat(typ t) string {
|
||||
switch typ {
|
||||
case idealComplex:
|
||||
return fmt.Sprintf("return %s(complex(float64(x), 0))\n", typ)
|
||||
case qComplex64:
|
||||
return fmt.Sprintf("return %s(complex(float32(x), 0))\n", typ)
|
||||
case qComplex128:
|
||||
return fmt.Sprintf("return %s(complex(float64(x), 0))\n", typ)
|
||||
case idealFloat, qFloat32, qFloat64:
|
||||
return fmt.Sprintf("return %s(float64(x))\n", typ)
|
||||
case qBigRat:
|
||||
return fmt.Sprintf("return big.NewRat(1, 1).SetFloat64(float64(x))\n")
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func coerceIdealInt(typ t) string {
|
||||
switch typ {
|
||||
case idealComplex:
|
||||
return fmt.Sprintf("return %s(complex(float64(x), 0))\n", typ)
|
||||
case qComplex64:
|
||||
return fmt.Sprintf("return %s(complex(float32(x), 0))\n", typ)
|
||||
case qComplex128:
|
||||
return fmt.Sprintf("return %s(complex(float64(x), 0))\n", typ)
|
||||
case idealFloat, idealInt, qFloat32, qFloat64, qInt64:
|
||||
return fmt.Sprintf("return %s(int64(x))\n", typ)
|
||||
case idealUint:
|
||||
return fmt.Sprintf("if x >= 0 { return %s(int64(x)) }\n", typ)
|
||||
case qInt8:
|
||||
return fmt.Sprintf("if x >= math.MinInt8 && x<= math.MaxInt8 { return %s(int64(x)) }\n", typ)
|
||||
case qInt16:
|
||||
return fmt.Sprintf("if x >= math.MinInt16 && x<= math.MaxInt16 { return %s(int64(x)) }\n", typ)
|
||||
case qInt32:
|
||||
return fmt.Sprintf("if x >= math.MinInt32 && x<= math.MaxInt32 { return %s(int64(x)) }\n", typ)
|
||||
case qUint8:
|
||||
return fmt.Sprintf("if x >= 0 && x<= math.MaxUint8 { return %s(int64(x)) }\n", typ)
|
||||
case qUint16:
|
||||
return fmt.Sprintf("if x >= 0 && x<= math.MaxUint16 { return %s(int64(x)) }\n", typ)
|
||||
case qUint32:
|
||||
return fmt.Sprintf("if x >= 0 && x<= math.MaxUint32 { return %s(int64(x)) }\n", typ)
|
||||
case qUint64:
|
||||
return fmt.Sprintf("if x >= 0 { return %s(int64(x)) }\n", typ)
|
||||
case qBigInt:
|
||||
return fmt.Sprintf("return big.NewInt(int64(x))\n")
|
||||
case qBigRat:
|
||||
return fmt.Sprintf("return big.NewRat(1, 1).SetInt64(int64(x))\n")
|
||||
case qDuration:
|
||||
return fmt.Sprintf("return time.Duration(int64(x))\n")
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func coerceIdealRune(typ t) string {
|
||||
switch typ {
|
||||
case idealComplex:
|
||||
return fmt.Sprintf("return %s(complex(float64(x), 0))\n", typ)
|
||||
case qComplex64:
|
||||
return fmt.Sprintf("return %s(complex(float32(x), 0))\n", typ)
|
||||
case qComplex128:
|
||||
return fmt.Sprintf("return %s(complex(float64(x), 0))\n", typ)
|
||||
case idealFloat, idealInt, idealRune, idealUint, qFloat32, qFloat64, qInt8, qInt16, qInt32, qInt64, qUint8, qUint16, qUint32, qUint64:
|
||||
return fmt.Sprintf("return %s(int64(x))\n", typ)
|
||||
case qBigInt:
|
||||
return fmt.Sprintf("return big.NewInt(int64(x))\n")
|
||||
case qBigRat:
|
||||
return fmt.Sprintf("return big.NewRat(1, 1).SetInt64(int64(x))\n")
|
||||
case qDuration:
|
||||
return fmt.Sprintf("return time.Duration(int64(x))\n")
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func coerceIdealUint(typ t) string {
|
||||
switch typ {
|
||||
case idealComplex:
|
||||
return fmt.Sprintf("return %s(complex(float64(x), 0))\n", typ)
|
||||
case qComplex64:
|
||||
return fmt.Sprintf("return %s(complex(float32(x), 0))\n", typ)
|
||||
case qComplex128:
|
||||
return fmt.Sprintf("return %s(complex(float64(x), 0))\n", typ)
|
||||
case idealFloat, idealUint, qFloat32, qFloat64, qUint64:
|
||||
return fmt.Sprintf("return %s(uint64(x))\n", typ)
|
||||
case idealInt:
|
||||
return fmt.Sprintf("if x <= math.MaxInt64 { return %s(int64(x)) }\n", typ)
|
||||
case qInt8:
|
||||
return fmt.Sprintf("if x <= math.MaxInt8 { return %s(int64(x)) }\n", typ)
|
||||
case qInt16:
|
||||
return fmt.Sprintf("if x<= math.MaxInt16 { return %s(int64(x)) }\n", typ)
|
||||
case qInt32:
|
||||
return fmt.Sprintf("if x<= math.MaxInt32 { return %s(int64(x)) }\n", typ)
|
||||
case qInt64:
|
||||
return fmt.Sprintf("if x<= math.MaxInt64 { return %s(int64(x)) }\n", typ)
|
||||
case qUint8:
|
||||
return fmt.Sprintf("if x >= 0 && x<= math.MaxUint8 { return %s(int64(x)) }\n", typ)
|
||||
case qUint16:
|
||||
return fmt.Sprintf("if x >= 0 && x<= math.MaxUint16 { return %s(int64(x)) }\n", typ)
|
||||
case qUint32:
|
||||
return fmt.Sprintf("if x >= 0 && x<= math.MaxUint32 { return %s(int64(x)) }\n", typ)
|
||||
case qBigInt:
|
||||
return fmt.Sprintf("return big.NewInt(0).SetUint64(uint64(x))\n")
|
||||
case qBigRat:
|
||||
return fmt.Sprintf("return big.NewRat(1, 1).SetInt(big.NewInt(0).SetUint64(uint64(x)))\n")
|
||||
case qDuration:
|
||||
return fmt.Sprintf("if x <= math.MaxInt64 { return time.Duration(int64(x)) }\n")
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func genCoerce1(w io.Writer, in t, f func(out t) string) {
|
||||
fmt.Fprintf(w, "\tcase %s:\n", in)
|
||||
fmt.Fprintf(w, "\t\tswitch otherVal.(type) {\n")
|
||||
|
||||
for i := idealComplex; i < qEnd; i++ {
|
||||
s := f(i)
|
||||
switch s {
|
||||
case "":
|
||||
fmt.Fprintf(w, "\t\t//case %s:\n", i)
|
||||
default:
|
||||
fmt.Fprintf(w, "\t\tcase %s:\n", i)
|
||||
fmt.Fprintf(w, "\t\t\t%s", s)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, "\t\t}\n") // switch
|
||||
}
|
||||
|
||||
func genCoerce(w io.Writer) {
|
||||
fmt.Fprintf(w,
|
||||
`
|
||||
func coerce1(inVal, otherVal interface{}) (coercedInVal interface{}) {
|
||||
coercedInVal = inVal
|
||||
if otherVal == nil {
|
||||
return
|
||||
}
|
||||
|
||||
switch x := inVal.(type) {
|
||||
case nil:
|
||||
return
|
||||
`)
|
||||
genCoerce1(w, idealComplex, coerceIdealComplex)
|
||||
genCoerce1(w, idealFloat, coerceIdealFloat)
|
||||
genCoerce1(w, idealInt, coerceIdealInt)
|
||||
genCoerce1(w, idealRune, coerceIdealRune)
|
||||
genCoerce1(w, idealUint, coerceIdealUint)
|
||||
fmt.Fprintf(w, "\t}\n") // switch
|
||||
|
||||
fmt.Fprintf(w, "\treturn\n}\n") // func
|
||||
}
|
||||
|
||||
func main() {
|
||||
ofn := flag.String("o", "", "")
|
||||
flag.Parse()
|
||||
_, err := os.Stat(*ofn)
|
||||
if err == nil {
|
||||
log.Fatalf("%s exists", *ofn)
|
||||
}
|
||||
|
||||
w := bufio.NewWriter(os.Stdout)
|
||||
if s := *ofn; s != "" {
|
||||
f, err := os.Create(s)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
defer f.Close()
|
||||
w = bufio.NewWriter(f)
|
||||
}
|
||||
defer w.Flush()
|
||||
|
||||
fmt.Fprintf(w, `// Copyright 2013 The ql Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// CAUTION: This file was generated automatically by
|
||||
//
|
||||
// $ go run helper/helper.go -o coerce.go
|
||||
//
|
||||
// DO NOT EDIT!
|
||||
|
||||
package ql
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
func coerce(a, b interface{}) (x, y interface{}) {
|
||||
if reflect.TypeOf(a) == reflect.TypeOf(b) {
|
||||
return a, b
|
||||
}
|
||||
|
||||
switch a.(type) {
|
||||
case idealComplex, idealFloat, idealInt, idealRune, idealUint:
|
||||
switch b.(type) {
|
||||
case idealComplex, idealFloat, idealInt, idealRune, idealUint:
|
||||
x, y = coerce1(a, b), b
|
||||
if reflect.TypeOf(x) == reflect.TypeOf(y) {
|
||||
return
|
||||
}
|
||||
|
||||
return a, coerce1(b, a)
|
||||
default:
|
||||
return coerce1(a, b), b
|
||||
}
|
||||
default:
|
||||
switch b.(type) {
|
||||
case idealComplex, idealFloat, idealInt, idealRune, idealUint:
|
||||
return a, coerce1(b, a)
|
||||
default:
|
||||
return a, b
|
||||
}
|
||||
}
|
||||
}
|
||||
`)
|
||||
genCoerce(w)
|
||||
}
|
302
vendor/github.com/cznic/ql/httpfs.go
generated
vendored
Normal file
@ -0,0 +1,302 @@
|
||||
// Copyright (c) 2014 ql Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
var (
|
||||
_ http.FileSystem = (*HTTPFS)(nil)
|
||||
_ http.File = (*HTTPFile)(nil)
|
||||
_ os.FileInfo = (*HTTPFile)(nil)
|
||||
_ os.FileInfo = (*dirEntry)(nil)
|
||||
)
|
||||
|
||||
type dirEntry string
|
||||
|
||||
func (d dirEntry) Name() string { return string(d) }
|
||||
func (d dirEntry) Size() int64 { return -1 }
|
||||
func (d dirEntry) Mode() os.FileMode { return os.ModeDir }
|
||||
func (d dirEntry) ModTime() time.Time { return time.Time{} }
|
||||
func (d dirEntry) IsDir() bool { return true }
|
||||
func (d dirEntry) Sys() interface{} { return interface{}(nil) }
|
||||
|
||||
// A HTTPFile is returned by the HTTPFS's Open method and can be served by the
|
||||
// http.FileServer implementation.
|
||||
type HTTPFile struct {
|
||||
closed bool
|
||||
content []byte
|
||||
dirEntries []os.FileInfo
|
||||
isFile bool
|
||||
name string
|
||||
off int
|
||||
sz int
|
||||
}
|
||||
|
||||
// Close implements http.File.
|
||||
func (f *HTTPFile) Close() error {
|
||||
if f.closed {
|
||||
return os.ErrInvalid
|
||||
}
|
||||
|
||||
f.closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsDir implements os.FileInfo
|
||||
func (f *HTTPFile) IsDir() bool { return !f.isFile }
|
||||
|
||||
// Mode implements os.FileInfo
|
||||
func (f *HTTPFile) Mode() os.FileMode {
|
||||
switch f.isFile {
|
||||
case false:
|
||||
return os.FileMode(0444)
|
||||
default:
|
||||
return os.ModeDir
|
||||
}
|
||||
}
|
||||
|
||||
// ModTime implements os.FileInfo
|
||||
func (f *HTTPFile) ModTime() time.Time {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
// Name implements os.FileInfo
|
||||
func (f *HTTPFile) Name() string { return path.Base(f.name) }
|
||||
|
||||
// Size implements os.FileInfo
|
||||
func (f *HTTPFile) Size() int64 {
|
||||
switch f.isFile {
|
||||
case false:
|
||||
return -1
|
||||
default:
|
||||
return int64(len(f.content))
|
||||
}
|
||||
}
|
||||
|
||||
// Stat implements http.File.
|
||||
func (f *HTTPFile) Stat() (os.FileInfo, error) { return f, nil }
|
||||
|
||||
// Sys implements os.FileInfo
|
||||
func (f *HTTPFile) Sys() interface{} { return interface{}(nil) }
|
||||
|
||||
// Readdir implements http.File.
|
||||
func (f *HTTPFile) Readdir(count int) ([]os.FileInfo, error) {
|
||||
if f.isFile {
|
||||
return nil, fmt.Errorf("not a directory: %s", f.name)
|
||||
}
|
||||
|
||||
if count <= 0 {
|
||||
r := f.dirEntries
|
||||
f.dirEntries = f.dirEntries[:0]
|
||||
return r, nil
|
||||
}
|
||||
|
||||
rq := mathutil.Min(count, len(f.dirEntries))
|
||||
r := f.dirEntries[:rq]
|
||||
f.dirEntries = f.dirEntries[rq:]
|
||||
if len(r) != 0 {
|
||||
return r, nil
|
||||
}
|
||||
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
// Read implements http.File.
|
||||
func (f *HTTPFile) Read(b []byte) (int, error) {
|
||||
if f.closed {
|
||||
return 0, os.ErrInvalid
|
||||
}
|
||||
|
||||
n := copy(b, f.content[f.off:])
|
||||
f.off += n
|
||||
if n != 0 {
|
||||
return n, nil
|
||||
}
|
||||
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
// Seek implements http.File.
|
||||
func (f *HTTPFile) Seek(offset int64, whence int) (int64, error) {
|
||||
if f.closed {
|
||||
return 0, os.ErrInvalid
|
||||
}
|
||||
|
||||
if offset < 0 {
|
||||
return int64(f.off), fmt.Errorf("cannot seek before start of file")
|
||||
}
|
||||
|
||||
switch whence {
|
||||
case 0:
|
||||
noff := int64(f.off) + offset
|
||||
if noff > mathutil.MaxInt {
|
||||
return int64(f.off), fmt.Errorf("seek target overflows int: %d", noff)
|
||||
}
|
||||
|
||||
f.off = mathutil.Min(int(offset), len(f.content))
|
||||
if f.off == int(offset) {
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
return int64(f.off), io.EOF
|
||||
case 1:
|
||||
noff := int64(f.off) + offset
|
||||
if noff > mathutil.MaxInt {
|
||||
return int64(f.off), fmt.Errorf("seek target overflows int: %d", noff)
|
||||
}
|
||||
|
||||
off := mathutil.Min(f.off+int(offset), len(f.content))
|
||||
if off == f.off+int(offset) {
|
||||
f.off = off
|
||||
return int64(off), nil
|
||||
}
|
||||
|
||||
f.off = off
|
||||
return int64(off), io.EOF
|
||||
case 2:
|
||||
noff := int64(f.off) - offset
|
||||
if noff < 0 {
|
||||
return int64(f.off), fmt.Errorf("cannot seek before start of file")
|
||||
}
|
||||
|
||||
f.off = len(f.content) - int(offset)
|
||||
return int64(f.off), nil
|
||||
default:
|
||||
return int64(f.off), fmt.Errorf("seek: invalid whence %d", whence)
|
||||
}
|
||||
}
|
||||
|
||||
// HTTPFS implements a http.FileSystem backed by data in a DB.
|
||||
type HTTPFS struct {
|
||||
db *DB
|
||||
dir, get List
|
||||
}
|
||||
|
||||
// NewHTTPFS returns a http.FileSystem backed by a result record set of query.
|
||||
// The record set provides two mandatory fields: path and content (the field
|
||||
// names are case sensitive). Type of path must be string and type of content
|
||||
// must be blob (i.e. []byte). Field 'path' value is the "file" pathname, which
|
||||
// must be rooted; and field 'content' value is its "data".
|
||||
func (db *DB) NewHTTPFS(query string) (*HTTPFS, error) {
|
||||
if _, err := Compile(query); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dir, err := Compile(fmt.Sprintf("SELECT path FROM (%s) WHERE hasPrefix(path, $1)", query))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
get, err := Compile(fmt.Sprintf("SELECT content FROM (%s) WHERE path == $1", query))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &HTTPFS{db: db, dir: dir, get: get}, nil
|
||||
}
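A hedged usage sketch for NewHTTPFS (the table name "files", the inserted row and the listen address are invented; OpenMem, NewRWCtx, MustCompile, Execute and NewHTTPFS are the APIs used above):

	package main

	import (
		"log"
		"net/http"

		"github.com/cznic/ql"
	)

	func main() {
		db, err := ql.OpenMem()
		if err != nil {
			log.Fatal(err)
		}

		// Create a table with the two mandatory fields and insert one row.
		ctx := ql.NewRWCtx()
		if _, _, err = db.Execute(ctx, ql.MustCompile(`
			BEGIN TRANSACTION;
				CREATE TABLE files (path string, content blob);
				INSERT INTO files VALUES ($1, $2);
			COMMIT;`),
			"/index.html", []byte("<html>hello</html>"),
		); err != nil {
			log.Fatal(err)
		}

		// Serve the stored "files" over HTTP.
		fs, err := db.NewHTTPFS("SELECT path, content FROM files")
		if err != nil {
			log.Fatal(err)
		}
		log.Fatal(http.ListenAndServe(":8080", http.FileServer(fs)))
	}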
|
||||
|
||||
// Open implements http.FileSystem. The name parameter represents a file path.
|
||||
// The elements in a file path are separated by slash ('/', U+002F) characters,
|
||||
// regardless of host operating system convention.
|
||||
func (f *HTTPFS) Open(name string) (http.File, error) {
|
||||
if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
|
||||
strings.Contains(name, "\x00") {
|
||||
return nil, fmt.Errorf("invalid character in file path: %q", name)
|
||||
}
|
||||
|
||||
name = path.Clean("/" + name)
|
||||
rs, _, err := f.db.Execute(nil, f.get, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
n := 0
|
||||
var fdata []byte
|
||||
if err = rs[0].Do(false, func(data []interface{}) (more bool, err error) {
|
||||
switch n {
|
||||
case 0:
|
||||
var ok bool
|
||||
fdata, ok = data[0].([]byte)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("open: expected blob, got %T", data[0])
|
||||
}
|
||||
n++
|
||||
return true, nil
|
||||
default:
|
||||
return false, fmt.Errorf("open: more than one result was returned for %s", name)
|
||||
}
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if n == 1 { // file found
|
||||
return &HTTPFile{name: name, isFile: true, content: fdata}, nil
|
||||
}
|
||||
|
||||
dirName := name
|
||||
if dirName[len(dirName)-1] != filepath.Separator {
|
||||
dirName += string(filepath.Separator)
|
||||
}
|
||||
// Open("/a/b"): {/a/b/c.x,/a/b/d.x,/a/e.x,/a/b/f/g.x} -> {c.x,d.x,f}
|
||||
rs, _, err = f.db.Execute(nil, f.dir, dirName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
n = 0
|
||||
r := &HTTPFile{name: dirName}
|
||||
m := map[string]bool{}
|
||||
x := len(dirName)
|
||||
if err = rs[0].Do(false, func(data []interface{}) (more bool, err error) {
|
||||
n++
|
||||
switch name := data[0].(type) {
|
||||
case string:
|
||||
if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
|
||||
strings.Contains(name, "\x00") {
|
||||
return false, fmt.Errorf("invalid character in file path: %q", name)
|
||||
}
|
||||
|
||||
name = path.Clean("/" + name)
|
||||
rest := name[x:]
|
||||
parts := strings.Split(rest, "/")
|
||||
if len(parts) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
nm := parts[0]
|
||||
switch len(parts) {
|
||||
case 1: // file
|
||||
r.dirEntries = append(r.dirEntries, &HTTPFile{isFile: true, name: nm})
|
||||
default: // directory
|
||||
if !m[nm] {
|
||||
r.dirEntries = append(r.dirEntries, dirEntry(nm))
|
||||
}
|
||||
m[nm] = true
|
||||
}
|
||||
return true, nil
|
||||
default:
|
||||
return false, fmt.Errorf("expected string path, got %T(%v)", name, name)
|
||||
}
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if n != 0 {
|
||||
return r, nil
|
||||
}
|
||||
|
||||
return nil, os.ErrNotExist
|
||||
}
|
625
vendor/github.com/cznic/ql/introspection.go
generated
vendored
Normal file
@ -0,0 +1,625 @@
|
||||
// Copyright 2014 The ql Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
schemaCache = map[reflect.Type]*StructInfo{}
|
||||
schemaMu sync.RWMutex
|
||||
)
|
||||
|
||||
// StructInfo describes a struct type. An instance of StructInfo obtained from
|
||||
// StructSchema is shared and must not be mutated. That includes the values
|
||||
// pointed to by the elements of Fields and Indices.
|
||||
type StructInfo struct {
|
||||
Fields []*StructField // Fields describe the considered fields of a struct type.
|
||||
HasID bool // Whether the struct has a considered field named ID of type int64.
|
||||
Indices []*StructIndex // Indices describe indices defined by the index or uindex ql tags.
|
||||
IsPtr bool // Whether the StructInfo was derived from a pointer to a struct.
|
||||
}
|
||||
|
||||
// StructIndex describes an index defined by the ql tag index or uindex.
|
||||
type StructIndex struct {
|
||||
ColumnName string // Name of the column the index is on.
|
||||
Name string // Name of the index.
|
||||
Unique bool // Whether the index is unique.
|
||||
}
|
||||
|
||||
// StructField describes a considered field of a struct type.
|
||||
type StructField struct {
|
||||
Index int // Index is the index of the field for reflect.Value.Field.
|
||||
IsID bool // Whether the field corresponds to record id().
|
||||
IsPtr bool // Whether the field is a pointer type.
|
||||
MarshalType reflect.Type // The reflect.Type a field must be converted to when marshaling or nil when it is assignable directly. (Field->value)
|
||||
Name string // Field name or value of the name tag (like in `ql:"name foo"`).
|
||||
ReflectType reflect.Type // The reflect.Type of the field.
|
||||
Tags map[string]string // QL tags of this field. (`ql:"a, b c, d"` -> {"a": "", "b": "c", "d": ""})
|
||||
Type Type // QL type of the field.
|
||||
UnmarshalType reflect.Type // The reflect.Type a value must be converted to when unmarshaling or nil when it is assignable directly. (Field<-value)
|
||||
ZeroPtr reflect.Value // The reflect.Zero value of the field if it's a pointer type.
|
||||
}
|
||||
|
||||
func (s *StructField) check(v interface{}) error {
|
||||
t := reflect.TypeOf(v)
|
||||
if !s.ReflectType.AssignableTo(t) {
|
||||
if !s.ReflectType.ConvertibleTo(t) {
|
||||
return fmt.Errorf("type %s (%v) cannot be converted to %T", s.ReflectType.Name(), s.ReflectType.Kind(), t.Name())
|
||||
}
|
||||
|
||||
s.MarshalType = t
|
||||
}
|
||||
|
||||
if !t.AssignableTo(s.ReflectType) {
|
||||
if !t.ConvertibleTo(s.ReflectType) {
|
||||
return fmt.Errorf("type %s (%v) cannot be converted to %T", t.Name(), t.Kind(), s.ReflectType.Name())
|
||||
}
|
||||
|
||||
s.UnmarshalType = s.ReflectType
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseTag(s string) map[string]string {
|
||||
m := map[string]string{}
|
||||
for _, v := range strings.Split(s, ",") {
|
||||
v = strings.TrimSpace(v)
|
||||
switch n := strings.IndexRune(v, ' '); {
|
||||
case n < 0:
|
||||
m[v] = ""
|
||||
default:
|
||||
m[v[:n]] = v[n+1:]
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// StructSchema returns StructInfo for v which must be a struct instance or a
|
||||
// pointer to a struct. The info is computed only once for every type.
|
||||
// Subsequent calls to StructSchema for the same type return a cached
|
||||
// StructInfo.
|
||||
//
|
||||
// Note: The returned StructInfo is shared and must not be mutated, including
|
||||
// any other data structures it may point to.
|
||||
func StructSchema(v interface{}) (*StructInfo, error) {
|
||||
if v == nil {
|
||||
return nil, fmt.Errorf("cannot derive schema for %T(%v)", v, v)
|
||||
}
|
||||
|
||||
typ := reflect.TypeOf(v)
|
||||
schemaMu.RLock()
|
||||
if r, ok := schemaCache[typ]; ok {
|
||||
schemaMu.RUnlock()
|
||||
return r, nil
|
||||
}
|
||||
|
||||
schemaMu.RUnlock()
|
||||
var schemaPtr bool
|
||||
t := typ
|
||||
if t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
schemaPtr = true
|
||||
}
|
||||
if k := t.Kind(); k != reflect.Struct {
|
||||
return nil, fmt.Errorf("cannot derive schema for type %T (%v)", v, k)
|
||||
}
|
||||
|
||||
r := &StructInfo{IsPtr: schemaPtr}
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
fn := f.Name
|
||||
if !ast.IsExported(fn) {
|
||||
continue
|
||||
}
|
||||
|
||||
tags := parseTag(f.Tag.Get("ql"))
|
||||
if _, ok := tags["-"]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if s := tags["name"]; s != "" {
|
||||
fn = s
|
||||
}
|
||||
|
||||
if fn == "ID" && f.Type.Kind() == reflect.Int64 {
|
||||
r.HasID = true
|
||||
}
|
||||
var ix, unique bool
|
||||
var xn string
|
||||
xfn := fn
|
||||
if s := tags["index"]; s != "" {
|
||||
if _, ok := tags["uindex"]; ok {
|
||||
return nil, fmt.Errorf("both index and uindex in QL struct tag")
|
||||
}
|
||||
|
||||
ix, xn = true, s
|
||||
} else if s := tags["uindex"]; s != "" {
|
||||
if _, ok := tags["index"]; ok {
|
||||
return nil, fmt.Errorf("both index and uindex in QL struct tag")
|
||||
}
|
||||
|
||||
ix, unique, xn = true, true, s
|
||||
}
|
||||
if ix {
|
||||
if fn == "ID" && r.HasID {
|
||||
xfn = "id()"
|
||||
}
|
||||
r.Indices = append(r.Indices, &StructIndex{Name: xn, ColumnName: xfn, Unique: unique})
|
||||
}
|
||||
|
||||
sf := &StructField{Index: i, Name: fn, Tags: tags, Type: Type(-1), ReflectType: f.Type}
|
||||
fk := sf.ReflectType.Kind()
|
||||
if fk == reflect.Ptr {
|
||||
sf.IsPtr = true
|
||||
sf.ZeroPtr = reflect.Zero(sf.ReflectType)
|
||||
sf.ReflectType = sf.ReflectType.Elem()
|
||||
fk = sf.ReflectType.Kind()
|
||||
}
|
||||
|
||||
switch fk {
|
||||
case reflect.Bool:
|
||||
sf.Type = Bool
|
||||
if err := sf.check(false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case reflect.Int, reflect.Uint:
|
||||
return nil, fmt.Errorf("only integers of fixed size can be used to derive a schema: %v", fk)
|
||||
case reflect.Int8:
|
||||
sf.Type = Int8
|
||||
if err := sf.check(int8(0)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case reflect.Int16:
|
||||
if err := sf.check(int16(0)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sf.Type = Int16
|
||||
case reflect.Int32:
|
||||
if err := sf.check(int32(0)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sf.Type = Int32
|
||||
case reflect.Int64:
|
||||
if sf.ReflectType.Name() == "Duration" && sf.ReflectType.PkgPath() == "time" {
|
||||
sf.Type = Duration
|
||||
break
|
||||
}
|
||||
|
||||
sf.Type = Int64
|
||||
if err := sf.check(int64(0)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case reflect.Uint8:
|
||||
sf.Type = Uint8
|
||||
if err := sf.check(uint8(0)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case reflect.Uint16:
|
||||
sf.Type = Uint16
|
||||
if err := sf.check(uint16(0)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case reflect.Uint32:
|
||||
sf.Type = Uint32
|
||||
if err := sf.check(uint32(0)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case reflect.Uint64:
|
||||
sf.Type = Uint64
|
||||
if err := sf.check(uint64(0)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case reflect.Float32:
|
||||
sf.Type = Float32
|
||||
if err := sf.check(float32(0)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case reflect.Float64:
|
||||
sf.Type = Float64
|
||||
if err := sf.check(float64(0)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case reflect.Complex64:
|
||||
sf.Type = Complex64
|
||||
if err := sf.check(complex64(0)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case reflect.Complex128:
|
||||
sf.Type = Complex128
|
||||
if err := sf.check(complex128(0)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case reflect.Slice:
|
||||
sf.Type = Blob
|
||||
if err := sf.check([]byte(nil)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case reflect.Struct:
|
||||
switch sf.ReflectType.PkgPath() {
|
||||
case "math/big":
|
||||
switch sf.ReflectType.Name() {
|
||||
case "Int":
|
||||
sf.Type = BigInt
|
||||
case "Rat":
|
||||
sf.Type = BigRat
|
||||
}
|
||||
case "time":
|
||||
switch sf.ReflectType.Name() {
|
||||
case "Time":
|
||||
sf.Type = Time
|
||||
}
|
||||
}
|
||||
case reflect.String:
|
||||
sf.Type = String
|
||||
if err := sf.check(""); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if sf.Type < 0 {
|
||||
return nil, fmt.Errorf("cannot derive schema for type %s (%v)", sf.ReflectType.Name(), fk)
|
||||
}
|
||||
|
||||
sf.IsID = fn == "ID" && r.HasID
|
||||
r.Fields = append(r.Fields, sf)
|
||||
}
|
||||
|
||||
schemaMu.Lock()
|
||||
schemaCache[typ] = r
|
||||
if t != typ {
|
||||
r2 := *r
|
||||
r2.IsPtr = false
|
||||
schemaCache[t] = &r2
|
||||
}
|
||||
schemaMu.Unlock()
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// MustStructSchema is like StructSchema but panics on error. It simplifies
|
||||
// safe initialization of global variables holding StructInfo.
|
||||
//
|
||||
// MustStructSchema is safe for concurrent use by multiple goroutines.
|
||||
func MustStructSchema(v interface{}) *StructInfo {
|
||||
s, err := StructSchema(v)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// SchemaOptions amend the result of Schema.
|
||||
type SchemaOptions struct {
|
||||
// Don't wrap the CREATE statement(s) in a transaction.
|
||||
NoTransaction bool
|
||||
|
||||
// Don't insert the IF NOT EXISTS clause in the CREATE statement(s).
|
||||
NoIfNotExists bool
|
||||
|
||||
// Do not strip the "pkg." part from type name "pkg.Type", produce
|
||||
// "pkg_Type" table name instead. Applies only when no name is passed
|
||||
// to Schema().
|
||||
KeepPrefix bool
|
||||
}
|
||||
|
||||
var zeroSchemaOptions SchemaOptions
|
||||
|
||||
// Schema returns a CREATE TABLE/INDEX statement(s) for a table derived from a
|
||||
// struct or an error, if any. The table is named using the name parameter. If
|
||||
// name is an empty string then the type name of the struct is used while non
|
||||
// conforming characters are replaced by underscores. Value v can be also a
|
||||
// pointer to a struct.
|
||||
//
|
||||
// Every considered struct field type must be one of the QL types or a type
|
||||
// convertible to string, bool, int*, uint*, float* or complex* type or pointer
|
||||
// to such type. Integers with a width dependent on the architecture can not be
|
||||
// used. Only exported fields are considered. If an exported field QL tag
|
||||
// contains "-" (`ql:"-"`) then such field is not considered. A field with name
|
||||
// ID, having type int64, corresponds to id() - and is thus not a part of the
|
||||
// CREATE statement. A field QL tag containing "index name" or "uindex name"
|
||||
// triggers additionally creating an index or unique index on the respective
|
||||
// field. Fields can be renamed using a QL tag "name newName". Fields are
|
||||
// considered in the order of appearance. A QL tag is a struct tag part
|
||||
// prefixed by "ql:". Tags can be combined, for example:
|
||||
//
|
||||
// type T struct {
|
||||
// Foo string `ql:"index xFoo, name Bar"`
|
||||
// }
|
||||
//
|
||||
// If opts.NoTransaction == true then the statement(s) are not wrapped in a
|
||||
// transaction. If opt.NoIfNotExists == true then the CREATE statement(s) omits
|
||||
// the IF NOT EXISTS clause. Passing nil opts is equal to passing
|
||||
// &SchemaOptions{}
|
||||
//
|
||||
// Schema is safe for concurrent use by multiple goroutines.
|
||||
func Schema(v interface{}, name string, opt *SchemaOptions) (List, error) {
|
||||
if opt == nil {
|
||||
opt = &zeroSchemaOptions
|
||||
}
|
||||
s, err := StructSchema(v)
|
||||
if err != nil {
|
||||
return List{}, err
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if !opt.NoTransaction {
|
||||
buf.WriteString("BEGIN TRANSACTION; ")
|
||||
}
|
||||
buf.WriteString("CREATE TABLE ")
|
||||
if !opt.NoIfNotExists {
|
||||
buf.WriteString("IF NOT EXISTS ")
|
||||
}
|
||||
if name == "" {
|
||||
name = fmt.Sprintf("%T", v)
|
||||
if !opt.KeepPrefix {
|
||||
a := strings.Split(name, ".")
|
||||
if l := len(a); l > 1 {
|
||||
name = a[l-1]
|
||||
}
|
||||
}
|
||||
nm := []rune{}
|
||||
for _, v := range name {
|
||||
switch {
|
||||
case v >= '0' && v <= '9' || v == '_' || v >= 'a' && v <= 'z' || v >= 'A' && v <= 'Z':
|
||||
// ok
|
||||
default:
|
||||
v = '_'
|
||||
}
|
||||
nm = append(nm, v)
|
||||
}
|
||||
name = string(nm)
|
||||
}
|
||||
buf.WriteString(name + " (")
|
||||
for _, v := range s.Fields {
|
||||
if v.IsID {
|
||||
continue
|
||||
}
|
||||
|
||||
buf.WriteString(fmt.Sprintf("%s %s, ", v.Name, v.Type))
|
||||
}
|
||||
buf.WriteString("); ")
|
||||
for _, v := range s.Indices {
|
||||
buf.WriteString("CREATE ")
|
||||
if v.Unique {
|
||||
buf.WriteString("UNIQUE ")
|
||||
}
|
||||
buf.WriteString("INDEX ")
|
||||
if !opt.NoIfNotExists {
|
||||
buf.WriteString("IF NOT EXISTS ")
|
||||
}
|
||||
buf.WriteString(fmt.Sprintf("%s ON %s (%s); ", v.Name, name, v.ColumnName))
|
||||
}
|
||||
if !opt.NoTransaction {
|
||||
buf.WriteString("COMMIT; ")
|
||||
}
|
||||
l, err := Compile(buf.String())
|
||||
if err != nil {
|
||||
return List{}, fmt.Errorf("%s: %v", buf.String(), err)
|
||||
}
|
||||
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// MustSchema is like Schema but panics on error. It simplifies safe
|
||||
// initialization of global variables holding compiled schemas.
|
||||
//
|
||||
// MustSchema is safe for concurrent use by multiple goroutines.
|
||||
func MustSchema(v interface{}, name string, opt *SchemaOptions) List {
|
||||
l, err := Schema(v, name, opt)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
// Marshal converts, in the order of appearance, fields of a struct instance v
|
||||
// to []interface{} or an error, if any. Value v can be also a pointer to a
|
||||
// struct.
|
||||
//
|
||||
// Every considered struct field type must be one of the QL types or a type
|
||||
// convertible to string, bool, int*, uint*, float* or complex* type or pointer
|
||||
// to such type. Integers with a width dependent on the architecture can not be
|
||||
// used. Only exported fields are considered. If an exported field QL tag
|
||||
// contains "-" then such field is not considered. A QL tag is a struct tag
|
||||
// part prefixed by "ql:". Field with name ID, having type int64, corresponds
|
||||
// to id() - and is thus not part of the result.
|
||||
//
|
||||
// Marshal is safe for concurrent use by multiple goroutines.
|
||||
func Marshal(v interface{}) ([]interface{}, error) {
|
||||
s, err := StructSchema(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
val := reflect.ValueOf(v)
|
||||
if s.IsPtr {
|
||||
val = val.Elem()
|
||||
}
|
||||
n := len(s.Fields)
|
||||
if s.HasID {
|
||||
n--
|
||||
}
|
||||
r := make([]interface{}, n)
|
||||
j := 0
|
||||
for _, v := range s.Fields {
|
||||
if v.IsID {
|
||||
continue
|
||||
}
|
||||
|
||||
f := val.Field(v.Index)
|
||||
if v.IsPtr {
|
||||
if f.IsNil() {
|
||||
r[j] = nil
|
||||
j++
|
||||
continue
|
||||
}
|
||||
|
||||
f = f.Elem()
|
||||
}
|
||||
if m := v.MarshalType; m != nil {
|
||||
f = f.Convert(m)
|
||||
}
|
||||
r[j] = f.Interface()
|
||||
j++
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// MustMarshal is like Marshal but panics on error. It simplifies marshaling of
|
||||
// "safe" types, like eg. those which were already verified by Schema or
|
||||
// MustSchema. When the underlying Marshal returns an error, MustMarshal
|
||||
// panics.
|
||||
//
|
||||
// MustMarshal is safe for concurrent use by multiple goroutines.
|
||||
func MustMarshal(v interface{}) []interface{} {
|
||||
r, err := Marshal(v)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// Unmarshal stores data from []interface{} in the struct value pointed to by
|
||||
// v.
|
||||
//
|
||||
// Every considered struct field type must be one of the QL types or a type
|
||||
// convertible to string, bool, int*, uint*, float* or complex* type or pointer
|
||||
// to such type. Integers with a width dependent on the architecture can not be
|
||||
// used. Only exported fields are considered. If an exported field QL tag
|
||||
// contains "-" then such field is not considered. A QL tag is a struct tag
|
||||
// part prefixed by "ql:". Fields are considered in the order of appearance.
|
||||
// Types of values in data must be compatible with the corresponding considered
|
||||
// field of v.
|
||||
//
|
||||
// If the struct has no ID field then the number of values in data must be equal
|
||||
// to the number of considered fields of v.
|
||||
//
|
||||
// type T struct {
|
||||
// A bool
|
||||
// B string
|
||||
// }
|
||||
//
|
||||
// Assuming the schema is
|
||||
//
|
||||
// CREATE TABLE T (A bool, B string);
|
||||
//
|
||||
// Data might be a result of queries like
|
||||
//
|
||||
// SELECT * FROM T;
|
||||
// SELECT A, B FROM T;
|
||||
//
|
||||
// If the struct has a considered ID field then the number of values in data
|
||||
// must be equal to the number of considered fields in v - or one less. In the
|
||||
// latter case the ID field is not set.
|
||||
//
|
||||
// type U struct {
|
||||
// ID int64
|
||||
// A bool
|
||||
// B string
|
||||
// }
|
||||
//
|
||||
// Assuming the schema is
|
||||
//
|
||||
// CREATE TABLE T (A bool, B string);
|
||||
//
|
||||
// Data might be a result of queries like
|
||||
//
|
||||
// SELECT * FROM T; // ID not set
|
||||
// SELECT A, B FROM T; // ID not set
|
||||
// SELECT id(), A, B FROM T; // ID is set
|
||||
//
|
||||
// To unmarshal a value from data into a pointer field of v, Unmarshal first
|
||||
// handles the case of the value being nil. In that case, Unmarshal sets the
|
||||
// pointer to nil. Otherwise, Unmarshal unmarshals the data value into value
|
||||
// pointed at by the pointer. If the pointer is nil, Unmarshal allocates a new
|
||||
// value for it to point to.
|
||||
//
|
||||
// Unmarshal is safe for concurrent use by multiple goroutines.
|
||||
func Unmarshal(v interface{}, data []interface{}) (err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
var ok bool
|
||||
if err, ok = r.(error); !ok {
|
||||
err = fmt.Errorf("%v", r)
|
||||
}
|
||||
err = fmt.Errorf("unmarshal: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
s, err := StructSchema(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !s.IsPtr {
|
||||
return fmt.Errorf("unmarshal: need a pointer to a struct")
|
||||
}
|
||||
|
||||
id := false
|
||||
nv, nf := len(data), len(s.Fields)
|
||||
switch s.HasID {
|
||||
case true:
|
||||
switch {
|
||||
case nv == nf:
|
||||
id = true
|
||||
case nv == nf-1:
|
||||
// ok
|
||||
default:
|
||||
return fmt.Errorf("unmarshal: got %d values, need %d or %d", nv, nf-1, nf)
|
||||
}
|
||||
default:
|
||||
switch {
|
||||
case nv == nf:
|
||||
// ok
|
||||
default:
|
||||
return fmt.Errorf("unmarshal: got %d values, need %d", nv, nf)
|
||||
}
|
||||
}
|
||||
|
||||
j := 0
|
||||
vVal := reflect.ValueOf(v)
|
||||
if s.IsPtr {
|
||||
vVal = vVal.Elem()
|
||||
}
|
||||
for _, sf := range s.Fields {
|
||||
if sf.IsID && !id {
|
||||
continue
|
||||
}
|
||||
|
||||
d := data[j]
|
||||
val := reflect.ValueOf(d)
|
||||
j++
|
||||
|
||||
fVal := vVal.Field(sf.Index)
|
||||
if u := sf.UnmarshalType; u != nil {
|
||||
val = val.Convert(u)
|
||||
}
|
||||
if !sf.IsPtr {
|
||||
fVal.Set(val)
|
||||
continue
|
||||
}
|
||||
|
||||
if d == nil {
|
||||
fVal.Set(sf.ZeroPtr)
|
||||
continue
|
||||
}
|
||||
|
||||
if fVal.IsNil() {
|
||||
fVal.Set(reflect.New(sf.ReflectType))
|
||||
}
|
||||
|
||||
fVal.Elem().Set(val)
|
||||
}
|
||||
return nil
|
||||
}
|
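The introspection helpers documented above (StructSchema, Schema, Marshal, Unmarshal and their Must variants) are meant to be combined. What follows is a minimal round-trip sketch, assuming an in-memory database; the Person type, its fields, the derived table name and the statements are illustrative assumptions, not part of the vendored package.

package main

import (
	"fmt"
	"log"

	"github.com/cznic/ql"
)

// Person is an illustrative type; ID maps to id() and is not stored as a column.
type Person struct {
	ID   int64
	Name string `ql:"index xName"`
	Age  int32
}

func main() {
	db, err := ql.OpenMem()
	if err != nil {
		log.Fatal(err)
	}

	// CREATE TABLE/INDEX statements derived from the struct (wrapped in a transaction).
	if _, _, err = db.Execute(ql.NewRWCtx(), ql.MustSchema(&Person{}, "", nil)); err != nil {
		log.Fatal(err)
	}

	// Marshal skips ID, so the INSERT needs two placeholders: Name, Age.
	ins := ql.MustCompile("BEGIN TRANSACTION; INSERT INTO Person VALUES($1, $2); COMMIT;")
	if _, _, err = db.Execute(ql.NewRWCtx(), ins, ql.MustMarshal(&Person{Name: "Ada", Age: 36})...); err != nil {
		log.Fatal(err)
	}

	// Selecting id() first lets Unmarshal fill the ID field as well.
	rs, _, err := db.Execute(nil, ql.MustCompile("SELECT id(), Name, Age FROM Person;"))
	if err != nil {
		log.Fatal(err)
	}
	if err = rs[0].Do(false, func(data []interface{}) (bool, error) {
		var p Person
		if err := ql.Unmarshal(&p, data); err != nil {
			return false, err
		}
		fmt.Printf("%+v\n", p)
		return true, nil
	}); err != nil {
		log.Fatal(err)
	}
}

Note the assumption that Schema derives the table name Person from the type name; passing an explicit name or a SchemaOptions value changes that.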
1277
vendor/github.com/cznic/ql/mem.go
generated
vendored
Normal file
File diff suppressed because it is too large
2749
vendor/github.com/cznic/ql/parser.go
generated
vendored
Normal file
File diff suppressed because it is too large
2800
vendor/github.com/cznic/ql/plan.go
generated
vendored
Normal file
File diff suppressed because it is too large
1729
vendor/github.com/cznic/ql/ql.go
generated
vendored
Normal file
File diff suppressed because it is too large
219
vendor/github.com/cznic/ql/ql/main.go
generated
vendored
Normal file
@ -0,0 +1,219 @@
|
||||
// Copyright 2014 The ql Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Command ql is a utility to explore a database, prototype a schema or test
|
||||
// drive a query, etc.
|
||||
//
|
||||
// Installation:
|
||||
//
|
||||
// $ go get github.com/cznic/ql/ql
|
||||
//
|
||||
// Usage:
|
||||
//
|
||||
// ql [-db name] [-schema regexp] [-tables regexp] [-fld] statement_list
|
||||
//
|
||||
// Options:
|
||||
//
|
||||
// -db name Name of the database to use. Defaults to "ql.db".
|
||||
// If the DB file does not exist, it is created automatically.
|
||||
//
|
||||
// -schema re If re != "" show the CREATE statements of matching tables and exit.
|
||||
//
|
||||
// -tables re If re != "" show the matching table names and exit.
|
||||
//
|
||||
// -fld First row of a query result set will show field names.
|
||||
//
|
||||
// statement_list QL statements to execute.
|
||||
// If no non-flag arguments are present, ql reads from stdin.
|
||||
// The list is wrapped into an automatic transaction.
|
||||
//
|
||||
// -t Report and measure time to execute, including creating/opening and closing the DB.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// $ ql 'create table t (i int, s string)'
|
||||
// $ ql << EOF
|
||||
// > insert into t values
|
||||
// > (1, "a"),
|
||||
// > (2, "b"),
|
||||
// > (3, "c"),
|
||||
// > EOF
|
||||
// $ ql 'select * from t'
|
||||
// 3, "c"
|
||||
// 2, "b"
|
||||
// 1, "a"
|
||||
// $ ql -fld 'select * from t where i != 2 order by s'
|
||||
// "i", "s"
|
||||
// 1, "a"
|
||||
// 3, "c"
|
||||
// $
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cznic/ql"
|
||||
)
|
||||
|
||||
func str(data []interface{}) string {
|
||||
a := make([]string, len(data))
|
||||
for i, v := range data {
|
||||
switch x := v.(type) {
|
||||
case string:
|
||||
a[i] = fmt.Sprintf("%q", x)
|
||||
default:
|
||||
a[i] = fmt.Sprint(x)
|
||||
}
|
||||
}
|
||||
return strings.Join(a, ", ")
|
||||
}
|
||||
|
||||
func main() {
|
||||
if err := do(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func do() (err error) {
|
||||
oDB := flag.String("db", "ql.db", "The DB file to open. It'll be created if missing.")
|
||||
oFlds := flag.Bool("fld", false, "Show recordset's field names.")
|
||||
oSchema := flag.String("schema", "", "If non empty, show the CREATE statements of matching tables and exit.")
|
||||
oTables := flag.String("tables", "", "If non empty, list matching table names and exit.")
|
||||
oTime := flag.Bool("t", false, "Measure and report time to execute the statement(s) including DB create/open/close.")
|
||||
flag.Parse()
|
||||
|
||||
t0 := time.Now()
|
||||
if *oTime {
|
||||
defer func() {
|
||||
fmt.Fprintf(os.Stderr, "%s\n", time.Since(t0))
|
||||
}()
|
||||
}
|
||||
|
||||
db, err := ql.OpenFile(*oDB, &ql.Options{CanCreate: true})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
ec := db.Close()
|
||||
switch {
|
||||
case ec != nil && err != nil:
|
||||
log.Println(ec)
|
||||
case ec != nil:
|
||||
err = ec
|
||||
}
|
||||
}()
|
||||
|
||||
if pat := *oSchema; pat != "" {
|
||||
re, err := regexp.Compile(pat)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nfo, err := db.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r := []string{}
|
||||
for _, ti := range nfo.Tables {
|
||||
if !re.MatchString(ti.Name) {
|
||||
continue
|
||||
}
|
||||
|
||||
a := []string{}
|
||||
for _, ci := range ti.Columns {
|
||||
a = append(a, fmt.Sprintf("%s %s", ci.Name, ci.Type))
|
||||
}
|
||||
r = append(r, fmt.Sprintf("CREATE TABLE %s (%s);", ti.Name, strings.Join(a, ", ")))
|
||||
}
|
||||
sort.Strings(r)
|
||||
if len(r) != 0 {
|
||||
fmt.Println(strings.Join(r, "\n"))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if pat := *oTables; pat != "" {
|
||||
re, err := regexp.Compile(pat)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nfo, err := db.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r := []string{}
|
||||
for _, ti := range nfo.Tables {
|
||||
if !re.MatchString(ti.Name) {
|
||||
continue
|
||||
}
|
||||
|
||||
r = append(r, ti.Name)
|
||||
}
|
||||
sort.Strings(r)
|
||||
if len(r) != 0 {
|
||||
fmt.Println(strings.Join(r, "\n"))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var src string
|
||||
switch n := flag.NArg(); n {
|
||||
case 0:
|
||||
b, err := ioutil.ReadAll(bufio.NewReader(os.Stdin))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
src = string(b)
|
||||
default:
|
||||
a := make([]string, n)
|
||||
for i := range a {
|
||||
a[i] = flag.Arg(i)
|
||||
}
|
||||
src = strings.Join(a, " ")
|
||||
}
|
||||
|
||||
src = "BEGIN TRANSACTION; " + src + "; COMMIT;"
|
||||
l, err := ql.Compile(src)
|
||||
if err != nil {
|
||||
log.Println(src)
|
||||
return err
|
||||
}
|
||||
|
||||
rs, i, err := db.Execute(ql.NewRWCtx(), l)
|
||||
if err != nil {
|
||||
a := strings.Split(strings.TrimSpace(fmt.Sprint(l)), "\n")
|
||||
return fmt.Errorf("%v: %s", err, a[i])
|
||||
}
|
||||
|
||||
if len(rs) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
switch {
|
||||
case l.IsExplainStmt():
|
||||
return rs[len(rs)-1].Do(*oFlds, func(data []interface{}) (bool, error) {
|
||||
fmt.Println(data[0])
|
||||
return true, nil
|
||||
})
|
||||
default:
|
||||
return rs[len(rs)-1].Do(*oFlds, func(data []interface{}) (bool, error) {
|
||||
fmt.Println(str(data))
|
||||
return true, nil
|
||||
})
|
||||
}
|
||||
}
|
4129
vendor/github.com/cznic/ql/scanner.go
generated
vendored
Normal file
File diff suppressed because it is too large
1268
vendor/github.com/cznic/ql/stmt.go
generated
vendored
Normal file
File diff suppressed because it is too large
991
vendor/github.com/cznic/ql/storage.go
generated
vendored
Normal file
@ -0,0 +1,991 @@
|
||||
// Copyright (c) 2014 ql Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type storage interface {
|
||||
Acid() bool
|
||||
BeginTransaction() error
|
||||
Close() error
|
||||
Commit() error
|
||||
Create(data ...interface{}) (h int64, err error)
|
||||
CreateIndex(unique bool) (handle int64, x btreeIndex, err error)
|
||||
CreateTemp(asc bool) (bt temp, err error)
|
||||
Delete(h int64, blobCols ...*col) error //LATER split the nil blobCols case
|
||||
ID() (id int64, err error)
|
||||
Name() string
|
||||
OpenIndex(unique bool, handle int64) (btreeIndex, error) // Never called on the memory backend.
|
||||
Read(dst []interface{}, h int64, cols ...*col) (data []interface{}, err error)
|
||||
ResetID() (err error)
|
||||
Rollback() error
|
||||
Update(h int64, data ...interface{}) error
|
||||
UpdateRow(h int64, blobCols []*col, data ...interface{}) error
|
||||
Verify() (allocs int64, err error)
|
||||
}
|
||||
|
||||
type btreeIterator interface {
|
||||
Next() (k, v []interface{}, err error)
|
||||
}
|
||||
|
||||
type temp interface {
|
||||
BeginTransaction() error
|
||||
Create(data ...interface{}) (h int64, err error)
|
||||
Drop() (err error)
|
||||
Get(k []interface{}) (v []interface{}, err error)
|
||||
Read(dst []interface{}, h int64, cols ...*col) (data []interface{}, err error)
|
||||
SeekFirst() (e btreeIterator, err error)
|
||||
Set(k, v []interface{}) (err error)
|
||||
}
|
||||
|
||||
type indexIterator interface {
|
||||
Next() (k []interface{}, h int64, err error)
|
||||
Prev() (k []interface{}, h int64, err error)
|
||||
}
|
||||
|
||||
type btreeIndex interface {
|
||||
Clear() error // supports truncate table statement
|
||||
Create(indexedValues []interface{}, h int64) error // supports insert into statement
|
||||
Delete(indexedValues []interface{}, h int64) error // supports delete from statement
|
||||
Drop() error // supports drop table, drop index statements
|
||||
Seek(indexedValues []interface{}) (iter indexIterator, hit bool, err error) // supports where clause
|
||||
SeekFirst() (iter indexIterator, err error) // supports aggregate min / ascending order by
|
||||
SeekLast() (iter indexIterator, err error) // supports aggregate max / descending order by
|
||||
}
|
||||
|
||||
type indexedCol struct { // Column name or id() index.
|
||||
name string
|
||||
unique bool
|
||||
x btreeIndex
|
||||
xroot int64
|
||||
}
|
||||
|
||||
type index2 struct { // Expression list index.
|
||||
unique bool
|
||||
x btreeIndex
|
||||
xroot int64
|
||||
sources []string
|
||||
exprList []expression
|
||||
}
|
||||
|
||||
func (x *index2) eval(ctx *execCtx, cols []*col, id int64, r []interface{}) ([]interface{}, error) {
|
||||
f, isFile := ctx.db.store.(*file)
|
||||
vlist := make([]interface{}, len(x.exprList))
|
||||
m := map[interface{}]interface{}{"$id": id}
|
||||
for _, col := range cols {
|
||||
ci := col.index
|
||||
v := interface{}(nil)
|
||||
if ci < len(r) {
|
||||
v = r[ci]
|
||||
}
|
||||
if b, ok := v.([]byte); ok && isFile {
|
||||
var err error
|
||||
if v, err = expand1(chunk{f: f, b: b}, nil); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
m[col.name] = v
|
||||
}
|
||||
for i, e := range x.exprList {
|
||||
v, err := e.eval(ctx, m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if ok, typ := isBlobType(v); ok {
|
||||
return nil, fmt.Errorf("value of a complex index cannot be of blob-like type: %v", typ)
|
||||
}
|
||||
|
||||
vlist[i] = v
|
||||
}
|
||||
return vlist, nil
|
||||
}
|
||||
|
||||
type indexKey struct {
|
||||
value []interface{}
|
||||
h int64
|
||||
}
|
||||
|
||||
// storage fields
|
||||
// 0: next int64
|
||||
// 1: scols string
|
||||
// 2: hhead int64
|
||||
// 3: name string
|
||||
// 4: indices string - optional
|
||||
// 5: hxroots int64 - optional
|
||||
type table struct {
|
||||
cols []*col // logical
|
||||
cols0 []*col // physical
|
||||
h int64 //
|
||||
head int64 // head of the single linked record list
|
||||
hhead int64 // handle of the head of the single linked record list
|
||||
hxroots int64
|
||||
indices []*indexedCol
|
||||
indices2 map[string]*index2
|
||||
name string
|
||||
next int64 // single linked table list
|
||||
store storage
|
||||
tnext *table
|
||||
tprev *table
|
||||
xroots []interface{}
|
||||
constraints []*constraint
|
||||
defaults []expression
|
||||
}
|
||||
|
||||
func (t *table) hasIndices() bool { return len(t.indices) != 0 || len(t.indices2) != 0 }
|
||||
func (t *table) hasIndices2() bool { return len(t.indices2) != 0 }
|
||||
|
||||
func (t *table) constraintsAndDefaults(ctx *execCtx) error {
|
||||
if isSystemName[t.name] {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, ok := ctx.db.root.tables["__Column2"]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
cols := t.cols
|
||||
constraints := make([]*constraint, len(cols))
|
||||
defaults := make([]expression, len(cols))
|
||||
arg := []interface{}{t.name}
|
||||
rs, err := selectColumn2.l[0].exec(&execCtx{db: ctx.db, arg: arg})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var rows [][]interface{}
|
||||
ok = false
|
||||
if err := rs.(recordset).do(
|
||||
&execCtx{db: ctx.db, arg: arg},
|
||||
func(id interface{}, data []interface{}) (more bool, err error) {
|
||||
rows = append(rows, data)
|
||||
return true, nil
|
||||
},
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, row := range rows {
|
||||
nm := row[0].(string)
|
||||
nonNull := row[1].(bool)
|
||||
cexpr := row[2].(string)
|
||||
dexpr := row[3].(string)
|
||||
for i, c := range cols {
|
||||
if c.name == nm {
|
||||
var co *constraint
|
||||
if nonNull || cexpr != "" {
|
||||
co = &constraint{}
|
||||
constraints[i] = co
|
||||
if cexpr != "" {
|
||||
if co.expr, err = ctx.db.str2expr(cexpr); err != nil {
|
||||
return fmt.Errorf("constraint %q: %v", cexpr, err)
|
||||
}
|
||||
}
|
||||
|
||||
t.constraints = constraints
|
||||
}
|
||||
if dexpr != "" {
|
||||
if defaults[i], err = ctx.db.str2expr(dexpr); err != nil {
|
||||
return fmt.Errorf("constraint %q: %v", dexpr, err)
|
||||
}
|
||||
|
||||
t.defaults = defaults
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *table) checkConstraintsAndDefaults(ctx *execCtx, row []interface{}, m map[interface{}]interface{}) error {
|
||||
cols := t.cols
|
||||
|
||||
if len(t.defaults) != 0 {
|
||||
// 1.
|
||||
for _, c := range cols {
|
||||
m[c.name] = row[c.index]
|
||||
}
|
||||
|
||||
// 2.
|
||||
for i, c := range cols {
|
||||
val := row[c.index]
|
||||
expr := t.defaults[i]
|
||||
if val != nil || expr == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
dval, err := expr.eval(ctx, m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
row[c.index] = dval
|
||||
if err = typeCheck(row, []*col{c}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(t.constraints) != 0 {
|
||||
// 3.
|
||||
for _, c := range cols {
|
||||
m[c.name] = row[c.index]
|
||||
}
|
||||
|
||||
// 4.
|
||||
for i, c := range cols {
|
||||
constraint := t.constraints[i]
|
||||
if constraint == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
val := row[c.index]
|
||||
expr := constraint.expr
|
||||
if expr == nil { // Constraint: NOT NULL
|
||||
if val == nil {
|
||||
return fmt.Errorf("column %s: constraint violation: NOT NULL", c.name)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// Constraint is an expression
|
||||
cval, err := expr.eval(ctx, m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cval == nil {
|
||||
return fmt.Errorf("column %s: constraint violation: %s", c.name, expr)
|
||||
}
|
||||
|
||||
bval, ok := cval.(bool)
|
||||
if !ok {
|
||||
return fmt.Errorf("column %s: non bool constraint expression: %s", c.name, expr)
|
||||
}
|
||||
|
||||
if !bval {
|
||||
return fmt.Errorf("column %s: constraint violation: %s", c.name, expr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *table) clone() *table {
|
||||
r := &table{}
|
||||
*r = *t
|
||||
r.constraints = append([]*constraint(nil), t.constraints...)
|
||||
r.defaults = append([]expression(nil), t.defaults...)
|
||||
r.indices2 = nil
|
||||
if n := len(t.indices2); n != 0 {
|
||||
r.indices2 = make(map[string]*index2, n)
|
||||
for k, v := range t.indices2 {
|
||||
r.indices2[k] = v
|
||||
}
|
||||
}
|
||||
r.cols = make([]*col, len(t.cols))
|
||||
for i, v := range t.cols {
|
||||
c := &col{}
|
||||
*c = *v
|
||||
r.cols[i] = c
|
||||
}
|
||||
r.cols0 = make([]*col, len(t.cols0))
|
||||
for i, v := range t.cols0 {
|
||||
c := &col{}
|
||||
*c = *v
|
||||
r.cols0[i] = c
|
||||
}
|
||||
r.indices = make([]*indexedCol, len(t.indices))
|
||||
for i, v := range t.indices {
|
||||
if v != nil {
|
||||
c := &indexedCol{}
|
||||
*c = *v
|
||||
r.indices[i] = c
|
||||
}
|
||||
}
|
||||
r.xroots = make([]interface{}, len(t.xroots))
|
||||
copy(r.xroots, t.xroots)
|
||||
r.tnext, r.tprev = nil, nil
|
||||
return r
|
||||
}
|
||||
|
||||
func (t *table) findIndexByColName(name string) (*col, *indexedCol) {
|
||||
for i, v := range t.indices {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if i == 0 {
|
||||
if name == "id()" {
|
||||
return idCol, v
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if c := t.cols[i-1]; c.name == name {
|
||||
return c, v
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (t *table) findIndexByName(name string) interface{} {
|
||||
for _, v := range t.indices {
|
||||
if v != nil && v.name == name {
|
||||
return v
|
||||
}
|
||||
}
|
||||
for k, v := range t.indices2 {
|
||||
if k == name {
|
||||
return v
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *table) load() (err error) {
|
||||
data, err := t.store.Read(nil, t.h)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var hasIndices bool
|
||||
switch n := len(data); n {
|
||||
case 4:
|
||||
case 6:
|
||||
hasIndices = true
|
||||
default:
|
||||
return fmt.Errorf("corrupted DB: table data len %d", n)
|
||||
}
|
||||
|
||||
var ok bool
|
||||
if t.next, ok = data[0].(int64); !ok {
|
||||
return fmt.Errorf("corrupted DB: table data[0] of type %T", data[0])
|
||||
}
|
||||
|
||||
scols, ok := data[1].(string)
|
||||
if !ok {
|
||||
return fmt.Errorf("corrupted DB: table data[1] of type %T", data[1])
|
||||
}
|
||||
|
||||
if t.hhead, ok = data[2].(int64); !ok {
|
||||
return fmt.Errorf("corrupted DB: table data[2] of type %T", data[2])
|
||||
}
|
||||
|
||||
if t.name, ok = data[3].(string); !ok {
|
||||
return fmt.Errorf("corrupted DB: table data[3] of type %T", data[3])
|
||||
}
|
||||
|
||||
var head []interface{}
|
||||
if head, err = t.store.Read(nil, t.hhead); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(head) != 1 {
|
||||
return fmt.Errorf("corrupted DB: table head data len %d", len(head))
|
||||
}
|
||||
|
||||
if t.head, ok = head[0].(int64); !ok {
|
||||
return fmt.Errorf("corrupted DB: table head data[0] of type %T", head[0])
|
||||
}
|
||||
|
||||
a := strings.Split(scols, "|")
|
||||
t.cols0 = make([]*col, len(a))
|
||||
for i, v := range a {
|
||||
if len(v) < 1 {
|
||||
return fmt.Errorf("corrupted DB: field info %q", v)
|
||||
}
|
||||
|
||||
col := &col{name: v[1:], typ: int(v[0]), index: i}
|
||||
t.cols0[i] = col
|
||||
if col.name != "" {
|
||||
t.cols = append(t.cols, col)
|
||||
}
|
||||
}
|
||||
|
||||
if !hasIndices {
|
||||
return
|
||||
}
|
||||
|
||||
if t.hxroots, ok = data[5].(int64); !ok {
|
||||
return fmt.Errorf("corrupted DB: table data[5] of type %T", data[5])
|
||||
}
|
||||
|
||||
xroots, err := t.store.Read(nil, t.hxroots)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if g, e := len(xroots), len(t.cols0)+1; g != e {
|
||||
return fmt.Errorf("corrupted DB: got %d index roots, expected %d", g, e)
|
||||
}
|
||||
|
||||
indices, ok := data[4].(string)
|
||||
if !ok {
|
||||
return fmt.Errorf("corrupted DB: table data[4] of type %T", data[4])
|
||||
}
|
||||
|
||||
a = strings.Split(indices, "|")
|
||||
if g, e := len(a), len(t.cols0)+1; g != e {
|
||||
return fmt.Errorf("corrupted DB: got %d index definitions, expected %d", g, e)
|
||||
}
|
||||
|
||||
t.indices = make([]*indexedCol, len(a))
|
||||
for i, v := range a {
|
||||
if v == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if len(v) < 2 {
|
||||
return fmt.Errorf("corrupted DB: invalid index definition %q", v)
|
||||
}
|
||||
|
||||
nm := v[1:]
|
||||
h, ok := xroots[i].(int64)
|
||||
if !ok {
|
||||
return fmt.Errorf("corrupted DB: table index root of type %T", xroots[i])
|
||||
}
|
||||
|
||||
if h == 0 {
|
||||
return fmt.Errorf("corrupted DB: missing root for index %s", nm)
|
||||
}
|
||||
|
||||
unique := v[0] == 'u'
|
||||
x, err := t.store.OpenIndex(unique, h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t.indices[i] = &indexedCol{nm, unique, x, h}
|
||||
}
|
||||
t.xroots = xroots
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func newTable(store storage, name string, next int64, cols []*col, tprev, tnext *table) (t *table, err error) {
|
||||
hhead, err := store.Create(int64(0))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
scols := cols2meta(cols)
|
||||
h, err := store.Create(next, scols, hhead, name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
t = &table{
|
||||
cols0: cols,
|
||||
h: h,
|
||||
hhead: hhead,
|
||||
name: name,
|
||||
next: next,
|
||||
store: store,
|
||||
tnext: tnext,
|
||||
tprev: tprev,
|
||||
}
|
||||
return t.updateCols(), nil
|
||||
}
|
||||
|
||||
func (t *table) blobCols() (r []*col) {
|
||||
for _, c := range t.cols0 {
|
||||
switch c.typ {
|
||||
case qBlob, qBigInt, qBigRat, qTime, qDuration:
|
||||
r = append(r, c)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (t *table) truncate() (err error) {
|
||||
h := t.head
|
||||
var rec []interface{}
|
||||
blobCols := t.blobCols()
|
||||
for h != 0 {
|
||||
rec, err := t.store.Read(rec, h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nh := rec[0].(int64)
|
||||
|
||||
if err = t.store.Delete(h, blobCols...); err != nil { //LATER remove double read for len(blobCols) != 0
|
||||
return err
|
||||
}
|
||||
|
||||
h = nh
|
||||
}
|
||||
if err = t.store.Update(t.hhead, 0); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, v := range t.indices {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := v.x.Clear(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, ix := range t.indices2 {
|
||||
if err := ix.x.Clear(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
t.head = 0
|
||||
return t.updated()
|
||||
}
|
||||
|
||||
func (t *table) addIndex0(unique bool, indexName string, colIndex int) (int64, btreeIndex, error) {
|
||||
switch len(t.indices) {
|
||||
case 0:
|
||||
indices := make([]*indexedCol, len(t.cols0)+1)
|
||||
h, x, err := t.store.CreateIndex(unique)
|
||||
if err != nil {
|
||||
return -1, nil, err
|
||||
}
|
||||
|
||||
indices[colIndex+1] = &indexedCol{indexName, unique, x, h}
|
||||
xroots := make([]interface{}, len(indices))
|
||||
xroots[colIndex+1] = h
|
||||
hx, err := t.store.Create(xroots...)
|
||||
if err != nil {
|
||||
return -1, nil, err
|
||||
}
|
||||
|
||||
t.hxroots, t.xroots, t.indices = hx, xroots, indices
|
||||
return h, x, t.updated()
|
||||
default:
|
||||
ex := t.indices[colIndex+1]
|
||||
if ex != nil && ex.name != "" {
|
||||
colName := "id()"
|
||||
if colIndex >= 0 {
|
||||
colName = t.cols0[colIndex].name
|
||||
}
|
||||
return -1, nil, fmt.Errorf("column %s already has an index: %s", colName, ex.name)
|
||||
}
|
||||
|
||||
h, x, err := t.store.CreateIndex(unique)
|
||||
if err != nil {
|
||||
return -1, nil, err
|
||||
}
|
||||
|
||||
t.xroots[colIndex+1] = h
|
||||
if err := t.store.Update(t.hxroots, t.xroots...); err != nil {
|
||||
return -1, nil, err
|
||||
}
|
||||
|
||||
t.indices[colIndex+1] = &indexedCol{indexName, unique, x, h}
|
||||
return h, x, t.updated()
|
||||
}
|
||||
}
|
||||
|
||||
func (t *table) addIndex(unique bool, indexName string, colIndex int) (int64, error) {
|
||||
hx, x, err := t.addIndex0(unique, indexName, colIndex)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
// Must fill the new index.
|
||||
ncols := len(t.cols0)
|
||||
h, store := t.head, t.store
|
||||
for h != 0 {
|
||||
rec, err := store.Read(nil, h, t.cols...)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
if n := ncols + 2 - len(rec); n > 0 {
|
||||
rec = append(rec, make([]interface{}, n)...)
|
||||
}
|
||||
|
||||
if err = x.Create([]interface{}{rec[colIndex+2]}, h); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
h = rec[0].(int64)
|
||||
}
|
||||
return hx, nil
|
||||
}
|
||||
|
||||
func (t *table) addIndex2(execCtx *execCtx, unique bool, indexName string, exprList []expression) (int64, error) {
|
||||
if _, ok := t.indices2[indexName]; ok {
|
||||
panic("internal error 009")
|
||||
}
|
||||
|
||||
hx, x, err := t.store.CreateIndex(unique)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
var a []string
|
||||
for _, v := range exprList {
|
||||
a = append(a, v.String())
|
||||
}
|
||||
x2 := &index2{unique, x, hx, a, exprList}
|
||||
if t.indices2 == nil {
|
||||
t.indices2 = map[string]*index2{}
|
||||
}
|
||||
t.indices2[indexName] = x2
|
||||
|
||||
// Must fill the new index.
|
||||
m := map[interface{}]interface{}{}
|
||||
h, store := t.head, t.store
|
||||
for h != 0 {
|
||||
rec, err := store.Read(nil, h, t.cols...)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
for _, col := range t.cols {
|
||||
ci := col.index
|
||||
v := interface{}(nil)
|
||||
if ci < len(rec) {
|
||||
v = rec[ci+2]
|
||||
}
|
||||
m[col.name] = v
|
||||
}
|
||||
|
||||
id := rec[1].(int64)
|
||||
vlist, err := x2.eval(execCtx, t.cols, id, rec[2:])
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
if err := x2.x.Create(vlist, h); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
h = rec[0].(int64)
|
||||
}
|
||||
return hx, nil
|
||||
}
|
||||
|
||||
func (t *table) dropIndex(xIndex int) error {
|
||||
t.xroots[xIndex] = 0
|
||||
if err := t.indices[xIndex].x.Drop(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t.indices[xIndex] = nil
|
||||
return t.updated()
|
||||
}
|
||||
|
||||
func (t *table) updated() (err error) {
|
||||
switch {
|
||||
case len(t.indices) != 0:
|
||||
a := []string{}
|
||||
for _, v := range t.indices {
|
||||
if v == nil {
|
||||
a = append(a, "")
|
||||
continue
|
||||
}
|
||||
|
||||
s := "n"
|
||||
if v.unique {
|
||||
s = "u"
|
||||
}
|
||||
a = append(a, s+v.name)
|
||||
}
|
||||
return t.store.Update(t.h, t.next, cols2meta(t.updateCols().cols0), t.hhead, t.name, strings.Join(a, "|"), t.hxroots)
|
||||
default:
|
||||
return t.store.Update(t.h, t.next, cols2meta(t.updateCols().cols0), t.hhead, t.name)
|
||||
}
|
||||
}
|
||||
|
||||
// storage fields
|
||||
// 0: next record handle int64
|
||||
// 1: record id int64
|
||||
// 2...: data row
|
||||
func (t *table) addRecord(execCtx *execCtx, r []interface{}) (id int64, err error) {
|
||||
if id, err = t.store.ID(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
r = append([]interface{}{t.head, id}, r...)
|
||||
h, err := t.store.Create(r...)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for i, v := range t.indices {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if err = v.x.Create([]interface{}{r[i+1]}, h); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for _, ix := range t.indices2 {
|
||||
vlist, err := ix.eval(execCtx, t.cols, id, r[2:])
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
if err := ix.x.Create(vlist, h); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
}
|
||||
|
||||
if err = t.store.Update(t.hhead, h); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
t.head = h
|
||||
return
|
||||
}
|
||||
|
||||
func (t *table) flds() (r []*fld) {
|
||||
r = make([]*fld, len(t.cols))
|
||||
for i, v := range t.cols {
|
||||
r[i] = &fld{expr: &ident{v.name}, name: v.name}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (t *table) fieldNames() []string {
|
||||
r := make([]string, len(t.cols))
|
||||
for i, v := range t.cols {
|
||||
r[i] = v.name
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func (t *table) updateCols() *table {
|
||||
t.cols = t.cols[:0]
|
||||
for i, c := range t.cols0 {
|
||||
if c.name != "" {
|
||||
c.index = i
|
||||
t.cols = append(t.cols, c)
|
||||
}
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (t *table) row0(ctx *execCtx, h int64) ([]interface{}, error) {
|
||||
rec, err := ctx.db.store.Read(nil, h, t.cols...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if d := len(t.cols) - (len(rec) - 2); d > 0 {
|
||||
rec = append(rec, make([]interface{}, d)...)
|
||||
}
|
||||
|
||||
return rec, nil
|
||||
}
|
||||
|
||||
func (t *table) row(ctx *execCtx, h int64) (int64, []interface{}, error) {
|
||||
rec, err := t.row0(ctx, h)
|
||||
if err != nil {
|
||||
return -1, nil, err
|
||||
}
|
||||
|
||||
return rec[1].(int64), rec[2:], nil
|
||||
}
|
||||
|
||||
// storage fields
|
||||
// 0: handle of first table in DB int64
|
||||
type root struct {
|
||||
head int64 // Single linked table list
|
||||
lastInsertID int64
|
||||
parent *root
|
||||
rowsAffected int64 //LATER implement
|
||||
store storage
|
||||
tables map[string]*table
|
||||
thead *table
|
||||
}
|
||||
|
||||
func newRoot(store storage) (r *root, err error) {
|
||||
data, err := store.Read(nil, 1)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
switch len(data) {
|
||||
case 0: // new empty DB, create empty table list
|
||||
if err = store.BeginTransaction(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = store.Update(1, int64(0)); err != nil {
|
||||
store.Rollback()
|
||||
return
|
||||
}
|
||||
|
||||
if err = store.Commit(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return &root{
|
||||
store: store,
|
||||
tables: map[string]*table{},
|
||||
}, nil
|
||||
case 1: // existing DB, load tables
|
||||
if len(data) != 1 {
|
||||
return nil, fmt.Errorf("corrupted DB: root is an %d-scalar", len(data))
|
||||
}
|
||||
|
||||
p, ok := data[0].(int64)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("corrupted DB: root head has type %T", data[0])
|
||||
}
|
||||
|
||||
r := &root{
|
||||
head: p,
|
||||
store: store,
|
||||
tables: map[string]*table{},
|
||||
}
|
||||
|
||||
var tprev *table
|
||||
for p != 0 {
|
||||
t := &table{
|
||||
h: p,
|
||||
store: store,
|
||||
tprev: tprev,
|
||||
}
|
||||
|
||||
if r.thead == nil {
|
||||
r.thead = t
|
||||
}
|
||||
if tprev != nil {
|
||||
tprev.tnext = t
|
||||
}
|
||||
tprev = t
|
||||
|
||||
if err = t.load(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if r.tables[t.name] != nil { // duplicate
|
||||
return nil, fmt.Errorf("corrupted DB: duplicate table metadata for table %s", t.name)
|
||||
}
|
||||
|
||||
r.tables[t.name] = t
|
||||
p = t.next
|
||||
}
|
||||
return r, nil
|
||||
default:
|
||||
return nil, errIncompatibleDBFormat
|
||||
}
|
||||
}
|
||||
|
||||
func (r *root) findIndexByName(name string) (*table, interface{}) {
|
||||
for _, t := range r.tables {
|
||||
if i := t.findIndexByName(name); i != nil {
|
||||
return t, i
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (r *root) updated() (err error) {
|
||||
return r.store.Update(1, r.head)
|
||||
}
|
||||
|
||||
func (r *root) createTable(name string, cols []*col) (t *table, err error) {
|
||||
if _, ok := r.tables[name]; ok {
|
||||
panic("internal error 065")
|
||||
}
|
||||
|
||||
if t, err = newTable(r.store, name, r.head, cols, nil, r.thead); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = r.store.Update(1, t.h); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if p := r.thead; p != nil {
|
||||
p.tprev = t
|
||||
}
|
||||
r.tables[name], r.head, r.thead = t, t.h, t
|
||||
return
|
||||
}
|
||||
|
||||
func (r *root) dropTable(t *table) (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
delete(r.tables, t.name)
|
||||
}()
|
||||
|
||||
if err = t.truncate(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = t.store.Delete(t.hhead); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = t.store.Delete(t.h); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, v := range t.indices {
|
||||
if v != nil && v.x != nil {
|
||||
if err = v.x.Drop(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, v := range t.indices2 {
|
||||
if err = v.x.Drop(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if h := t.hxroots; h != 0 {
|
||||
if err = t.store.Delete(h); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case t.tprev == nil && t.tnext == nil:
|
||||
r.head = 0
|
||||
r.thead = nil
|
||||
err = r.updated()
|
||||
return errSet(&err, r.store.ResetID())
|
||||
case t.tprev == nil && t.tnext != nil:
|
||||
next := t.tnext
|
||||
next.tprev = nil
|
||||
r.head = next.h
|
||||
r.thead = next
|
||||
if err = r.updated(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return next.updated()
|
||||
case t.tprev != nil && t.tnext == nil: // last in list
|
||||
prev := t.tprev
|
||||
prev.next = 0
|
||||
prev.tnext = nil
|
||||
return prev.updated()
|
||||
default: //case t.tprev != nil && t.tnext != nil:
|
||||
prev, next := t.tprev, t.tnext
|
||||
prev.next = next.h
|
||||
prev.tnext = next
|
||||
next.tprev = prev
|
||||
if err = prev.updated(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return next.updated()
|
||||
}
|
||||
}
|
186
vendor/github.com/cznic/ql/vendored/github.com/camlistore/go4/lock/lock.go
generated
vendored
Normal file
@ -0,0 +1,186 @@
|
||||
/*
|
||||
Copyright 2013 The Go Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package lock is a file locking library.
|
||||
package lock
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Lock locks the given file, creating the file if necessary. If the
|
||||
// file already exists, it must have zero size or an error is returned.
|
||||
// The lock is an exclusive lock (a write lock), but locked files
|
||||
// should neither be read from nor written to. Such files should have
|
||||
// zero size and only exist to co-ordinate ownership across processes.
|
||||
//
|
||||
// A nil Closer is returned if an error occurred. Otherwise, close that
|
||||
// Closer to release the lock.
|
||||
//
|
||||
// On Linux, FreeBSD and OSX, a lock has the same semantics as fcntl(2)'s
|
||||
// advisory locks. In particular, closing any other file descriptor for the
|
||||
// same file will release the lock prematurely.
|
||||
//
|
||||
// Attempting to lock a file that is already locked by the current process
|
||||
// has undefined behavior.
|
||||
//
|
||||
// On other operating systems, lock will fall back to using the presence and
|
||||
// content of a file named name + '.lock' to implement locking behavior.
|
||||
func Lock(name string) (io.Closer, error) {
|
||||
abs, err := filepath.Abs(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lockmu.Lock()
|
||||
defer lockmu.Unlock()
|
||||
if locked[abs] {
|
||||
return nil, fmt.Errorf("file %q already locked", abs)
|
||||
}
|
||||
|
||||
c, err := lockFn(abs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot acquire lock: %v", err)
|
||||
}
|
||||
locked[abs] = true
|
||||
return c, nil
|
||||
}
|
||||
|
||||
var lockFn = lockPortable
|
||||
|
||||
// lockPortable is a portable version not using fcntl. Doesn't handle crashes as gracefully,
|
||||
// since it can leave stale lock files.
|
||||
func lockPortable(name string) (io.Closer, error) {
|
||||
fi, err := os.Stat(name)
|
||||
if err == nil && fi.Size() > 0 {
|
||||
st := portableLockStatus(name)
|
||||
switch st {
|
||||
case statusLocked:
|
||||
return nil, fmt.Errorf("file %q already locked", name)
|
||||
case statusStale:
|
||||
os.Remove(name)
|
||||
case statusInvalid:
|
||||
return nil, fmt.Errorf("can't Lock file %q: has invalid contents", name)
|
||||
}
|
||||
}
|
||||
f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_EXCL, 0666)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create lock file %s %v", name, err)
|
||||
}
|
||||
if err := json.NewEncoder(f).Encode(&pidLockMeta{OwnerPID: os.Getpid()}); err != nil {
|
||||
return nil, fmt.Errorf("cannot write owner pid: %v", err)
|
||||
}
|
||||
return &unlocker{
|
||||
f: f,
|
||||
abs: name,
|
||||
portable: true,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type lockStatus int
|
||||
|
||||
const (
|
||||
statusInvalid lockStatus = iota
|
||||
statusLocked
|
||||
statusUnlocked
|
||||
statusStale
|
||||
)
|
||||
|
||||
type pidLockMeta struct {
|
||||
OwnerPID int
|
||||
}
|
||||
|
||||
func portableLockStatus(path string) lockStatus {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return statusUnlocked
|
||||
}
|
||||
defer f.Close()
|
||||
var meta pidLockMeta
|
||||
if json.NewDecoder(f).Decode(&meta) != nil {
|
||||
return statusInvalid
|
||||
}
|
||||
if meta.OwnerPID == 0 {
|
||||
return statusInvalid
|
||||
}
|
||||
p, err := os.FindProcess(meta.OwnerPID)
|
||||
if err != nil {
|
||||
// e.g. on Windows
|
||||
return statusStale
|
||||
}
|
||||
// On unix, os.FindProcess always succeeds, so we have to send
|
||||
// it a signal to see if it's alive.
|
||||
if signalZero != nil {
|
||||
if p.Signal(signalZero) != nil {
|
||||
return statusStale
|
||||
}
|
||||
}
|
||||
return statusLocked
|
||||
}
|
||||
|
||||
var signalZero os.Signal // nil or set by lock_sigzero.go
|
||||
|
||||
var (
|
||||
lockmu sync.Mutex
|
||||
locked = map[string]bool{} // abs path -> true
|
||||
)
|
||||
|
||||
type unlocker struct {
|
||||
portable bool
|
||||
f *os.File
|
||||
abs string
|
||||
// once guards the close method call.
|
||||
once sync.Once
|
||||
// err holds the error returned by Close.
|
||||
err error
|
||||
}
|
||||
|
||||
func (u *unlocker) Close() error {
|
||||
u.once.Do(u.close)
|
||||
return u.err
|
||||
}
|
||||
|
||||
func (u *unlocker) close() {
|
||||
lockmu.Lock()
|
||||
defer lockmu.Unlock()
|
||||
delete(locked, u.abs)
|
||||
|
||||
if u.portable {
|
||||
// In the portable lock implementation, it's
|
||||
// important to close before removing because
|
||||
// Windows won't allow us to remove an open
|
||||
// file.
|
||||
if err := u.f.Close(); err != nil {
|
||||
u.err = err
|
||||
}
|
||||
if err := os.Remove(u.abs); err != nil {
|
||||
// Note that if both Close and Remove fail,
|
||||
// we care more about the latter than the former
|
||||
// so we'll return that error.
|
||||
u.err = err
|
||||
}
|
||||
return
|
||||
}
|
||||
// In other implementations, it's nice for us to clean up.
|
||||
// If we do this, though, it needs to be before the
|
||||
// u.f.Close below.
|
||||
os.Remove(u.abs)
|
||||
u.err = u.f.Close()
|
||||
}
|
32
vendor/github.com/cznic/ql/vendored/github.com/camlistore/go4/lock/lock_appengine.go
generated
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
// +build appengine
|
||||
|
||||
/*
|
||||
Copyright 2013 The Go Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package lock
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
func init() {
|
||||
lockFn = lockAppEngine
|
||||
}
|
||||
|
||||
func lockAppEngine(name string) (io.Closer, error) {
|
||||
return nil, errors.New("Lock not available on App Engine")
|
||||
}
|
67
vendor/github.com/cznic/ql/vendored/github.com/camlistore/go4/lock/lock_darwin_amd64.go
generated
vendored
Normal file
@ -0,0 +1,67 @@
|
||||
// +build darwin,amd64
|
||||
// +build !appengine
|
||||
|
||||
/*
|
||||
Copyright 2013 The Go Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package lock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func init() {
|
||||
lockFn = lockFcntl
|
||||
}
|
||||
|
||||
func lockFcntl(name string) (io.Closer, error) {
|
||||
fi, err := os.Stat(name)
|
||||
if err == nil && fi.Size() > 0 {
|
||||
return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
|
||||
}
|
||||
|
||||
f, err := os.Create(name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Lock Create of %s failed: %v", name, err)
|
||||
}
|
||||
|
||||
// This type matches C's "struct flock" defined in /usr/include/sys/fcntl.h.
|
||||
// TODO: move this into the standard syscall package.
|
||||
k := struct {
|
||||
Start uint64 // sizeof(off_t): 8
|
||||
Len uint64 // sizeof(off_t): 8
|
||||
Pid uint32 // sizeof(pid_t): 4
|
||||
Type uint16 // sizeof(short): 2
|
||||
Whence uint16 // sizeof(short): 2
|
||||
}{
|
||||
Type: syscall.F_WRLCK,
|
||||
Whence: uint16(os.SEEK_SET),
|
||||
Start: 0,
|
||||
Len: 0, // 0 means to lock the entire file.
|
||||
Pid: uint32(os.Getpid()),
|
||||
}
|
||||
|
||||
_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k)))
|
||||
if errno != 0 {
|
||||
f.Close()
|
||||
return nil, errno
|
||||
}
|
||||
return &unlocker{f: f, abs: name}, nil
|
||||
}
|
66
vendor/github.com/cznic/ql/vendored/github.com/camlistore/go4/lock/lock_freebsd.go
generated
vendored
Normal file
@ -0,0 +1,66 @@
|
||||
/*
|
||||
Copyright 2013 The Go Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package lock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func init() {
|
||||
lockFn = lockFcntl
|
||||
}
|
||||
|
||||
func lockFcntl(name string) (io.Closer, error) {
|
||||
fi, err := os.Stat(name)
|
||||
if err == nil && fi.Size() > 0 {
|
||||
return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
|
||||
}
|
||||
|
||||
f, err := os.Create(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// This type matches C's "struct flock" defined in /usr/include/fcntl.h.
|
||||
// TODO: move this into the standard syscall package.
|
||||
k := struct {
|
||||
Start int64 /* off_t starting offset */
|
||||
Len int64 /* off_t len = 0 means until end of file */
|
||||
Pid int32 /* pid_t lock owner */
|
||||
Type int16 /* short lock type: read/write, etc. */
|
||||
Whence int16 /* short type of l_start */
|
||||
Sysid int32 /* int remote system id or zero for local */
|
||||
}{
|
||||
Start: 0,
|
||||
Len: 0, // 0 means to lock the entire file.
|
||||
Pid: int32(os.Getpid()),
|
||||
Type: syscall.F_WRLCK,
|
||||
Whence: int16(os.SEEK_SET),
|
||||
Sysid: 0,
|
||||
}
|
||||
|
||||
_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k)))
|
||||
if errno != 0 {
|
||||
f.Close()
|
||||
return nil, errno
|
||||
}
|
||||
return &unlocker{f: f, abs: name}, nil
|
||||
}
|
67
vendor/github.com/cznic/ql/vendored/github.com/camlistore/go4/lock/lock_linux_amd64.go
generated
vendored
Normal file
@ -0,0 +1,67 @@
|
||||
// +build linux,amd64
|
||||
// +build !appengine
|
||||
|
||||
/*
|
||||
Copyright 2013 The Go Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package lock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func init() {
|
||||
lockFn = lockFcntl
|
||||
}
|
||||
|
||||
func lockFcntl(name string) (io.Closer, error) {
|
||||
fi, err := os.Stat(name)
|
||||
if err == nil && fi.Size() > 0 {
|
||||
return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
|
||||
}
|
||||
|
||||
f, err := os.Create(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// This type matches C's "struct flock" defined in /usr/include/bits/fcntl.h.
|
||||
// TODO: move this into the standard syscall package.
|
||||
k := struct {
|
||||
Type uint32
|
||||
Whence uint32
|
||||
Start uint64
|
||||
Len uint64
|
||||
Pid uint32
|
||||
}{
|
||||
Type: syscall.F_WRLCK,
|
||||
Whence: uint32(os.SEEK_SET),
|
||||
Start: 0,
|
||||
Len: 0, // 0 means to lock the entire file.
|
||||
Pid: uint32(os.Getpid()),
|
||||
}
|
||||
|
||||
_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k)))
|
||||
if errno != 0 {
|
||||
f.Close()
|
||||
return nil, errno
|
||||
}
|
||||
return &unlocker{f: f, abs: name}, nil
|
||||
}
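For comparison, the same whole-file write lock can be requested without hand-building the flock struct, assuming a Unix system where Go's syscall package provides FcntlFlock and Flock_t; this is a sketch, not part of the vendored code, and the lock path is hypothetical:

package main

import (
	"fmt"
	"os"
	"syscall"
)

// lockWholeFile places an exclusive advisory write lock on f via
// fcntl(F_SETLK), locking from offset 0 to the end of the file.
func lockWholeFile(f *os.File) error {
	lk := syscall.Flock_t{
		Type:   syscall.F_WRLCK,
		Whence: 0, // SEEK_SET
		Start:  0,
		Len:    0, // 0 means the whole file
	}
	return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lk)
}

func main() {
	f, err := os.Create("/tmp/example.fcntl.lock")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	fmt.Println("lock error:", lockWholeFile(f))
}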
|
68
vendor/github.com/cznic/ql/vendored/github.com/camlistore/go4/lock/lock_linux_arm.go
generated
vendored
Normal file
@ -0,0 +1,68 @@
|
||||
// +build linux,arm
|
||||
// +build !appengine
|
||||
|
||||
/*
|
||||
Copyright 2013 The Go Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package lock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func init() {
|
||||
lockFn = lockFcntl
|
||||
}
|
||||
|
||||
func lockFcntl(name string) (io.Closer, error) {
|
||||
fi, err := os.Stat(name)
|
||||
if err == nil && fi.Size() > 0 {
|
||||
return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
|
||||
}
|
||||
|
||||
f, err := os.Create(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// This type matches C's "struct flock" defined in /usr/include/bits/fcntl.h.
|
||||
// TODO: move this into the standard syscall package.
|
||||
k := struct {
|
||||
Type uint16
|
||||
Whence uint16
|
||||
Start uint32
|
||||
Len uint32
|
||||
Pid uint32
|
||||
}{
|
||||
Type: syscall.F_WRLCK,
|
||||
Whence: uint16(os.SEEK_SET),
|
||||
Start: 0,
|
||||
Len: 0, // 0 means to lock the entire file.
|
||||
Pid: uint32(os.Getpid()),
|
||||
}
|
||||
|
||||
const F_SETLK = 6 // actual value. syscall package is wrong: golang.org/issue/7059
|
||||
_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(F_SETLK), uintptr(unsafe.Pointer(&k)))
|
||||
if errno != 0 {
|
||||
f.Close()
|
||||
return nil, errno
|
||||
}
|
||||
return &unlocker{f: f, abs: name}, nil
|
||||
}
|
41
vendor/github.com/cznic/ql/vendored/github.com/camlistore/go4/lock/lock_plan9.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
/*
|
||||
Copyright 2013 The Go Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package lock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
func init() {
|
||||
lockFn = lockPlan9
|
||||
}
|
||||
|
||||
func lockPlan9(name string) (io.Closer, error) {
|
||||
fi, err := os.Stat(name)
|
||||
if err == nil && fi.Size() > 0 {
|
||||
return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Lock Create of %s failed: %v", name, err)
|
||||
}
|
||||
|
||||
return &unlocker{f: f, abs: name}, nil
|
||||
}
|
26
vendor/github.com/cznic/ql/vendored/github.com/camlistore/go4/lock/lock_sigzero.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
// +build !appengine
|
||||
// +build linux darwin freebsd openbsd netbsd dragonfly
|
||||
|
||||
/*
|
||||
Copyright 2013 The Go Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package lock
|
||||
|
||||
import "syscall"
|
||||
|
||||
func init() {
|
||||
signalZero = syscall.Signal(0)
|
||||
}
|
324
vendor/github.com/cznic/ql/vendored/github.com/cznic/exp/lldb/2pc.go
generated
vendored
Normal file
@ -0,0 +1,324 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Two Phase Commit & Structural ACID
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/cznic/fileutil"
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
var _ Filer = &ACIDFiler0{} // Ensure ACIDFiler0 is a Filer
|
||||
|
||||
type acidWrite struct {
|
||||
b []byte
|
||||
off int64
|
||||
}
|
||||
|
||||
type acidWriter0 ACIDFiler0
|
||||
|
||||
func (a *acidWriter0) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
f := (*ACIDFiler0)(a)
|
||||
if f.bwal == nil { // new epoch
|
||||
f.data = f.data[:0]
|
||||
f.bwal = bufio.NewWriter(f.wal)
|
||||
if err = a.writePacket([]interface{}{wpt00Header, walTypeACIDFiler0, ""}); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err = a.writePacket([]interface{}{wpt00WriteData, b, off}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
f.data = append(f.data, acidWrite{b, off})
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
func (a *acidWriter0) writePacket(items []interface{}) (err error) {
|
||||
f := (*ACIDFiler0)(a)
|
||||
b, err := EncodeScalars(items...)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var b4 [4]byte
|
||||
binary.BigEndian.PutUint32(b4[:], uint32(len(b)))
|
||||
if _, err = f.bwal.Write(b4[:]); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = f.bwal.Write(b); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if m := (4 + len(b)) % 16; m != 0 {
|
||||
var pad [15]byte
|
||||
_, err = f.bwal.Write(pad[:16-m])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// WAL Packet Tags
|
||||
const (
|
||||
wpt00Header = iota
|
||||
wpt00WriteData
|
||||
wpt00Checkpoint
|
||||
)
|
||||
|
||||
const (
|
||||
walTypeACIDFiler0 = iota
|
||||
)
|
||||
|
||||
// ACIDFiler0 is a very simple, synchronous implementation of 2PC. It uses a
|
||||
// single write ahead log file to provide the structural atomicity
|
||||
// (BeginUpdate/EndUpdate/Rollback) and durability (DB can be recovered from
|
||||
// WAL if a crash occurred).
|
||||
//
|
||||
// ACIDFiler0 is a Filer.
|
||||
//
|
||||
// NOTE: Durable synchronous 2PC involves three fsyncs in this implementation
|
||||
// (WAL, DB, zero truncated WAL). Where possible, it's recommended to collect
|
||||
// transactions for, say one second before performing the two phase commit as
|
||||
// the typical performance for rotational hard disks is about few tens of
|
||||
// fsyncs per second atmost. For an example of such collective transaction
|
||||
// approach please see the colecting FSM STT in Dbm's documentation[1].
|
||||
//
|
||||
// [1]: http://godoc.org/github.com/cznic/exp/dbm
|
||||
type ACIDFiler0 struct {
|
||||
*RollbackFiler
|
||||
wal *os.File
|
||||
bwal *bufio.Writer
|
||||
data []acidWrite
|
||||
testHook bool // keeps WAL untruncated (once)
|
||||
peakWal int64 // tracks WAL maximum used size
|
||||
peakBitFilerPages int // track maximum transaction memory
|
||||
}
|
||||
|
||||
// NewACIDFiler0 returns a newly created ACIDFiler0 with WAL in wal.
|
||||
//
|
||||
// If the WAL is zero sized then a previous clean shutdown of db is taken for
|
||||
// granted and no recovery procedure is performed.
|
||||
//
|
||||
// If the WAL is of non zero size then it is checked for having a
|
||||
// committed/fully finished transaction that has not yet been reflected in db. If such
|
||||
// a transaction exists, it's committed to db. If the recovery process finishes
|
||||
// successfully, the WAL is truncated to zero size and fsync'ed prior to return
|
||||
// from NewACIDFiler0.
|
||||
func NewACIDFiler(db Filer, wal *os.File) (r *ACIDFiler0, err error) {
|
||||
fi, err := wal.Stat()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
r = &ACIDFiler0{wal: wal}
|
||||
|
||||
if fi.Size() != 0 {
|
||||
if err = r.recoverDb(db); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
acidWriter := (*acidWriter0)(r)
|
||||
|
||||
if r.RollbackFiler, err = NewRollbackFiler(
|
||||
db,
|
||||
func(sz int64) (err error) {
|
||||
// Checkpoint
|
||||
if err = acidWriter.writePacket([]interface{}{wpt00Checkpoint, sz}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = r.bwal.Flush(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
r.bwal = nil
|
||||
|
||||
if err = r.wal.Sync(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
wfi, err := r.wal.Stat()
|
||||
switch err != nil {
|
||||
case true:
|
||||
// unexpected, but ignored
|
||||
case false:
|
||||
r.peakWal = mathutil.MaxInt64(wfi.Size(), r.peakWal)
|
||||
}
|
||||
|
||||
// Phase 1 commit complete
|
||||
|
||||
for _, v := range r.data {
|
||||
if _, err := db.WriteAt(v.b, v.off); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err = db.Truncate(sz); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = db.Sync(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Phase 2 commit complete
|
||||
|
||||
if !r.testHook {
|
||||
if err = r.wal.Truncate(0); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = r.wal.Seek(0, 0); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
r.testHook = false
|
||||
return r.wal.Sync()
|
||||
|
||||
},
|
||||
acidWriter,
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
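A rough usage sketch of the wrapper constructed above, assuming lldb's NewSimpleFileFiler as the backing Filer and using the vendored import path from this diff purely for illustration (file names are hypothetical):

package main

import (
	"log"
	"os"

	"github.com/cznic/ql/vendored/github.com/cznic/exp/lldb"
)

func main() {
	db, err := os.OpenFile("example.db", os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		log.Fatal(err)
	}
	wal, err := os.OpenFile("example.wal", os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		log.Fatal(err)
	}

	// Any committed-but-unapplied WAL transaction is replayed here.
	filer, err := lldb.NewACIDFiler(lldb.NewSimpleFileFiler(db), wal)
	if err != nil {
		log.Fatal(err)
	}

	// One structural transaction: writes are captured in the WAL first and
	// reach the DB during the checkpoint triggered by the final EndUpdate.
	if err := filer.BeginUpdate(); err != nil {
		log.Fatal(err)
	}
	if _, err := filer.WriteAt([]byte("hello"), 0); err != nil {
		log.Fatal(err)
	}
	if err := filer.EndUpdate(); err != nil {
		log.Fatal(err)
	}
}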
|
||||
|
||||
// PeakWALSize reports the maximum size WAL has ever used.
|
||||
func (a ACIDFiler0) PeakWALSize() int64 {
|
||||
return a.peakWal
|
||||
}
|
||||
|
||||
func (a *ACIDFiler0) readPacket(f *bufio.Reader) (items []interface{}, err error) {
|
||||
var b4 [4]byte
|
||||
n, err := io.ReadAtLeast(f, b4[:], 4)
|
||||
if n != 4 {
|
||||
return
|
||||
}
|
||||
|
||||
ln := int(binary.BigEndian.Uint32(b4[:]))
|
||||
m := (4 + ln) % 16
|
||||
padd := (16 - m) % 16
|
||||
b := make([]byte, ln+padd)
|
||||
if n, err = io.ReadAtLeast(f, b, len(b)); n != len(b) {
|
||||
return
|
||||
}
|
||||
|
||||
return DecodeScalars(b[:ln])
|
||||
}
|
||||
|
||||
func (a *ACIDFiler0) recoverDb(db Filer) (err error) {
|
||||
fi, err := a.wal.Stat()
|
||||
if err != nil {
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: err}
|
||||
}
|
||||
|
||||
if sz := fi.Size(); sz%16 != 0 {
|
||||
return &ErrILSEQ{Type: ErrFileSize, Name: a.wal.Name(), Arg: sz}
|
||||
}
|
||||
|
||||
f := bufio.NewReader(a.wal)
|
||||
items, err := a.readPacket(f)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(items) != 3 || items[0] != int64(wpt00Header) || items[1] != int64(walTypeACIDFiler0) {
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("invalid packet items %#v", items)}
|
||||
}
|
||||
|
||||
tr := NewBTree(nil)
|
||||
|
||||
for {
|
||||
items, err = a.readPacket(f)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(items) < 2 {
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("too few packet items %#v", items)}
|
||||
}
|
||||
|
||||
switch items[0] {
|
||||
case int64(wpt00WriteData):
|
||||
if len(items) != 3 {
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("invalid data packet items %#v", items)}
|
||||
}
|
||||
|
||||
b, off := items[1].([]byte), items[2].(int64)
|
||||
var key [8]byte
|
||||
binary.BigEndian.PutUint64(key[:], uint64(off))
|
||||
if err = tr.Set(key[:], b); err != nil {
|
||||
return
|
||||
}
|
||||
case int64(wpt00Checkpoint):
|
||||
var b1 [1]byte
|
||||
if n, err := f.Read(b1[:]); n != 0 || err == nil {
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("checkpoint n %d, err %v", n, err)}
|
||||
}
|
||||
|
||||
if len(items) != 2 {
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("checkpoint packet invalid items %#v", items)}
|
||||
}
|
||||
|
||||
sz := items[1].(int64)
|
||||
enum, err := tr.seekFirst()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
k, v, err := enum.current()
|
||||
if err != nil {
|
||||
if fileutil.IsEOF(err) {
|
||||
break
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err = db.WriteAt(v, int64(binary.BigEndian.Uint64(k))); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = enum.next(); err != nil {
|
||||
if fileutil.IsEOF(err) {
|
||||
break
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err = db.Truncate(sz); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = db.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Recovery complete
|
||||
|
||||
if err = a.wal.Truncate(0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return a.wal.Sync()
|
||||
default:
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("packet tag %v", items[0])}
|
||||
}
|
||||
}
|
||||
}
|
44
vendor/github.com/cznic/ql/vendored/github.com/cznic/exp/lldb/2pc_docs.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
|
||||
Anatomy of a WAL file
|
||||
|
||||
WAL file
|
||||
A sequence of packets
|
||||
|
||||
WAL packet, parts in slice notation
|
||||
[0:4], 4 bytes: N uint32 // network byte order
|
||||
[4:4+N], N bytes: payload []byte // gb encoded scalars
|
||||
|
||||
Packets, including the 4 byte 'size' prefix, MUST BE padded to size == 0 (mod
|
||||
16). The values of the padding bytes MUST BE zero.
|
||||
|
||||
The first item of the encoded scalars is a packet type number (packet tag). The meaning of
|
||||
any other item(s) of the payload depends on the packet tag.
|
||||
|
||||
Packet definitions
|
||||
|
||||
{wpt00Header int, typ int, s string}
|
||||
typ: Must be zero (ACIDFiler0 file).
|
||||
s: Any comment string, empty string is okay.
|
||||
|
||||
This packet must be present only once - as the first packet of
|
||||
a WAL file.
|
||||
|
||||
{wpt00WriteData int, b []byte, off int64}
|
||||
Write data (WriteAt(b, off)).
|
||||
|
||||
{wpt00Checkpoint int, sz int64}
|
||||
Checkpoint (Truncate(sz)).
|
||||
|
||||
This packet must be present only once - as the last packet of
|
||||
a WAL file.
|
||||
|
||||
*/
|
||||
|
||||
package lldb
|
||||
|
||||
//TODO optimize bitfiler/wal/2pc data above final size
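The framing rule above (4-byte big-endian length prefix, payload, then zero padding to a multiple of 16 bytes) can be illustrated with a small helper; this sketch is not part of the vendored package:

package main

import (
	"encoding/binary"
	"fmt"
)

// frame wraps a payload as a WAL packet: 4-byte big-endian length,
// the payload, then zero padding so the total length is a multiple of 16.
func frame(payload []byte) []byte {
	b := make([]byte, 4, 4+len(payload)+15)
	binary.BigEndian.PutUint32(b, uint32(len(payload)))
	b = append(b, payload...)
	if m := len(b) % 16; m != 0 {
		b = append(b, make([]byte, 16-m)...)
	}
	return b
}

func main() {
	p := frame([]byte("hello"))
	fmt.Println(len(p), len(p)%16) // 16 0
}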
|
2320
vendor/github.com/cznic/ql/vendored/github.com/cznic/exp/lldb/btree.go
generated
vendored
Normal file
File diff suppressed because it is too large
170
vendor/github.com/cznic/ql/vendored/github.com/cznic/exp/lldb/errors.go
generated
vendored
Normal file
@ -0,0 +1,170 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Some errors returned by this package.
|
||||
//
|
||||
// Note that this package can return more errors than declared here, for
|
||||
// example io.EOF from Filer.ReadAt().
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ErrDecodeScalars is possibly returned from DecodeScalars
|
||||
type ErrDecodeScalars struct {
|
||||
B []byte // Data being decoded
|
||||
I int // offending offset
|
||||
}
|
||||
|
||||
// Error implements the built in error type.
|
||||
func (e *ErrDecodeScalars) Error() string {
|
||||
return fmt.Sprintf("DecodeScalars: corrupted data @ %d/%d", e.I, len(e.B))
|
||||
}
|
||||
|
||||
// ErrINVAL reports invalid values passed as parameters, for example negative
|
||||
// offsets where only non-negative ones are allowed or read from the DB.
|
||||
type ErrINVAL struct {
|
||||
Src string
|
||||
Val interface{}
|
||||
}
|
||||
|
||||
// Error implements the built in error type.
|
||||
func (e *ErrINVAL) Error() string {
|
||||
return fmt.Sprintf("%s: %+v", e.Src, e.Val)
|
||||
}
|
||||
|
||||
// ErrPERM is for example reported when a Filer is closed while BeginUpdate(s)
|
||||
// are not balanced with EndUpdate(s)/Rollback(s) or when EndUpdate or Rollback
|
||||
// is invoked which is not paired with a BeginUpdate.
|
||||
type ErrPERM struct {
|
||||
Src string
|
||||
}
|
||||
|
||||
// Error implements the built in error type.
|
||||
func (e *ErrPERM) Error() string {
|
||||
return fmt.Sprintf("%s: Operation not permitted", string(e.Src))
|
||||
}
|
||||
|
||||
// ErrType represents an ErrILSEQ kind.
|
||||
type ErrType int
|
||||
|
||||
// ErrILSEQ types
|
||||
const (
|
||||
ErrOther ErrType = iota
|
||||
|
||||
ErrAdjacentFree // Adjacent free blocks (.Off and .Arg)
|
||||
ErrDecompress // Used compressed block: corrupted compression
|
||||
ErrExpFreeTag // Expected a free block tag, got .Arg
|
||||
ErrExpUsedTag // Expected a used block tag, got .Arg
|
||||
ErrFLT // Free block is invalid or referenced multiple times
|
||||
ErrFLTLoad // FLT truncated to .Off, need size >= .Arg
|
||||
ErrFLTSize // Free block size (.Arg) doesn't belong to its list min size: .Arg2
|
||||
ErrFileSize // File .Name size (.Arg) != 0 (mod 16)
|
||||
ErrFreeChaining // Free block, .prev.next doesn't point back to this block
|
||||
ErrFreeTailBlock // Last block is free
|
||||
ErrHead // Head of a free block list has non zero Prev (.Arg)
|
||||
ErrInvalidRelocTarget // Reloc doesn't target (.Arg) a short or long used block
|
||||
ErrInvalidWAL // Corrupted write ahead log. .Name: file name, .More: more
|
||||
ErrLongFreeBlkTooLong // Long free block spans beyond EOF, size .Arg
|
||||
ErrLongFreeBlkTooShort // Long free block must have at least 2 atoms, got only .Arg
|
||||
ErrLongFreeNextBeyondEOF // Long free block .Next (.Arg) spans beyond EOF
|
||||
ErrLongFreePrevBeyondEOF // Long free block .Prev (.Arg) spans beyond EOF
|
||||
ErrLongFreeTailTag // Expected a long free block tail tag, got .Arg
|
||||
ErrLostFreeBlock // Free block is not in any FLT list
|
||||
ErrNullReloc // Used reloc block with nil target
|
||||
ErrRelocBeyondEOF // Used reloc points (.Arg) beyond EOF
|
||||
ErrShortFreeTailTag // Expected a short free block tail tag, got .Arg
|
||||
ErrSmall // Request for a free block (.Arg) returned a too small one (.Arg2) at .Off
|
||||
ErrTailTag // Block at .Off has invalid tail CC (compression code) tag, got .Arg
|
||||
ErrUnexpReloc // Unexpected reloc block referred to from reloc block .Arg
|
||||
ErrVerifyPadding // Used block has nonzero padding
|
||||
ErrVerifyTailSize // Long free block size .Arg but tail size .Arg2
|
||||
ErrVerifyUsedSpan // Used block size (.Arg) spans beyond EOF
|
||||
)
|
||||
|
||||
// ErrILSEQ reports a corrupted file format. Details in fields according to Type.
|
||||
type ErrILSEQ struct {
|
||||
Type ErrType
|
||||
Off int64
|
||||
Arg int64
|
||||
Arg2 int64
|
||||
Arg3 int64
|
||||
Name string
|
||||
More interface{}
|
||||
}
|
||||
|
||||
// Error implements the built in error type.
|
||||
func (e *ErrILSEQ) Error() string {
|
||||
switch e.Type {
|
||||
case ErrAdjacentFree:
|
||||
return fmt.Sprintf("Adjacent free blocks at offset %#x and %#x", e.Off, e.Arg)
|
||||
case ErrDecompress:
|
||||
return fmt.Sprintf("Compressed block at offset %#x: Corrupted compressed content", e.Off)
|
||||
case ErrExpFreeTag:
|
||||
return fmt.Sprintf("Block at offset %#x: Expected a free block tag, got %#2x", e.Off, e.Arg)
|
||||
case ErrExpUsedTag:
|
||||
return fmt.Sprintf("Block at offset %#x: Expected a used block tag, got %#2x", e.Off, e.Arg)
|
||||
case ErrFLT:
|
||||
return fmt.Sprintf("Free block at offset %#x is invalid or referenced multiple times", e.Off)
|
||||
case ErrFLTLoad:
|
||||
return fmt.Sprintf("FLT truncated to size %d, expected at least %d", e.Off, e.Arg)
|
||||
case ErrFLTSize:
|
||||
return fmt.Sprintf("Free block at offset %#x has size (%#x) but should be at least (%#x)", e.Off, e.Arg, e.Arg2)
|
||||
case ErrFileSize:
|
||||
return fmt.Sprintf("File %q size (%#x) != 0 (mod 16)", e.Name, e.Arg)
|
||||
case ErrFreeChaining:
|
||||
return fmt.Sprintf("Free block at offset %#x: .prev.next doesn't point back here.", e.Off)
|
||||
case ErrFreeTailBlock:
|
||||
return fmt.Sprintf("Free block at offset %#x: Cannot be last file block", e.Off)
|
||||
case ErrHead:
|
||||
return fmt.Sprintf("Block at offset %#x: Head of free block list has non zero .prev %#x", e.Off, e.Arg)
|
||||
case ErrInvalidRelocTarget:
|
||||
return fmt.Sprintf("Used reloc block at offset %#x: Target (%#x) is not a short or long used block", e.Off, e.Arg)
|
||||
case ErrInvalidWAL:
|
||||
return fmt.Sprintf("Corrupted write ahead log file: %q %v", e.Name, e.More)
|
||||
case ErrLongFreeBlkTooLong:
|
||||
return fmt.Sprintf("Long free block at offset %#x: Size (%#x) beyond EOF", e.Off, e.Arg)
|
||||
case ErrLongFreeBlkTooShort:
|
||||
return fmt.Sprintf("Long free block at offset %#x: Size (%#x) too small", e.Off, e.Arg)
|
||||
case ErrLongFreeNextBeyondEOF:
|
||||
return fmt.Sprintf("Long free block at offset %#x: Next (%#x) points beyond EOF", e.Off, e.Arg)
|
||||
case ErrLongFreePrevBeyondEOF:
|
||||
return fmt.Sprintf("Long free block at offset %#x: Prev (%#x) points beyond EOF", e.Off, e.Arg)
|
||||
case ErrLongFreeTailTag:
|
||||
return fmt.Sprintf("Block at offset %#x: Expected long free tail tag, got %#2x", e.Off, e.Arg)
|
||||
case ErrLostFreeBlock:
|
||||
return fmt.Sprintf("Free block at offset %#x: not in any FLT list", e.Off)
|
||||
case ErrNullReloc:
|
||||
return fmt.Sprintf("Used reloc block at offset %#x: Nil target", e.Off)
|
||||
case ErrRelocBeyondEOF:
|
||||
return fmt.Sprintf("Used reloc block at offset %#x: Link (%#x) points beyond EOF", e.Off, e.Arg)
|
||||
case ErrShortFreeTailTag:
|
||||
return fmt.Sprintf("Block at offset %#x: Expected short free tail tag, got %#2x", e.Off, e.Arg)
|
||||
case ErrSmall:
|
||||
return fmt.Sprintf("Request for a free block of size %d returned a too small (%d) one at offset %#x", e.Arg, e.Arg2, e.Off)
|
||||
case ErrTailTag:
|
||||
return fmt.Sprintf("Block at offset %#x: Invalid tail CC tag, got %#2x", e.Off, e.Arg)
|
||||
case ErrUnexpReloc:
|
||||
return fmt.Sprintf("Block at offset %#x: Unexpected reloc block. Referred to from reloc block at offset %#x", e.Off, e.Arg)
|
||||
case ErrVerifyPadding:
|
||||
return fmt.Sprintf("Used block at offset %#x: Nonzero padding", e.Off)
|
||||
case ErrVerifyTailSize:
|
||||
return fmt.Sprintf("Long free block at offset %#x: Size %#x, but tail size %#x", e.Off, e.Arg, e.Arg2)
|
||||
case ErrVerifyUsedSpan:
|
||||
return fmt.Sprintf("Used block at offset %#x: Size %#x spans beyond EOF", e.Off, e.Arg)
|
||||
}
|
||||
|
||||
more := ""
|
||||
if e.More != nil {
|
||||
more = fmt.Sprintf(", %v", e.More)
|
||||
}
|
||||
off := ""
|
||||
if e.Off != 0 {
|
||||
off = fmt.Sprintf(", off: %#x", e.Off)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("Error%s%s", off, more)
|
||||
}
|
1981
vendor/github.com/cznic/ql/vendored/github.com/cznic/exp/lldb/falloc.go
generated
vendored
Normal file
File diff suppressed because it is too large
192
vendor/github.com/cznic/ql/vendored/github.com/cznic/exp/lldb/filer.go
generated
vendored
Normal file
@ -0,0 +1,192 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// An abstraction of file like (persistent) storage with optional (abstracted)
|
||||
// support for structural integrity.
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
func doubleTrouble(first, second error) error {
|
||||
return fmt.Errorf("%q. Additionally, while attempting to recover (rollback): %q", first, second)
|
||||
}
|
||||
|
||||
// A Filer is a []byte-like model of a file or similar entity. It may
|
||||
// optionally implement support for structural transaction safety. In contrast
|
||||
// to a file stream, a Filer is not sequentially accessible. ReadAt and WriteAt
|
||||
// are always "addressed" by an offset and are assumed to perform atomically.
|
||||
// A Filer is not safe for concurrent access, it's designed for consumption by
|
||||
// the other objects in package, which should use a Filer from one goroutine
|
||||
// only or via a mutex. BeginUpdate, EndUpdate and Rollback must be either all
|
||||
// implemented by a Filer for structural integrity - or they should all be
|
||||
// no-ops; where/if that requirement is relaxed.
|
||||
//
|
||||
// If a Filer wraps another Filer implementation, it usually invokes the same
|
||||
// methods on the "inner" one, after some possible argument translations etc.
|
||||
// If a Filer implements the structural transactions handling methods
|
||||
// (BeginUpdate, EndUpdate and Rollback) as no-ops _and_ wraps another Filer:
|
||||
// it then still MUST invoke those methods on the inner Filer. This is
|
||||
// important for the case where a RollbackFiler exists somewhere down the
|
||||
// chain. It's also important for an Allocator - to know when it must
|
||||
// invalidate its FLT cache.
|
||||
type Filer interface {
|
||||
// BeginUpdate increments the "nesting" counter (initially zero). Every
|
||||
// call to BeginUpdate must be eventually "balanced" by exactly one of
|
||||
// EndUpdate or Rollback. Calls to BeginUpdate may nest.
|
||||
BeginUpdate() error
|
||||
|
||||
// Analogous to os.File.Close().
|
||||
Close() error
|
||||
|
||||
// EndUpdate decrements the "nesting" counter. If it's zero after that
|
||||
// then assume the "storage" has reached structural integrity (after a
|
||||
// batch of partial updates). If a Filer implements some support for
|
||||
// that (write ahead log, journal, etc.) then the appropriate actions
|
||||
// are to be taken for nesting == 0. Invocation of an unbalanced
|
||||
// EndUpdate is an error.
|
||||
EndUpdate() error
|
||||
|
||||
// Analogous to os.File.Name().
|
||||
Name() string
|
||||
|
||||
// PunchHole deallocates space inside a "file" in the byte range
|
||||
// starting at off and continuing for size bytes. The actual hole
|
||||
// created by PunchHole may be smaller than requested. The Filer size
|
||||
// (as reported by `Size()`) does not change when hole punching, even
|
||||
// when punching the end of a file off. In contrast to the Linux
|
||||
// implementation of FALLOC_FL_PUNCH_HOLE in `fallocate`(2), a Filer is
|
||||
// free not only to ignore `PunchHole()` (implement it as a nop), but
|
||||
// additionally no guarantees about the content of the hole, when
|
||||
// eventually read back, are required, i.e. any data, not only zeros,
|
||||
// can be read from the "hole", including just about anything that was left
|
||||
// there - with all of the possible security problems.
|
||||
PunchHole(off, size int64) error
|
||||
|
||||
// As os.File.ReadAt. Note: `off` is an absolute "file pointer"
|
||||
// address and cannot be negative even when a Filer is a InnerFiler.
|
||||
ReadAt(b []byte, off int64) (n int, err error)
|
||||
|
||||
// Rollback cancels and undoes the innermost pending update level.
|
||||
// Rollback decrements the "nesting" counter. If a Filer implements
|
||||
// some support for keeping structural integrity (write ahead log,
|
||||
// journal, etc.) then the appropriate actions are to be taken.
|
||||
// Invocation of an unbalanced Rollback is an error.
|
||||
Rollback() error
|
||||
|
||||
// Analogous to os.File.Stat().Size().
|
||||
Size() (int64, error)
|
||||
|
||||
// Analogous to os.File.Sync().
|
||||
Sync() (err error)
|
||||
|
||||
// Analogous to os.File.Truncate().
|
||||
Truncate(size int64) error
|
||||
|
||||
// Analogous to os.File.WriteAt(). Note: `off` is an absolute "file
|
||||
// pointer" address and cannot be negative even when a Filer is a
|
||||
// InnerFiler.
|
||||
WriteAt(b []byte, off int64) (n int, err error)
|
||||
}
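The balancing requirement for BeginUpdate/EndUpdate/Rollback described above is typically wrapped in a small helper. A sketch in the style of this package (the helper is ours, not part of the vendored file; it reuses the Filer interface and doubleTrouble defined above):

// update runs fn inside one structural transaction on f, pairing every
// BeginUpdate with exactly one EndUpdate or Rollback.
func update(f Filer, fn func() error) (err error) {
	if err = f.BeginUpdate(); err != nil {
		return err
	}
	if err = fn(); err != nil {
		// Undo the innermost pending update level on failure.
		if rerr := f.Rollback(); rerr != nil {
			return doubleTrouble(err, rerr)
		}
		return err
	}
	return f.EndUpdate()
}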
|
||||
|
||||
var _ Filer = &InnerFiler{} // Ensure InnerFiler is a Filer.
|
||||
|
||||
// A InnerFiler is a Filer with added addressing/size translation.
|
||||
type InnerFiler struct {
|
||||
outer Filer
|
||||
off int64
|
||||
}
|
||||
|
||||
// NewInnerFiler returns a new InnerFiler wrapped by `outer` in a way which
|
||||
// adds `off` to every access.
|
||||
//
|
||||
// For example, considering:
|
||||
//
|
||||
// inner := NewInnerFiler(outer, 10)
|
||||
//
|
||||
// then
|
||||
//
|
||||
// inner.WriteAt([]byte{42}, 4)
|
||||
//
|
||||
// translates to
|
||||
//
|
||||
// outer.WriteAt([]byte{42}, 14)
|
||||
//
|
||||
// But an attempt to emulate
|
||||
//
|
||||
// outer.WriteAt([]byte{17}, 9)
|
||||
//
|
||||
// by
|
||||
//
|
||||
// inner.WriteAt([]byte{17}, -1)
|
||||
//
|
||||
// will fail as the `off` parameter can never be < 0. Also note that
|
||||
//
|
||||
// inner.Size() == outer.Size() - off,
|
||||
//
|
||||
// i.e. `inner` pretends no `outer` exists. Finally, after e.g.
|
||||
//
|
||||
// inner.Truncate(7)
|
||||
// outer.Size() == 17
|
||||
//
|
||||
// will be true.
|
||||
func NewInnerFiler(outer Filer, off int64) *InnerFiler { return &InnerFiler{outer, off} }
|
||||
|
||||
// BeginUpdate implements Filer.
|
||||
func (f *InnerFiler) BeginUpdate() error { return f.outer.BeginUpdate() }
|
||||
|
||||
// Close implements Filer.
|
||||
func (f *InnerFiler) Close() (err error) { return f.outer.Close() }
|
||||
|
||||
// EndUpdate implements Filer.
|
||||
func (f *InnerFiler) EndUpdate() error { return f.outer.EndUpdate() }
|
||||
|
||||
// Name implements Filer.
|
||||
func (f *InnerFiler) Name() string { return f.outer.Name() }
|
||||
|
||||
// PunchHole implements Filer. `off`, `size` must be >= 0.
|
||||
func (f *InnerFiler) PunchHole(off, size int64) error { return f.outer.PunchHole(f.off+off, size) }
|
||||
|
||||
// ReadAt implements Filer. `off` must be >= 0.
|
||||
func (f *InnerFiler) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
if off < 0 {
|
||||
return 0, &ErrINVAL{f.outer.Name() + ":ReadAt invalid off", off}
|
||||
}
|
||||
|
||||
return f.outer.ReadAt(b, f.off+off)
|
||||
}
|
||||
|
||||
// Rollback implements Filer.
|
||||
func (f *InnerFiler) Rollback() error { return f.outer.Rollback() }
|
||||
|
||||
// Size implements Filer.
|
||||
func (f *InnerFiler) Size() (int64, error) {
|
||||
sz, err := f.outer.Size()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return mathutil.MaxInt64(sz-f.off, 0), nil
|
||||
}
|
||||
|
||||
// Sync() implements Filer.
|
||||
func (f *InnerFiler) Sync() (err error) {
|
||||
return f.outer.Sync()
|
||||
}
|
||||
|
||||
// Truncate implements Filer.
|
||||
func (f *InnerFiler) Truncate(size int64) error { return f.outer.Truncate(size + f.off) }
|
||||
|
||||
// WriteAt implements Filer. `off` must be >= 0.
|
||||
func (f *InnerFiler) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
if off < 0 {
|
||||
return 0, &ErrINVAL{f.outer.Name() + ":WriteAt invalid off", off}
|
||||
}
|
||||
|
||||
return f.outer.WriteAt(b, f.off+off)
|
||||
}
|
812
vendor/github.com/cznic/ql/vendored/github.com/cznic/exp/lldb/gb.go
generated
vendored
Normal file
@ -0,0 +1,812 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Utilities to encode/decode and collate Go predeclared scalar types (and the
|
||||
// typeless nil and []byte). The encoding format is a variation of the one
|
||||
// used by the "encoding/gob" package.
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
const (
|
||||
gbNull = iota // 0x00
|
||||
gbFalse // 0x01
|
||||
gbTrue // 0x02
|
||||
gbFloat0 // 0x03
|
||||
gbFloat1 // 0x04
|
||||
gbFloat2 // 0x05
|
||||
gbFloat3 // 0x06
|
||||
gbFloat4 // 0x07
|
||||
gbFloat5 // 0x08
|
||||
gbFloat6 // 0x09
|
||||
gbFloat7 // 0x0a
|
||||
gbFloat8 // 0x0b
|
||||
gbComplex0 // 0x0c
|
||||
gbComplex1 // 0x0d
|
||||
gbComplex2 // 0x0e
|
||||
gbComplex3 // 0x0f
|
||||
gbComplex4 // 0x10
|
||||
gbComplex5 // 0x11
|
||||
gbComplex6 // 0x12
|
||||
gbComplex7 // 0x13
|
||||
gbComplex8 // 0x14
|
||||
gbBytes00 // 0x15
|
||||
gbBytes01 // 0x16
|
||||
gbBytes02 // 0x17
|
||||
gbBytes03 // 0x18
|
||||
gbBytes04 // 0x19
|
||||
gbBytes05 // 0x1a
|
||||
gbBytes06 // 0x1b
|
||||
gbBytes07 // 0x1c
|
||||
gbBytes08 // 0x1d
|
||||
gbBytes09 // 0x1e
|
||||
gbBytes10 // 0x1f
|
||||
gbBytes11 // 0x20
|
||||
gbBytes12 // 0x21
|
||||
gbBytes13 // 0x22
|
||||
gbBytes14 // 0x23
|
||||
gbBytes15 // 0x24
|
||||
gbBytes16 // 0x25
|
||||
gbBytes17 // 0x26
|
||||
gbBytes1 // 0x27
|
||||
gbBytes2 // 0x28: Offset by one to allow 64kB sized []byte.
|
||||
gbString00 // 0x29
|
||||
gbString01 // 0x2a
|
||||
gbString02 // 0x2b
|
||||
gbString03 // 0x2c
|
||||
gbString04 // 0x2d
|
||||
gbString05 // 0x2e
|
||||
gbString06 // 0x2f
|
||||
gbString07 // 0x30
|
||||
gbString08 // 0x31
|
||||
gbString09 // 0x32
|
||||
gbString10 // 0x33
|
||||
gbString11 // 0x34
|
||||
gbString12 // 0x35
|
||||
gbString13 // 0x36
|
||||
gbString14 // 0x37
|
||||
gbString15 // 0x38
|
||||
gbString16 // 0x39
|
||||
gbString17 // 0x3a
|
||||
gbString1 // 0x3b
|
||||
gbString2 // 0x3c
|
||||
gbUintP1 // 0x3d
|
||||
gbUintP2 // 0x3e
|
||||
gbUintP3 // 0x3f
|
||||
gbUintP4 // 0x40
|
||||
gbUintP5 // 0x41
|
||||
gbUintP6 // 0x42
|
||||
gbUintP7 // 0x43
|
||||
gbUintP8 // 0x44
|
||||
gbIntM8 // 0x45
|
||||
gbIntM7 // 0x46
|
||||
gbIntM6 // 0x47
|
||||
gbIntM5 // 0x48
|
||||
gbIntM4 // 0x49
|
||||
gbIntM3 // 0x4a
|
||||
gbIntM2 // 0x4b
|
||||
gbIntM1 // 0x4c
|
||||
gbIntP1 // 0x4d
|
||||
gbIntP2 // 0x4e
|
||||
gbIntP3 // 0x4f
|
||||
gbIntP4 // 0x50
|
||||
gbIntP5 // 0x51
|
||||
gbIntP6 // 0x52
|
||||
gbIntP7 // 0x53
|
||||
gbIntP8 // 0x54
|
||||
gbInt0 // 0x55
|
||||
|
||||
gbIntMax = 255 - gbInt0 // 0xff == 170
|
||||
)
|
||||
|
||||
// EncodeScalars encodes a vector of predeclared scalar type values to a
|
||||
// []byte, making it suitable to store it as a "record" in a DB or to use it as
|
||||
// a key of a BTree.
|
||||
func EncodeScalars(scalars ...interface{}) (b []byte, err error) {
|
||||
for _, scalar := range scalars {
|
||||
switch x := scalar.(type) {
|
||||
default:
|
||||
return nil, &ErrINVAL{"EncodeScalars: unsupported type", fmt.Sprintf("%T in `%#v`", x, scalars)}
|
||||
|
||||
case nil:
|
||||
b = append(b, gbNull)
|
||||
|
||||
case bool:
|
||||
switch x {
|
||||
case false:
|
||||
b = append(b, gbFalse)
|
||||
case true:
|
||||
b = append(b, gbTrue)
|
||||
}
|
||||
|
||||
case float32:
|
||||
encFloat(float64(x), &b)
|
||||
case float64:
|
||||
encFloat(x, &b)
|
||||
|
||||
case complex64:
|
||||
encComplex(complex128(x), &b)
|
||||
case complex128:
|
||||
encComplex(x, &b)
|
||||
|
||||
case string:
|
||||
n := len(x)
|
||||
if n <= 17 {
|
||||
b = append(b, byte(gbString00+n))
|
||||
b = append(b, []byte(x)...)
|
||||
break
|
||||
}
|
||||
|
||||
if n > 65535 {
|
||||
return nil, fmt.Errorf("EncodeScalars: cannot encode string of length %d (limit 65535)", n)
|
||||
}
|
||||
|
||||
pref := byte(gbString1)
|
||||
if n > 255 {
|
||||
pref++
|
||||
}
|
||||
b = append(b, pref)
|
||||
encUint0(uint64(n), &b)
|
||||
b = append(b, []byte(x)...)
|
||||
|
||||
case int8:
|
||||
encInt(int64(x), &b)
|
||||
case int16:
|
||||
encInt(int64(x), &b)
|
||||
case int32:
|
||||
encInt(int64(x), &b)
|
||||
case int64:
|
||||
encInt(x, &b)
|
||||
case int:
|
||||
encInt(int64(x), &b)
|
||||
|
||||
case uint8:
|
||||
encUint(uint64(x), &b)
|
||||
case uint16:
|
||||
encUint(uint64(x), &b)
|
||||
case uint32:
|
||||
encUint(uint64(x), &b)
|
||||
case uint64:
|
||||
encUint(x, &b)
|
||||
case uint:
|
||||
encUint(uint64(x), &b)
|
||||
case []byte:
|
||||
n := len(x)
|
||||
if n <= 17 {
|
||||
b = append(b, byte(gbBytes00+n))
|
||||
b = append(b, []byte(x)...)
|
||||
break
|
||||
}
|
||||
|
||||
if n > 65536 {
|
||||
return nil, fmt.Errorf("EncodeScalars: cannot encode []byte of length %d (limit 65536)", n)
|
||||
}
|
||||
|
||||
pref := byte(gbBytes1)
|
||||
if n > 255 {
|
||||
pref++
|
||||
}
|
||||
b = append(b, pref)
|
||||
if n <= 255 {
|
||||
b = append(b, byte(n))
|
||||
} else {
|
||||
n--
|
||||
b = append(b, byte(n>>8), byte(n))
|
||||
}
|
||||
b = append(b, x...)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func encComplex(f complex128, b *[]byte) {
|
||||
encFloatPrefix(gbComplex0, real(f), b)
|
||||
encFloatPrefix(gbComplex0, imag(f), b)
|
||||
}
|
||||
|
||||
func encFloatPrefix(prefix byte, f float64, b *[]byte) {
|
||||
u := math.Float64bits(f)
|
||||
var n uint64
|
||||
for i := 0; i < 8; i++ {
|
||||
n <<= 8
|
||||
n |= u & 0xFF
|
||||
u >>= 8
|
||||
}
|
||||
bits := mathutil.BitLenUint64(n)
|
||||
if bits == 0 {
|
||||
*b = append(*b, prefix)
|
||||
return
|
||||
}
|
||||
|
||||
// 0 1 2 3 4 5 6 7 8 9
|
||||
// . 1 1 1 1 1 1 1 1 2
|
||||
encUintPrefix(prefix+1+byte((bits-1)>>3), n, b)
|
||||
}
|
||||
|
||||
func encFloat(f float64, b *[]byte) {
|
||||
encFloatPrefix(gbFloat0, f, b)
|
||||
}
|
||||
|
||||
func encUint0(n uint64, b *[]byte) {
|
||||
switch {
|
||||
case n <= 0xff:
|
||||
*b = append(*b, byte(n))
|
||||
case n <= 0xffff:
|
||||
*b = append(*b, byte(n>>8), byte(n))
|
||||
case n <= 0xffffff:
|
||||
*b = append(*b, byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= 0xffffffff:
|
||||
*b = append(*b, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= 0xffffffffff:
|
||||
*b = append(*b, byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= 0xffffffffffff:
|
||||
*b = append(*b, byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= 0xffffffffffffff:
|
||||
*b = append(*b, byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= math.MaxUint64:
|
||||
*b = append(*b, byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
}
|
||||
}
|
||||
|
||||
func encUintPrefix(prefix byte, n uint64, b *[]byte) {
|
||||
*b = append(*b, prefix)
|
||||
encUint0(n, b)
|
||||
}
|
||||
|
||||
func encUint(n uint64, b *[]byte) {
|
||||
bits := mathutil.Max(1, mathutil.BitLenUint64(n))
|
||||
encUintPrefix(gbUintP1+byte((bits-1)>>3), n, b)
|
||||
}
|
||||
|
||||
func encInt(n int64, b *[]byte) {
|
||||
switch {
|
||||
case n < -0x100000000000000:
|
||||
*b = append(*b, byte(gbIntM8), byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n < -0x1000000000000:
|
||||
*b = append(*b, byte(gbIntM7), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n < -0x10000000000:
|
||||
*b = append(*b, byte(gbIntM6), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n < -0x100000000:
|
||||
*b = append(*b, byte(gbIntM5), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n < -0x1000000:
|
||||
*b = append(*b, byte(gbIntM4), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n < -0x10000:
|
||||
*b = append(*b, byte(gbIntM3), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n < -0x100:
|
||||
*b = append(*b, byte(gbIntM2), byte(n>>8), byte(n))
|
||||
case n < 0:
|
||||
*b = append(*b, byte(gbIntM1), byte(n))
|
||||
case n <= gbIntMax:
|
||||
*b = append(*b, byte(gbInt0+n))
|
||||
case n <= 0xff:
|
||||
*b = append(*b, gbIntP1, byte(n))
|
||||
case n <= 0xffff:
|
||||
*b = append(*b, gbIntP2, byte(n>>8), byte(n))
|
||||
case n <= 0xffffff:
|
||||
*b = append(*b, gbIntP3, byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= 0xffffffff:
|
||||
*b = append(*b, gbIntP4, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= 0xffffffffff:
|
||||
*b = append(*b, gbIntP5, byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= 0xffffffffffff:
|
||||
*b = append(*b, gbIntP6, byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= 0xffffffffffffff:
|
||||
*b = append(*b, gbIntP7, byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= 0x7fffffffffffffff:
|
||||
*b = append(*b, gbIntP8, byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
}
|
||||
}
|
||||
|
||||
func decodeFloat(b []byte) float64 {
|
||||
var u uint64
|
||||
for i, v := range b {
|
||||
u |= uint64(v) << uint((i+8-len(b))*8)
|
||||
}
|
||||
return math.Float64frombits(u)
|
||||
}
|
||||
|
||||
// DecodeScalars decodes a []byte produced by EncodeScalars.
|
||||
func DecodeScalars(b []byte) (scalars []interface{}, err error) {
|
||||
b0 := b
|
||||
for len(b) != 0 {
|
||||
switch tag := b[0]; tag {
|
||||
//default:
|
||||
//return nil, fmt.Errorf("tag %d(%#x) not supported", b[0], b[0])
|
||||
case gbNull:
|
||||
scalars = append(scalars, nil)
|
||||
b = b[1:]
|
||||
case gbFalse:
|
||||
scalars = append(scalars, false)
|
||||
b = b[1:]
|
||||
case gbTrue:
|
||||
scalars = append(scalars, true)
|
||||
b = b[1:]
|
||||
case gbFloat0:
|
||||
scalars = append(scalars, 0.0)
|
||||
b = b[1:]
|
||||
case gbFloat1, gbFloat2, gbFloat3, gbFloat4, gbFloat5, gbFloat6, gbFloat7, gbFloat8:
|
||||
n := 1 + int(tag) - gbFloat0
|
||||
if len(b) < n-1 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, decodeFloat(b[1:n]))
|
||||
b = b[n:]
|
||||
case gbComplex0, gbComplex1, gbComplex2, gbComplex3, gbComplex4, gbComplex5, gbComplex6, gbComplex7, gbComplex8:
|
||||
n := 1 + int(tag) - gbComplex0
|
||||
if len(b) < n-1 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
re := decodeFloat(b[1:n])
|
||||
b = b[n:]
|
||||
|
||||
if len(b) == 0 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
tag = b[0]
|
||||
if tag < gbComplex0 || tag > gbComplex8 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
n = 1 + int(tag) - gbComplex0
|
||||
if len(b) < n-1 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, complex(re, decodeFloat(b[1:n])))
|
||||
b = b[n:]
|
||||
case gbBytes00, gbBytes01, gbBytes02, gbBytes03, gbBytes04,
|
||||
gbBytes05, gbBytes06, gbBytes07, gbBytes08, gbBytes09,
|
||||
gbBytes10, gbBytes11, gbBytes12, gbBytes13, gbBytes14,
|
||||
gbBytes15, gbBytes16, gbBytes17:
|
||||
n := int(tag - gbBytes00)
|
||||
if len(b) < n+1 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, append([]byte(nil), b[1:n+1]...))
|
||||
b = b[n+1:]
|
||||
case gbBytes1:
|
||||
if len(b) < 2 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
n := int(b[1])
|
||||
b = b[2:]
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, append([]byte(nil), b[:n]...))
|
||||
b = b[n:]
|
||||
case gbBytes2:
|
||||
if len(b) < 3 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
n := int(b[1])<<8 | int(b[2]) + 1
|
||||
b = b[3:]
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, append([]byte(nil), b[:n]...))
|
||||
b = b[n:]
|
||||
case gbString00, gbString01, gbString02, gbString03, gbString04,
|
||||
gbString05, gbString06, gbString07, gbString08, gbString09,
|
||||
gbString10, gbString11, gbString12, gbString13, gbString14,
|
||||
gbString15, gbString16, gbString17:
|
||||
n := int(tag - gbString00)
|
||||
if len(b) < n+1 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, string(b[1:n+1]))
|
||||
b = b[n+1:]
|
||||
case gbString1:
|
||||
if len(b) < 2 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
n := int(b[1])
|
||||
b = b[2:]
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, string(b[:n]))
|
||||
b = b[n:]
|
||||
case gbString2:
|
||||
if len(b) < 3 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
n := int(b[1])<<8 | int(b[2])
|
||||
b = b[3:]
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, string(b[:n]))
|
||||
b = b[n:]
|
||||
case gbUintP1, gbUintP2, gbUintP3, gbUintP4, gbUintP5, gbUintP6, gbUintP7, gbUintP8:
|
||||
b = b[1:]
|
||||
n := 1 + int(tag) - gbUintP1
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
var u uint64
|
||||
for _, v := range b[:n] {
|
||||
u = u<<8 | uint64(v)
|
||||
}
|
||||
scalars = append(scalars, u)
|
||||
b = b[n:]
|
||||
case gbIntM8, gbIntM7, gbIntM6, gbIntM5, gbIntM4, gbIntM3, gbIntM2, gbIntM1:
|
||||
b = b[1:]
|
||||
n := 8 - (int(tag) - gbIntM8)
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
u := uint64(math.MaxUint64)
|
||||
for _, v := range b[:n] {
|
||||
u = u<<8 | uint64(v)
|
||||
}
|
||||
scalars = append(scalars, int64(u))
|
||||
b = b[n:]
|
||||
case gbIntP1, gbIntP2, gbIntP3, gbIntP4, gbIntP5, gbIntP6, gbIntP7, gbIntP8:
|
||||
b = b[1:]
|
||||
n := 1 + int(tag) - gbIntP1
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
i := int64(0)
|
||||
for _, v := range b[:n] {
|
||||
i = i<<8 | int64(v)
|
||||
}
|
||||
scalars = append(scalars, i)
|
||||
b = b[n:]
|
||||
default:
|
||||
scalars = append(scalars, int64(b[0])-gbInt0)
|
||||
b = b[1:]
|
||||
}
|
||||
}
|
||||
return append([]interface{}(nil), scalars...), nil
|
||||
|
||||
corrupted:
|
||||
return nil, &ErrDecodeScalars{append([]byte(nil), b0...), len(b0) - len(b)}
|
||||
}
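A round-trip sketch for EncodeScalars/DecodeScalars, again using the vendored import path from this diff purely for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/cznic/ql/vendored/github.com/cznic/exp/lldb"
)

func main() {
	// Encode a mixed vector of predeclared scalar values into one []byte.
	b, err := lldb.EncodeScalars(nil, true, int64(-42), uint64(7), 3.14, "hi", []byte{1, 2})
	if err != nil {
		log.Fatal(err)
	}

	// Decoding restores the values; integers come back as int64 or uint64.
	v, err := lldb.DecodeScalars(b)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%#v\n", v)
}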
|
||||
|
||||
func collateComplex(x, y complex128) int {
|
||||
switch rx, ry := real(x), real(y); {
|
||||
case rx < ry:
|
||||
return -1
|
||||
case rx == ry:
|
||||
switch ix, iy := imag(x), imag(y); {
|
||||
case ix < iy:
|
||||
return -1
|
||||
case ix == iy:
|
||||
return 0
|
||||
case ix > iy:
|
||||
return 1
|
||||
}
|
||||
}
|
||||
//case rx > ry:
|
||||
return 1
|
||||
}
|
||||
|
||||
func collateFloat(x, y float64) int {
|
||||
switch {
|
||||
case x < y:
|
||||
return -1
|
||||
case x == y:
|
||||
return 0
|
||||
}
|
||||
//case x > y:
|
||||
return 1
|
||||
}
|
||||
|
||||
func collateInt(x, y int64) int {
|
||||
switch {
|
||||
case x < y:
|
||||
return -1
|
||||
case x == y:
|
||||
return 0
|
||||
}
|
||||
//case x > y:
|
||||
return 1
|
||||
}
|
||||
|
||||
func collateUint(x, y uint64) int {
|
||||
switch {
|
||||
case x < y:
|
||||
return -1
|
||||
case x == y:
|
||||
return 0
|
||||
}
|
||||
//case x > y:
|
||||
return 1
|
||||
}
|
||||
|
||||
func collateIntUint(x int64, y uint64) int {
|
||||
if y > math.MaxInt64 {
|
||||
return -1
|
||||
}
|
||||
|
||||
return collateInt(x, int64(y))
|
||||
}
|
||||
|
||||
func collateUintInt(x uint64, y int64) int {
|
||||
return -collateIntUint(y, x)
|
||||
}
|
||||
|
||||
func collateType(i interface{}) (r interface{}, err error) {
|
||||
switch x := i.(type) {
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid collate type %T", x)
|
||||
case nil:
|
||||
return i, nil
|
||||
case bool:
|
||||
return i, nil
|
||||
case int8:
|
||||
return int64(x), nil
|
||||
case int16:
|
||||
return int64(x), nil
|
||||
case int32:
|
||||
return int64(x), nil
|
||||
case int64:
|
||||
return i, nil
|
||||
case int:
|
||||
return int64(x), nil
|
||||
case uint8:
|
||||
return uint64(x), nil
|
||||
case uint16:
|
||||
return uint64(x), nil
|
||||
case uint32:
|
||||
return uint64(x), nil
|
||||
case uint64:
|
||||
return i, nil
|
||||
case uint:
|
||||
return uint64(x), nil
|
||||
case float32:
|
||||
return float64(x), nil
|
||||
case float64:
|
||||
return i, nil
|
||||
case complex64:
|
||||
return complex128(x), nil
|
||||
case complex128:
|
||||
return i, nil
|
||||
case []byte:
|
||||
return i, nil
|
||||
case string:
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Collate collates two arrays of Go predeclared scalar types (and the typeless
|
||||
// nil or []byte). If any other type appears in x or y, Collate will return a
|
||||
// non nil error. String items are collated using strCollate or lexically
|
||||
// byte-wise (as when using Go comparison operators) when strCollate is nil.
|
||||
// []byte items are collated using bytes.Compare.
|
||||
//
|
||||
// Collate returns:
|
||||
//
|
||||
// -1 if x < y
|
||||
// 0 if x == y
|
||||
// +1 if x > y
|
||||
//
|
||||
// The same value as defined above must be returned from strCollate.
|
||||
//
|
||||
// The "outer" ordering is: nil, bool, number, []byte, string. IOW, nil is
|
||||
// "smaller" than anything else except other nil, numbers collate before
|
||||
// []byte, []byte collate before strings, etc.
|
||||
//
|
||||
// Integers and real numbers collate as expected in math. However, complex
|
||||
// numbers are not ordered in Go. Here the ordering is defined: Complex numbers
|
||||
// are in comparison considered first only by their real part. Iff the result
|
||||
// is equality then the imaginary part is used to determine the ordering. In
|
||||
// this "second order" comparing, integers and real numbers are considered as
|
||||
// complex numbers with a zero imaginary part.
|
||||
func Collate(x, y []interface{}, strCollate func(string, string) int) (r int, err error) {
|
||||
nx, ny := len(x), len(y)
|
||||
|
||||
switch {
|
||||
case nx == 0 && ny != 0:
|
||||
return -1, nil
|
||||
case nx == 0 && ny == 0:
|
||||
return 0, nil
|
||||
case nx != 0 && ny == 0:
|
||||
return 1, nil
|
||||
}
|
||||
|
||||
r = 1
|
||||
if nx > ny {
|
||||
x, y, r = y, x, -r
|
||||
}
|
||||
|
||||
var c int
|
||||
for i, xi0 := range x {
|
||||
yi0 := y[i]
|
||||
xi, err := collateType(xi0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
yi, err := collateType(yi0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
switch x := xi.(type) {
|
||||
default:
|
||||
panic(fmt.Errorf("internal error: %T", x))
|
||||
|
||||
case nil:
|
||||
switch yi.(type) {
|
||||
case nil:
|
||||
// nop
|
||||
default:
|
||||
return -r, nil
|
||||
}
|
||||
|
||||
case bool:
|
||||
switch y := yi.(type) {
|
||||
case nil:
|
||||
return r, nil
|
||||
case bool:
|
||||
switch {
|
||||
case !x && y:
|
||||
return -r, nil
|
||||
case x == y:
|
||||
// nop
|
||||
case x && !y:
|
||||
return r, nil
|
||||
}
|
||||
default:
|
||||
return -r, nil
|
||||
}
|
||||
|
||||
case int64:
|
||||
switch y := yi.(type) {
|
||||
case nil, bool:
|
||||
return r, nil
|
||||
case int64:
|
||||
c = collateInt(x, y)
|
||||
case uint64:
|
||||
c = collateIntUint(x, y)
|
||||
case float64:
|
||||
c = collateFloat(float64(x), y)
|
||||
case complex128:
|
||||
c = collateComplex(complex(float64(x), 0), y)
|
||||
case []byte:
|
||||
return -r, nil
|
||||
case string:
|
||||
return -r, nil
|
||||
}
|
||||
|
||||
if c != 0 {
|
||||
return c * r, nil
|
||||
}
|
||||
|
||||
case uint64:
|
||||
switch y := yi.(type) {
|
||||
case nil, bool:
|
||||
return r, nil
|
||||
case int64:
|
||||
c = collateUintInt(x, y)
|
||||
case uint64:
|
||||
c = collateUint(x, y)
|
||||
case float64:
|
||||
c = collateFloat(float64(x), y)
|
||||
case complex128:
|
||||
c = collateComplex(complex(float64(x), 0), y)
|
||||
case []byte:
|
||||
return -r, nil
|
||||
case string:
|
||||
return -r, nil
|
||||
}
|
||||
|
||||
if c != 0 {
|
||||
return c * r, nil
|
||||
}
|
||||
|
||||
case float64:
|
||||
switch y := yi.(type) {
|
||||
case nil, bool:
|
||||
return r, nil
|
||||
case int64:
|
||||
c = collateFloat(x, float64(y))
|
||||
case uint64:
|
||||
c = collateFloat(x, float64(y))
|
||||
case float64:
|
||||
c = collateFloat(x, y)
|
||||
case complex128:
|
||||
c = collateComplex(complex(x, 0), y)
|
||||
case []byte:
|
||||
return -r, nil
|
||||
case string:
|
||||
return -r, nil
|
||||
}
|
||||
|
||||
if c != 0 {
|
||||
return c * r, nil
|
||||
}
|
||||
|
||||
case complex128:
|
||||
switch y := yi.(type) {
|
||||
case nil, bool:
|
||||
return r, nil
|
||||
case int64:
|
||||
c = collateComplex(x, complex(float64(y), 0))
|
||||
case uint64:
|
||||
c = collateComplex(x, complex(float64(y), 0))
|
||||
case float64:
|
||||
c = collateComplex(x, complex(y, 0))
|
||||
case complex128:
|
||||
c = collateComplex(x, y)
|
||||
case []byte:
|
||||
return -r, nil
|
||||
case string:
|
||||
return -r, nil
|
||||
}
|
||||
|
||||
if c != 0 {
|
||||
return c * r, nil
|
||||
}
|
||||
|
||||
case []byte:
|
||||
switch y := yi.(type) {
|
||||
case nil, bool, int64, uint64, float64, complex128:
|
||||
return r, nil
|
||||
case []byte:
|
||||
c = bytes.Compare(x, y)
|
||||
case string:
|
||||
return -r, nil
|
||||
}
|
||||
|
||||
if c != 0 {
|
||||
return c * r, nil
|
||||
}
|
||||
|
||||
case string:
|
||||
switch y := yi.(type) {
|
||||
case nil, bool, int64, uint64, float64, complex128:
|
||||
return r, nil
|
||||
case []byte:
|
||||
return r, nil
|
||||
case string:
|
||||
switch {
|
||||
case strCollate != nil:
|
||||
c = strCollate(x, y)
|
||||
case x < y:
|
||||
return -r, nil
|
||||
case x == y:
|
||||
c = 0
|
||||
case x > y:
|
||||
return r, nil
|
||||
}
|
||||
}
|
||||
|
||||
if c != 0 {
|
||||
return c * r, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if nx == ny {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
return -r, nil
|
||||
}
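// Illustrative sketch (not part of the original lldb source): comparing two
// scalar slices with Collate, passing a nil strCollate so strings are compared
// lexically byte-wise as documented above.
func exampleCollate() {
	x := []interface{}{int64(1), "abc"}
	y := []interface{}{int64(1), "abd"}
	r, err := Collate(x, y, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(r) // -1: the int64 items are equal and "abc" < "abd"
}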
|
155
vendor/github.com/cznic/ql/vendored/github.com/cznic/exp/lldb/lldb.go
generated
vendored
Normal file
@ -0,0 +1,155 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package lldb (WIP) implements a low level database engine. The database
|
||||
// model used could be considered a specific implementation of some small(est)
|
||||
// intersection of models listed in [1]. As a settled term is lacking, it'll be
|
||||
// called here a 'Virtual memory model' (VMM).
|
||||
//
|
||||
// Experimental release notes
|
||||
//
|
||||
// This is an experimental release. Don't open a DB from two applications or
|
||||
// two instances of an application - it will get corrupted (no file locking is
|
||||
// implemented and this task is delegated to lldb's clients).
|
||||
//
|
||||
// WARNING: THE LLDB API IS SUBJECT TO CHANGE.
|
||||
//
|
||||
// Filers
|
||||
//
|
||||
// A Filer is an abstraction of storage. A Filer may be a part of some process'
|
||||
// virtual address space, an OS file, a networked, remote file etc. Persistence
|
||||
// of the storage is optional, opaque to VMM and it is specific to a concrete
|
||||
// Filer implementation.
|
||||
//
|
||||
// Space management
|
||||
//
|
||||
// Mechanism to allocate, reallocate (resize), deallocate (and later reclaim
|
||||
// the unused) contiguous parts of a Filer, called blocks. Blocks are
|
||||
// identified and referred to by a handle, an int64.
|
||||
//
|
||||
// BTrees
|
||||
//
|
||||
// In addition to the VMM like services, lldb provides volatile and
|
||||
// non-volatile BTrees. Keys and values of a BTree are limited in size to 64kB
|
||||
// each (a bit more actually). Support for larger keys/values, if desired, can
|
||||
// be built atop a BTree to certain limits.
|
||||
//
|
||||
// Handles vs pointers
|
||||
//
|
||||
// A handle is the abstracted storage counterpart of a memory address. There
|
||||
// is one fundamental difference, though. Resizing a block never results in a
|
||||
// change to the handle which refers to the resized block, so a handle is more
|
||||
// akin to a unique numeric id/key. Yet it shares one property of pointers -
|
||||
// handles can be associated again with blocks after the original handle block
|
||||
// was deallocated. In other words, a handle uniqueness domain is the state of
|
||||
// the database and is not something comparable to e.g. an ever growing
|
||||
// numbering sequence.
|
||||
//
|
||||
// Also, as with memory pointers, dangling handles can be created and blocks
|
||||
// overwritten when such handles are used. Using a zero handle to refer to a
|
||||
// block will not panic; however, the resulting error is effectively the same
|
||||
// exceptional situation as dereferencing a nil pointer.
|
||||
//
|
||||
// Blocks
|
||||
//
|
||||
// Allocated/used blocks are limited in size to only a little bit more than
|
||||
// 64kB. Bigger semantic entities/structures must be built in lldb's client
|
||||
// code. The content of a block has no semantics attached, it's only a fully
|
||||
// opaque `[]byte`.
|
||||
//
|
||||
// Scalars
|
||||
//
|
||||
// Use of "scalars" applies to EncodeScalars, DecodeScalars and Collate. Those
|
||||
// first two "to bytes" and "from bytes" functions are suggested for handling
|
||||
// multi-valued Allocator content items and/or keys/values of BTrees (using
|
||||
// Collate for keys). Types called "scalar" are:
|
||||
//
|
||||
// nil (the typeless one)
|
||||
// bool
|
||||
// all integral types: [u]int8, [u]int16, [u]int32, [u]int, [u]int64
|
||||
// all floating point types: float32, float64
|
||||
// all complex types: complex64, complex128
|
||||
// []byte (64kB max)
|
||||
// string (64kB max)
|
||||
//
|
||||
// Specific implementations
|
||||
//
|
||||
// Included are concrete implementations of some of the VMM interfaces, meant
|
||||
// to ease serving simple client code, for testing, and possibly as an
|
||||
// example. More details in the documentation of such implementations.
|
||||
//
|
||||
// [1]: http://en.wikipedia.org/wiki/Database_model
|
||||
package lldb
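// Illustrative sketch (not part of the original lldb source): a scalar round
// trip, assuming the EncodeScalars/DecodeScalars functions named in the
// package documentation above (variadic encoder, slice-returning decoder).
func exampleScalarRoundTrip() ([]interface{}, error) {
	// Encode a mix of the documented scalar types into one []byte.
	b, err := EncodeScalars(nil, true, int64(-42), 3.14, "hello", []byte{0x01, 0x02})
	if err != nil {
		return nil, err
	}
	// Decode them back; the result is suitable e.g. as a BTree key or value.
	return DecodeScalars(b)
}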
|
||||
|
||||
const (
|
||||
fltSz = 0x70 // size of the FLT
|
||||
maxShort = 251
|
||||
maxRq = 65787
|
||||
maxFLTRq = 4112
|
||||
maxHandle = 1<<56 - 1
|
||||
atomLen = 16
|
||||
tagUsedLong = 0xfc
|
||||
tagUsedRelocated = 0xfd
|
||||
tagFreeShort = 0xfe
|
||||
tagFreeLong = 0xff
|
||||
tagNotCompressed = 0
|
||||
tagCompressed = 1
|
||||
)
|
||||
|
||||
// Content size n -> blocksize in atoms.
|
||||
func n2atoms(n int) int {
|
||||
if n > maxShort {
|
||||
n += 2
|
||||
}
|
||||
return (n+1)/16 + 1
|
||||
}
|
||||
|
||||
// Content size n -> number of padding zeros.
|
||||
func n2padding(n int) int {
|
||||
if n > maxShort {
|
||||
n += 2
|
||||
}
|
||||
return 15 - (n+1)&15
|
||||
}
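// Worked example (illustrative, not part of the original source): for a short
// content of n = 17 bytes (17 <= maxShort), n2atoms(17) = (17+1)/16 + 1 = 2
// atoms of atomLen (16) bytes each, and n2padding(17) = 15 - (17+1)&15 = 13
// zero bytes of padding.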
|
||||
|
||||
// Handle <-> offset
|
||||
func h2off(h int64) int64 { return (h + 6) * 16 }
|
||||
func off2h(off int64) int64 { return off/16 - 6 }
|
||||
|
||||
// Get a 7B int64 from b
|
||||
func b2h(b []byte) (h int64) {
|
||||
for _, v := range b[:7] {
|
||||
h = h<<8 | int64(v)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Put a 7B int64 into b
|
||||
func h2b(b []byte, h int64) []byte {
|
||||
for i := range b[:7] {
|
||||
b[i], h = byte(h>>48), h<<8
|
||||
}
|
||||
return b
|
||||
}
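// Worked example (illustrative, not part of the original source):
// h2off(1) = (1+6)*16 = 112, which coincides with fltSz above, and
// off2h(112) = 112/16 - 6 = 1, so the mapping round-trips.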
|
||||
|
||||
// Content length N (must be in [252, 65787]) to long used block M field.
|
||||
func n2m(n int) (m int) {
|
||||
return n % 0x10000
|
||||
}
|
||||
|
||||
// Long used block M (must be in [0, 65535]) field to content length N.
|
||||
func m2n(m int) (n int) {
|
||||
if m <= maxShort {
|
||||
m += 0x10000
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func bpack(a []byte) []byte {
|
||||
if cap(a) > len(a) {
|
||||
return append([]byte(nil), a...)
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
344
vendor/github.com/cznic/ql/vendored/github.com/cznic/exp/lldb/memfiler.go
generated
vendored
Normal file
@ -0,0 +1,344 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// A memory-only implementation of Filer.
|
||||
|
||||
/*
|
||||
|
||||
pgBits: 8
|
||||
BenchmarkMemFilerWrSeq 100000 19430 ns/op 1646.93 MB/s
|
||||
BenchmarkMemFilerRdSeq 100000 17390 ns/op 1840.13 MB/s
|
||||
BenchmarkMemFilerWrRand 1000000 1903 ns/op 133.94 MB/s
|
||||
BenchmarkMemFilerRdRand 1000000 1153 ns/op 221.16 MB/s
|
||||
|
||||
pgBits: 9
|
||||
BenchmarkMemFilerWrSeq 100000 16195 ns/op 1975.80 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 13011 ns/op 2459.39 MB/s
|
||||
BenchmarkMemFilerWrRand 1000000 2248 ns/op 227.28 MB/s
|
||||
BenchmarkMemFilerRdRand 1000000 1177 ns/op 433.94 MB/s
|
||||
|
||||
pgBits: 10
|
||||
BenchmarkMemFilerWrSeq 100000 16169 ns/op 1979.04 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 12673 ns/op 2524.91 MB/s
|
||||
BenchmarkMemFilerWrRand 1000000 5550 ns/op 184.30 MB/s
|
||||
BenchmarkMemFilerRdRand 1000000 1699 ns/op 601.79 MB/s
|
||||
|
||||
pgBits: 11
|
||||
BenchmarkMemFilerWrSeq 100000 13449 ns/op 2379.31 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 12058 ns/op 2653.80 MB/s
|
||||
BenchmarkMemFilerWrRand 500000 4335 ns/op 471.47 MB/s
|
||||
BenchmarkMemFilerRdRand 1000000 2843 ns/op 719.47 MB/s
|
||||
|
||||
pgBits: 12
|
||||
BenchmarkMemFilerWrSeq 200000 11976 ns/op 2672.00 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 12255 ns/op 2611.06 MB/s
|
||||
BenchmarkMemFilerWrRand 200000 8058 ns/op 507.14 MB/s
|
||||
BenchmarkMemFilerRdRand 500000 4365 ns/op 936.15 MB/s
|
||||
|
||||
pgBits: 13
|
||||
BenchmarkMemFilerWrSeq 200000 10852 ns/op 2948.69 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 11561 ns/op 2767.77 MB/s
|
||||
BenchmarkMemFilerWrRand 200000 9748 ns/op 840.15 MB/s
|
||||
BenchmarkMemFilerRdRand 500000 7236 ns/op 1131.59 MB/s
|
||||
|
||||
pgBits: 14
|
||||
BenchmarkMemFilerWrSeq 200000 10328 ns/op 3098.12 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 11292 ns/op 2833.66 MB/s
|
||||
BenchmarkMemFilerWrRand 100000 16768 ns/op 978.75 MB/s
|
||||
BenchmarkMemFilerRdRand 200000 13033 ns/op 1258.43 MB/s
|
||||
|
||||
pgBits: 15
|
||||
BenchmarkMemFilerWrSeq 200000 10309 ns/op 3103.93 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 11126 ns/op 2876.12 MB/s
|
||||
BenchmarkMemFilerWrRand 50000 31985 ns/op 1021.74 MB/s
|
||||
BenchmarkMemFilerRdRand 100000 25217 ns/op 1297.65 MB/s
|
||||
|
||||
pgBits: 16
|
||||
BenchmarkMemFilerWrSeq 200000 10324 ns/op 3099.45 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 11201 ns/op 2856.80 MB/s
|
||||
BenchmarkMemFilerWrRand 20000 55226 ns/op 1184.76 MB/s
|
||||
BenchmarkMemFilerRdRand 50000 48316 ns/op 1355.16 MB/s
|
||||
|
||||
pgBits: 17
|
||||
BenchmarkMemFilerWrSeq 200000 10377 ns/op 3083.53 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 11018 ns/op 2904.18 MB/s
|
||||
BenchmarkMemFilerWrRand 10000 143425 ns/op 913.12 MB/s
|
||||
BenchmarkMemFilerRdRand 20000 95267 ns/op 1376.99 MB/s
|
||||
|
||||
pgBits: 18
|
||||
BenchmarkMemFilerWrSeq 200000 10312 ns/op 3102.96 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 11069 ns/op 2890.84 MB/s
|
||||
BenchmarkMemFilerWrRand 5000 280910 ns/op 934.14 MB/s
|
||||
BenchmarkMemFilerRdRand 10000 188500 ns/op 1388.17 MB/s
|
||||
|
||||
*/
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/cznic/fileutil"
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
const (
|
||||
pgBits = 16
|
||||
pgSize = 1 << pgBits
|
||||
pgMask = pgSize - 1
|
||||
)
|
||||
|
||||
var _ Filer = &MemFiler{} // Ensure MemFiler is a Filer.
|
||||
|
||||
type memFilerMap map[int64]*[pgSize]byte
|
||||
|
||||
// MemFiler is a memory backed Filer. It implements BeginUpdate, EndUpdate and
|
||||
// Rollback as no-ops. MemFiler is not automatically persistent, but it has
|
||||
// ReadFrom and WriteTo methods.
|
||||
type MemFiler struct {
|
||||
m memFilerMap
|
||||
nest int
|
||||
size int64
|
||||
}
|
||||
|
||||
// NewMemFiler returns a new MemFiler.
|
||||
func NewMemFiler() *MemFiler {
|
||||
return &MemFiler{m: memFilerMap{}}
|
||||
}
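// Illustrative sketch (not part of the original lldb source): minimal MemFiler
// usage. WriteAt grows the filer as needed; ReadAt may return io.EOF when the
// read reaches the current end of the data.
func exampleMemFiler() error {
	f := NewMemFiler()
	if _, err := f.WriteAt([]byte("hello"), 0); err != nil {
		return err
	}

	buf := make([]byte, 5)
	if _, err := f.ReadAt(buf, 0); err != nil && err != io.EOF {
		return err
	}
	// buf now holds "hello".
	return f.Close()
}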
|
||||
|
||||
// BeginUpdate implements Filer.
|
||||
func (f *MemFiler) BeginUpdate() error {
|
||||
f.nest++
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close implements Filer.
|
||||
func (f *MemFiler) Close() (err error) {
|
||||
if f.nest != 0 {
|
||||
return &ErrPERM{(f.Name() + ":Close")}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// EndUpdate implements Filer.
|
||||
func (f *MemFiler) EndUpdate() (err error) {
|
||||
if f.nest == 0 {
|
||||
return &ErrPERM{(f.Name() + ": EndUpdate")}
|
||||
}
|
||||
|
||||
f.nest--
|
||||
return
|
||||
}
|
||||
|
||||
// Name implements Filer.
|
||||
func (f *MemFiler) Name() string {
|
||||
return fmt.Sprintf("%p.memfiler", f)
|
||||
}
|
||||
|
||||
// PunchHole implements Filer.
|
||||
func (f *MemFiler) PunchHole(off, size int64) (err error) {
|
||||
if off < 0 {
|
||||
return &ErrINVAL{f.Name() + ": PunchHole off", off}
|
||||
}
|
||||
|
||||
if size < 0 || off+size > f.size {
|
||||
return &ErrINVAL{f.Name() + ": PunchHole size", size}
|
||||
}
|
||||
|
||||
first := off >> pgBits
|
||||
if off&pgMask != 0 {
|
||||
first++
|
||||
}
|
||||
off += size - 1
|
||||
last := off >> pgBits
|
||||
if off&pgMask != 0 {
|
||||
last--
|
||||
}
|
||||
if limit := f.size >> pgBits; last > limit {
|
||||
last = limit
|
||||
}
|
||||
for pg := first; pg <= last; pg++ {
|
||||
delete(f.m, pg)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var zeroPage [pgSize]byte
|
||||
|
||||
// ReadAt implements Filer.
|
||||
func (f *MemFiler) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
avail := f.size - off
|
||||
pgI := off >> pgBits
|
||||
pgO := int(off & pgMask)
|
||||
rem := len(b)
|
||||
if int64(rem) >= avail {
|
||||
rem = int(avail)
|
||||
err = io.EOF
|
||||
}
|
||||
for rem != 0 && avail > 0 {
|
||||
pg := f.m[pgI]
|
||||
if pg == nil {
|
||||
pg = &zeroPage
|
||||
}
|
||||
nc := copy(b[:mathutil.Min(rem, pgSize)], pg[pgO:])
|
||||
pgI++
|
||||
pgO = 0
|
||||
rem -= nc
|
||||
n += nc
|
||||
b = b[nc:]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ReadFrom is a helper to populate MemFiler's content from r. 'n' reports the
|
||||
// number of bytes read from 'r'.
|
||||
func (f *MemFiler) ReadFrom(r io.Reader) (n int64, err error) {
|
||||
if err = f.Truncate(0); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
b [pgSize]byte
|
||||
rn int
|
||||
off int64
|
||||
)
|
||||
|
||||
var rerr error
|
||||
for rerr == nil {
|
||||
if rn, rerr = r.Read(b[:]); rn != 0 {
|
||||
f.WriteAt(b[:rn], off)
|
||||
off += int64(rn)
|
||||
n += int64(rn)
|
||||
}
|
||||
}
|
||||
if !fileutil.IsEOF(rerr) {
|
||||
err = rerr
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Rollback implements Filer.
|
||||
func (f *MemFiler) Rollback() (err error) { return }
|
||||
|
||||
// Size implements Filer.
|
||||
func (f *MemFiler) Size() (int64, error) {
|
||||
return f.size, nil
|
||||
}
|
||||
|
||||
// Sync implements Filer.
|
||||
func (f *MemFiler) Sync() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Truncate implements Filer.
|
||||
func (f *MemFiler) Truncate(size int64) (err error) {
|
||||
switch {
|
||||
case size < 0:
|
||||
return &ErrINVAL{"Truncate size", size}
|
||||
case size == 0:
|
||||
f.m = memFilerMap{}
|
||||
f.size = 0
|
||||
return
|
||||
}
|
||||
|
||||
first := size >> pgBits
|
||||
if size&pgMask != 0 {
|
||||
first++
|
||||
}
|
||||
last := f.size >> pgBits
|
||||
if f.size&pgMask != 0 {
|
||||
last++
|
||||
}
|
||||
for ; first < last; first++ {
|
||||
delete(f.m, first)
|
||||
}
|
||||
|
||||
f.size = size
|
||||
return
|
||||
}
|
||||
|
||||
// WriteAt implements Filer.
|
||||
func (f *MemFiler) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
pgI := off >> pgBits
|
||||
pgO := int(off & pgMask)
|
||||
n = len(b)
|
||||
rem := n
|
||||
var nc int
|
||||
for rem != 0 {
|
||||
if pgO == 0 && rem >= pgSize && bytes.Equal(b[:pgSize], zeroPage[:]) {
|
||||
delete(f.m, pgI)
|
||||
nc = pgSize
|
||||
} else {
|
||||
pg := f.m[pgI]
|
||||
if pg == nil {
|
||||
pg = new([pgSize]byte)
|
||||
f.m[pgI] = pg
|
||||
}
|
||||
nc = copy((*pg)[pgO:], b)
|
||||
}
|
||||
pgI++
|
||||
pgO = 0
|
||||
rem -= nc
|
||||
b = b[nc:]
|
||||
}
|
||||
f.size = mathutil.MaxInt64(f.size, off+int64(n))
|
||||
return
|
||||
}
|
||||
|
||||
// WriteTo is a helper to copy/persist MemFiler's content to w. If w is also
|
||||
// an io.WriterAt then WriteTo may skip writing sufficiently long runs of
// zeros, i.e. it will attempt to punch holes in 'w' where possible, provided
// 'w' happens to be a freshly created or zero-length-truncated OS file. 'n'
// reports the number of bytes written to 'w'.
|
||||
func (f *MemFiler) WriteTo(w io.Writer) (n int64, err error) {
|
||||
var (
|
||||
b [pgSize]byte
|
||||
wn, rn int
|
||||
off int64
|
||||
rerr error
|
||||
)
|
||||
|
||||
if wa, ok := w.(io.WriterAt); ok {
|
||||
lastPgI := f.size >> pgBits
|
||||
for pgI := int64(0); pgI <= lastPgI; pgI++ {
|
||||
sz := pgSize
|
||||
if pgI == lastPgI {
|
||||
sz = int(f.size & pgMask)
|
||||
}
|
||||
pg := f.m[pgI]
|
||||
if pg != nil {
|
||||
wn, err = wa.WriteAt(pg[:sz], off)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
n += int64(wn)
|
||||
off += int64(sz)
|
||||
if wn != sz {
|
||||
return n, io.ErrShortWrite
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var werr error
|
||||
for rerr == nil {
|
||||
if rn, rerr = f.ReadAt(b[:], off); rn != 0 {
|
||||
off += int64(rn)
|
||||
if wn, werr = w.Write(b[:rn]); werr != nil {
|
||||
return n, werr
|
||||
}
|
||||
|
||||
n += int64(wn)
|
||||
}
|
||||
}
|
||||
if !fileutil.IsEOF(rerr) {
|
||||
err = rerr
|
||||
}
|
||||
return
|
||||
}
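// Illustrative sketch (not part of the original lldb source): persisting a
// MemFiler into a bytes.Buffer with WriteTo and restoring it into a fresh
// MemFiler with ReadFrom. Writing to an os.File instead would additionally
// allow the hole punching described above, since *os.File is an io.WriterAt.
func exampleMemFilerPersistence(src *MemFiler) (*MemFiler, error) {
	var buf bytes.Buffer
	if _, err := src.WriteTo(&buf); err != nil {
		return nil, err
	}

	dst := NewMemFiler()
	if _, err := dst.ReadFrom(&buf); err != nil {
		return nil, err
	}
	return dst, nil
}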
|
130
vendor/github.com/cznic/ql/vendored/github.com/cznic/exp/lldb/osfiler.go
generated
vendored
Normal file
@ -0,0 +1,130 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
var _ Filer = (*OSFiler)(nil)
|
||||
|
||||
// OSFile is an os.File-like minimal set of methods allowing construction of a
|
||||
// Filer.
|
||||
type OSFile interface {
|
||||
Name() string
|
||||
Stat() (fi os.FileInfo, err error)
|
||||
Sync() (err error)
|
||||
Truncate(size int64) (err error)
|
||||
io.Closer
|
||||
io.Reader
|
||||
io.ReaderAt
|
||||
io.Seeker
|
||||
io.Writer
|
||||
io.WriterAt
|
||||
}
|
||||
|
||||
// OSFiler is like a SimpleFileFiler but based on an OSFile.
|
||||
type OSFiler struct {
|
||||
f OSFile
|
||||
nest int
|
||||
size int64 // not set if < 0
|
||||
}
|
||||
|
||||
// NewOSFiler returns a Filer from an OSFile. This Filer is like the
|
||||
// SimpleFileFiler: it does not implement the transaction-related methods.
|
||||
func NewOSFiler(f OSFile) (r *OSFiler) {
|
||||
return &OSFiler{
|
||||
f: f,
|
||||
size: -1,
|
||||
}
|
||||
}
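// Illustrative note (not part of the original lldb source): *os.File provides
// every method listed in OSFile, so a plain file can be wrapped directly via
// NewOSFiler(f). The assertion below documents that relationship at compile
// time.
var _ OSFile = (*os.File)(nil)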
|
||||
|
||||
// BeginUpdate implements Filer.
|
||||
func (f *OSFiler) BeginUpdate() (err error) {
|
||||
f.nest++
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close implements Filer.
|
||||
func (f *OSFiler) Close() (err error) {
|
||||
if f.nest != 0 {
|
||||
return &ErrPERM{(f.Name() + ":Close")}
|
||||
}
|
||||
|
||||
return f.f.Close()
|
||||
}
|
||||
|
||||
// EndUpdate implements Filer.
|
||||
func (f *OSFiler) EndUpdate() (err error) {
|
||||
if f.nest == 0 {
|
||||
return &ErrPERM{(f.Name() + ":EndUpdate")}
|
||||
}
|
||||
|
||||
f.nest--
|
||||
return
|
||||
}
|
||||
|
||||
// Name implements Filer.
|
||||
func (f *OSFiler) Name() string {
|
||||
return f.f.Name()
|
||||
}
|
||||
|
||||
// PunchHole implements Filer.
|
||||
func (f *OSFiler) PunchHole(off, size int64) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// ReadAt implements Filer.
|
||||
func (f *OSFiler) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
return f.f.ReadAt(b, off)
|
||||
}
|
||||
|
||||
// Rollback implements Filer.
|
||||
func (f *OSFiler) Rollback() (err error) { return }
|
||||
|
||||
// Size implements Filer.
|
||||
func (f *OSFiler) Size() (n int64, err error) {
|
||||
if f.size < 0 { // boot
|
||||
fi, err := f.f.Stat()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
f.size = fi.Size()
|
||||
}
|
||||
return f.size, nil
|
||||
}
|
||||
|
||||
// Sync implements Filer.
|
||||
func (f *OSFiler) Sync() (err error) {
|
||||
return f.f.Sync()
|
||||
}
|
||||
|
||||
// Truncate implements Filer.
|
||||
func (f *OSFiler) Truncate(size int64) (err error) {
|
||||
if size < 0 {
|
||||
return &ErrINVAL{"Truncate size", size}
|
||||
}
|
||||
|
||||
f.size = size
|
||||
return f.f.Truncate(size)
|
||||
}
|
||||
|
||||
// WriteAt implements Filer.
|
||||
func (f *OSFiler) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
if f.size < 0 { // boot
|
||||
fi, err := os.Stat(f.f.Name())
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
f.size = fi.Size()
|
||||
}
|
||||
f.size = mathutil.MaxInt64(f.size, int64(len(b))+off)
|
||||
return f.f.WriteAt(b, off)
|
||||
}
|
123
vendor/github.com/cznic/ql/vendored/github.com/cznic/exp/lldb/simplefilefiler.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// A basic os.File backed Filer.
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/cznic/fileutil"
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
var _ Filer = &SimpleFileFiler{} // Ensure SimpleFileFiler is a Filer.
|
||||
|
||||
// SimpleFileFiler is an os.File backed Filer intended for use where structural
|
||||
// consistency can be reached by other means (SimpleFileFiler is for example
|
||||
// wrapped in a RollbackFiler or ACIDFiler0) or where persistence is not
|
||||
// required (temporary/working data sets).
|
||||
//
|
||||
// SimpleFileFiler is the most simple os.File backed Filer implementation as it
|
||||
// does not really implement BeginUpdate and EndUpdate/Rollback in any way
|
||||
// which would protect the structural integrity of data. If misused e.g. as a
|
||||
// real database storage without other measures, it can easily cause data loss
|
||||
// when, for example, a power outage occurs or the updating process terminates
|
||||
// abruptly.
|
||||
type SimpleFileFiler struct {
|
||||
file *os.File
|
||||
nest int
|
||||
size int64 // not set if < 0
|
||||
}
|
||||
|
||||
// NewSimpleFileFiler returns a new SimpleFileFiler.
|
||||
func NewSimpleFileFiler(f *os.File) *SimpleFileFiler {
|
||||
return &SimpleFileFiler{file: f, size: -1}
|
||||
}
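// Illustrative sketch (not part of the original lldb source): wrapping a
// temporary file in a SimpleFileFiler. os.CreateTemp assumes Go 1.16+; as the
// doc comment above warns, use a RollbackFiler (or similar) on top when
// structural integrity matters.
func exampleSimpleFileFiler() error {
	osf, err := os.CreateTemp("", "lldb-example-")
	if err != nil {
		return err
	}
	filer := NewSimpleFileFiler(osf)
	if _, err := filer.WriteAt([]byte{0x01, 0x02, 0x03}, 0); err != nil {
		return err
	}
	return filer.Close()
}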
|
||||
|
||||
// BeginUpdate implements Filer.
|
||||
func (f *SimpleFileFiler) BeginUpdate() error {
|
||||
f.nest++
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close implements Filer.
|
||||
func (f *SimpleFileFiler) Close() (err error) {
|
||||
if f.nest != 0 {
|
||||
return &ErrPERM{(f.Name() + ":Close")}
|
||||
}
|
||||
|
||||
return f.file.Close()
|
||||
}
|
||||
|
||||
// EndUpdate implements Filer.
|
||||
func (f *SimpleFileFiler) EndUpdate() (err error) {
|
||||
if f.nest == 0 {
|
||||
return &ErrPERM{(f.Name() + ":EndUpdate")}
|
||||
}
|
||||
|
||||
f.nest--
|
||||
return
|
||||
}
|
||||
|
||||
// Name implements Filer.
|
||||
func (f *SimpleFileFiler) Name() string {
|
||||
return f.file.Name()
|
||||
}
|
||||
|
||||
// PunchHole implements Filer.
|
||||
func (f *SimpleFileFiler) PunchHole(off, size int64) (err error) {
|
||||
return fileutil.PunchHole(f.file, off, size)
|
||||
}
|
||||
|
||||
// ReadAt implements Filer.
|
||||
func (f *SimpleFileFiler) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
return f.file.ReadAt(b, off)
|
||||
}
|
||||
|
||||
// Rollback implements Filer.
|
||||
func (f *SimpleFileFiler) Rollback() (err error) { return }
|
||||
|
||||
// Size implements Filer.
|
||||
func (f *SimpleFileFiler) Size() (int64, error) {
|
||||
if f.size < 0 { // boot
|
||||
fi, err := os.Stat(f.file.Name())
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
f.size = fi.Size()
|
||||
}
|
||||
return f.size, nil
|
||||
}
|
||||
|
||||
// Sync implements Filer.
|
||||
func (f *SimpleFileFiler) Sync() error {
|
||||
return f.file.Sync()
|
||||
}
|
||||
|
||||
// Truncate implements Filer.
|
||||
func (f *SimpleFileFiler) Truncate(size int64) (err error) {
|
||||
if size < 0 {
|
||||
return &ErrINVAL{"Truncate size", size}
|
||||
}
|
||||
|
||||
f.size = size
|
||||
return f.file.Truncate(size)
|
||||
}
|
||||
|
||||
// WriteAt implements Filer.
|
||||
func (f *SimpleFileFiler) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
if f.size < 0 { // boot
|
||||
fi, err := os.Stat(f.file.Name())
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
f.size = fi.Size()
|
||||
}
|
||||
f.size = mathutil.MaxInt64(f.size, int64(len(b))+off)
|
||||
return f.file.WriteAt(b, off)
|
||||
}
|
642
vendor/github.com/cznic/ql/vendored/github.com/cznic/exp/lldb/xact.go
generated
vendored
Normal file
@ -0,0 +1,642 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Structural transactions.
|
||||
|
||||
package lldb
|
||||
|
||||
//DONE+ TransactionalMemoryFiler
|
||||
// ----
|
||||
// Use NewRollbackFiler(myMemFiler, ...)
|
||||
|
||||
/*
|
||||
|
||||
bfBits: 3
|
||||
BenchmarkRollbackFiler 20000000 102 ns/op 9.73 MB/s
|
||||
|
||||
bfBits: 4
|
||||
BenchmarkRollbackFiler 50000000 55.7 ns/op 17.95 MB/s
|
||||
|
||||
bfBits: 5
|
||||
BenchmarkRollbackFiler 100000000 32.2 ns/op 31.06 MB/s
|
||||
|
||||
bfBits: 6
|
||||
BenchmarkRollbackFiler 100000000 20.6 ns/op 48.46 MB/s
|
||||
|
||||
bfBits: 7
|
||||
BenchmarkRollbackFiler 100000000 15.1 ns/op 66.12 MB/s
|
||||
|
||||
bfBits: 8
|
||||
BenchmarkRollbackFiler 100000000 10.5 ns/op 95.66 MB/s
|
||||
|
||||
bfBits: 9
|
||||
BenchmarkRollbackFiler 200000000 8.02 ns/op 124.74 MB/s
|
||||
|
||||
bfBits: 10
|
||||
BenchmarkRollbackFiler 200000000 9.25 ns/op 108.09 MB/s
|
||||
|
||||
bfBits: 11
|
||||
BenchmarkRollbackFiler 100000000 11.7 ns/op 85.47 MB/s
|
||||
|
||||
bfBits: 12
|
||||
BenchmarkRollbackFiler 100000000 17.2 ns/op 57.99 MB/s
|
||||
|
||||
bfBits: 13
|
||||
BenchmarkRollbackFiler 100000000 32.7 ns/op 30.58 MB/s
|
||||
|
||||
bfBits: 14
|
||||
BenchmarkRollbackFiler 50000000 39.6 ns/op 25.27 MB/s
|
||||
|
||||
*/
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/cznic/fileutil"
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
var (
|
||||
_ Filer = &bitFiler{} // Ensure bitFiler is a Filer.
|
||||
_ Filer = &RollbackFiler{} // ditto
|
||||
)
|
||||
|
||||
const (
|
||||
bfBits = 9
|
||||
bfSize = 1 << bfBits
|
||||
bfMask = bfSize - 1
|
||||
)
|
||||
|
||||
var (
|
||||
bitmask = [8]byte{1, 2, 4, 8, 16, 32, 64, 128}
|
||||
bitZeroPage bitPage
|
||||
allDirtyFlags [bfSize >> 3]byte
|
||||
)
|
||||
|
||||
func init() {
|
||||
for i := range allDirtyFlags {
|
||||
allDirtyFlags[i] = 0xff
|
||||
}
|
||||
}
|
||||
|
||||
type (
|
||||
bitPage struct {
|
||||
prev, next *bitPage
|
||||
data [bfSize]byte
|
||||
flags [bfSize >> 3]byte
|
||||
dirty bool
|
||||
}
|
||||
|
||||
bitFilerMap map[int64]*bitPage
|
||||
|
||||
bitFiler struct {
|
||||
parent Filer
|
||||
m bitFilerMap
|
||||
size int64
|
||||
sync.Mutex
|
||||
}
|
||||
)
|
||||
|
||||
func newBitFiler(parent Filer) (f *bitFiler, err error) {
|
||||
sz, err := parent.Size()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return &bitFiler{parent: parent, m: bitFilerMap{}, size: sz}, nil
|
||||
}
|
||||
|
||||
func (f *bitFiler) BeginUpdate() error { panic("internal error") }
|
||||
func (f *bitFiler) EndUpdate() error { panic("internal error") }
|
||||
func (f *bitFiler) Rollback() error { panic("internal error") }
|
||||
func (f *bitFiler) Sync() error { panic("internal error") }
|
||||
|
||||
func (f *bitFiler) Close() (err error) { return }
|
||||
func (f *bitFiler) Name() string { return fmt.Sprintf("%p.bitfiler", f) }
|
||||
func (f *bitFiler) Size() (int64, error) { return f.size, nil }
|
||||
|
||||
func (f *bitFiler) PunchHole(off, size int64) (err error) {
|
||||
first := off >> bfBits
|
||||
if off&bfMask != 0 {
|
||||
first++
|
||||
}
|
||||
off += size - 1
|
||||
last := off >> bfBits
|
||||
if off&bfMask != 0 {
|
||||
last--
|
||||
}
|
||||
if limit := f.size >> bfBits; last > limit {
|
||||
last = limit
|
||||
}
|
||||
f.Lock()
|
||||
for pgI := first; pgI <= last; pgI++ {
|
||||
pg := &bitPage{}
|
||||
pg.flags = allDirtyFlags
|
||||
f.m[pgI] = pg
|
||||
}
|
||||
f.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (f *bitFiler) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
avail := f.size - off
|
||||
pgI := off >> bfBits
|
||||
pgO := int(off & bfMask)
|
||||
rem := len(b)
|
||||
if int64(rem) >= avail {
|
||||
rem = int(avail)
|
||||
err = io.EOF
|
||||
}
|
||||
for rem != 0 && avail > 0 {
|
||||
f.Lock()
|
||||
pg := f.m[pgI]
|
||||
if pg == nil {
|
||||
pg = &bitPage{}
|
||||
if f.parent != nil {
|
||||
_, err = f.parent.ReadAt(pg.data[:], off&^bfMask)
|
||||
if err != nil && !fileutil.IsEOF(err) {
|
||||
f.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
err = nil
|
||||
}
|
||||
f.m[pgI] = pg
|
||||
}
|
||||
f.Unlock()
|
||||
nc := copy(b[:mathutil.Min(rem, bfSize)], pg.data[pgO:])
|
||||
pgI++
|
||||
pgO = 0
|
||||
rem -= nc
|
||||
n += nc
|
||||
b = b[nc:]
|
||||
off += int64(nc)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (f *bitFiler) Truncate(size int64) (err error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
switch {
|
||||
case size < 0:
|
||||
return &ErrINVAL{"Truncate size", size}
|
||||
case size == 0:
|
||||
f.m = bitFilerMap{}
|
||||
f.size = 0
|
||||
return
|
||||
}
|
||||
|
||||
first := size >> bfBits
|
||||
if size&bfMask != 0 {
|
||||
first++
|
||||
}
|
||||
last := f.size >> bfBits
|
||||
if f.size&bfMask != 0 {
|
||||
last++
|
||||
}
|
||||
for ; first < last; first++ {
|
||||
delete(f.m, first)
|
||||
}
|
||||
|
||||
f.size = size
|
||||
return
|
||||
}
|
||||
|
||||
func (f *bitFiler) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
off0 := off
|
||||
pgI := off >> bfBits
|
||||
pgO := int(off & bfMask)
|
||||
n = len(b)
|
||||
rem := n
|
||||
var nc int
|
||||
for rem != 0 {
|
||||
f.Lock()
|
||||
pg := f.m[pgI]
|
||||
if pg == nil {
|
||||
pg = &bitPage{}
|
||||
if f.parent != nil {
|
||||
_, err = f.parent.ReadAt(pg.data[:], off&^bfMask)
|
||||
if err != nil && !fileutil.IsEOF(err) {
|
||||
f.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
err = nil
|
||||
}
|
||||
f.m[pgI] = pg
|
||||
}
|
||||
f.Unlock()
|
||||
nc = copy(pg.data[pgO:], b)
|
||||
pgI++
|
||||
pg.dirty = true
|
||||
for i := pgO; i < pgO+nc; i++ {
|
||||
pg.flags[i>>3] |= bitmask[i&7]
|
||||
}
|
||||
pgO = 0
|
||||
rem -= nc
|
||||
b = b[nc:]
|
||||
off += int64(nc)
|
||||
}
|
||||
f.size = mathutil.MaxInt64(f.size, off0+int64(n))
|
||||
return
|
||||
}
|
||||
|
||||
func (f *bitFiler) link() {
|
||||
for pgI, pg := range f.m {
|
||||
nx, ok := f.m[pgI+1]
|
||||
if !ok || !nx.dirty {
|
||||
continue
|
||||
}
|
||||
|
||||
nx.prev, pg.next = pg, nx
|
||||
}
|
||||
}
|
||||
|
||||
func (f *bitFiler) dumpDirty(w io.WriterAt) (nwr int, err error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.link()
|
||||
for pgI, pg := range f.m {
|
||||
if !pg.dirty {
|
||||
continue
|
||||
}
|
||||
|
||||
for pg.prev != nil && pg.prev.dirty {
|
||||
pg = pg.prev
|
||||
pgI--
|
||||
}
|
||||
|
||||
for pg != nil && pg.dirty {
|
||||
last := false
|
||||
var off int64
|
||||
first := -1
|
||||
for i := 0; i < bfSize; i++ {
|
||||
flag := pg.flags[i>>3]&bitmask[i&7] != 0
|
||||
switch {
|
||||
case flag && !last: // Leading edge detected
|
||||
off = pgI<<bfBits + int64(i)
|
||||
first = i
|
||||
case !flag && last: // Trailing edge detected
|
||||
n, err := w.WriteAt(pg.data[first:i], off)
|
||||
if n != i-first {
|
||||
return 0, err
|
||||
}
|
||||
first = -1
|
||||
nwr++
|
||||
}
|
||||
|
||||
last = flag
|
||||
}
|
||||
if first >= 0 {
|
||||
i := bfSize
|
||||
n, err := w.WriteAt(pg.data[first:i], off)
|
||||
if n != i-first {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
nwr++
|
||||
}
|
||||
|
||||
pg.dirty = false
|
||||
pg = pg.next
|
||||
pgI++
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// RollbackFiler is a Filer implementing structural transaction handling.
|
||||
// Structural transactions should be small and short-lived because all
|
||||
// uncommitted data are held in memory until committed or discarded by a
|
||||
// Rollback.
|
||||
//
|
||||
// While using RollbackFiler, every intended update of the wrapped Filer, by
|
||||
// WriteAt, Truncate or PunchHole, _must_ be made within a transaction.
|
||||
// Attempts to do it outside of a transaction will return ErrPERM. OTOH,
|
||||
// invoking ReadAt outside of a transaction is not a problem.
|
||||
//
|
||||
// No nested transactions: All updates within a transaction are held in memory.
|
||||
// On a matching EndUpdate the updates held in memory are actually written to
|
||||
// the wrapped Filer.
|
||||
//
|
||||
// Nested transactions: Correct data will be seen from RollbackFiler when any
|
||||
// level of a nested transaction is rolled back. The actual writing to the
|
||||
// wrapped Filer happens only when the outermost transaction nesting level is
|
||||
// closed.
|
||||
//
|
||||
// Invoking Rollback is an alternative to EndUpdate. It discards all changes
|
||||
// made at the current transaction level and returns the "state" (possibly not
|
||||
// yet persisted) of the Filer to what it was before the corresponding
|
||||
// BeginUpdate.
|
||||
//
|
||||
// During an open transaction, all reads (using ReadAt) are "dirty" reads,
|
||||
// seeing the uncommitted changes made to the Filer's data.
|
||||
//
|
||||
// Lldb databases should be based upon a RollbackFiler.
|
||||
//
|
||||
// With a wrapped MemFiler one gets transactional memory. With, for example, a
|
||||
// wrapped disk based SimpleFileFiler it protects against at least some HW
|
||||
// errors - if Rollback is properly invoked on such failures and/or if there's
|
||||
// some WAL or 2PC or whatever other safe mechanism based recovery procedure
|
||||
// used by the client.
|
||||
//
|
||||
// The "real" writes to the wrapped Filer (or WAL instead) go through the
|
||||
// writerAt supplied to NewRollbackFiler.
|
||||
//
|
||||
// List of functions/methods which are recommended to be wrapped in a
|
||||
// BeginUpdate/EndUpdate structural transaction:
|
||||
//
|
||||
// Allocator.Alloc
|
||||
// Allocator.Free
|
||||
// Allocator.Realloc
|
||||
//
|
||||
// CreateBTree
|
||||
// RemoveBTree
|
||||
// BTree.Clear
|
||||
// BTree.Delete
|
||||
// BTree.DeleteAny
|
||||
|
||||
// BTree.Extract
|
||||
// BTree.Get (it can mutate the DB)
|
||||
// BTree.Put
|
||||
// BTree.Set
|
||||
//
|
||||
// NOTE: RollbackFiler is a generic solution intended to wrap Filers provided
|
||||
// by this package which do not implement any of the transactional methods.
|
||||
// RollbackFiler thus _does not_ invoke any of the transactional methods of its
|
||||
// wrapped Filer.
|
||||
//
|
||||
// RollbackFiler is safe for concurrent use by multiple goroutines.
|
||||
type RollbackFiler struct {
|
||||
mu sync.RWMutex
|
||||
inCallback bool
|
||||
inCallbackMu sync.RWMutex
|
||||
bitFiler *bitFiler
|
||||
checkpoint func(int64) error
|
||||
closed bool
|
||||
f Filer
|
||||
parent Filer
|
||||
tlevel int // transaction nesting level, 0 == not in transaction
|
||||
writerAt io.WriterAt
|
||||
|
||||
// afterRollback, if not nil, is called after performing Rollback
|
||||
// without errors.
|
||||
afterRollback func() error
|
||||
}
|
||||
|
||||
// NewRollbackFiler returns a RollbackFiler wrapping f.
|
||||
//
|
||||
// The checkpoint parameter
|
||||
//
|
||||
// The checkpoint function is called after closing (by EndUpdate) the
|
||||
// uppermost open transaction, if all calls of writerAt were successful and the
|
||||
// DB (or eg. a WAL) is thus now in a consistent state (virtually, in the ideal
|
||||
// world with no write caches, no HW failures, no process crashes, ...).
|
||||
//
|
||||
// NOTE: In, for example, a 2PC it is necessary to reflect also the sz
|
||||
// parameter as the new file size (as in the parameter to Truncate). All
|
||||
// changes were successfully written already by writerAt before invoking
|
||||
// checkpoint.
|
||||
//
|
||||
// The writerAt parameter
|
||||
//
|
||||
// The writerAt interface is used to commit the updates of the wrapped Filer.
|
||||
// If any invocation of writerAt fails then a non nil error will be returned
|
||||
// from EndUpdate and checkpoint will _not_ be called. Nor is it necessary to
|
||||
// call Rollback. The rule of thumb: The [structural] transaction [level] is
|
||||
// closed by invoking exactly once one of EndUpdate _or_ Rollback.
|
||||
//
|
||||
// It is presumed that writerAt uses WAL or 2PC or whatever other safe
|
||||
// mechanism to physically commit the updates.
|
||||
//
|
||||
// Updates performed by invocations of writerAt are byte-precise, but not
|
||||
// necessarily maximum possible length precise. IOW, for example an update
|
||||
// crossing page boundaries may be performed by more than one writerAt
|
||||
// invocation. No offset sorting is performed. This may change if it proves
|
||||
// to be a problem. Such change would be considered backward compatible.
|
||||
//
|
||||
// NOTE: Using RollbackFiler, but failing to ever invoke a matching "closing"
|
||||
// EndUpdate after an "opening" BeginUpdate means neither writerAt nor
|
||||
// checkpoint will ever get called - with all the possible data loss
|
||||
// consequences.
|
||||
func NewRollbackFiler(f Filer, checkpoint func(sz int64) error, writerAt io.WriterAt) (r *RollbackFiler, err error) {
|
||||
if f == nil || checkpoint == nil || writerAt == nil {
|
||||
return nil, &ErrINVAL{Src: "lldb.NewRollbackFiler, nil argument"}
|
||||
}
|
||||
|
||||
return &RollbackFiler{
|
||||
checkpoint: checkpoint,
|
||||
f: f,
|
||||
writerAt: writerAt,
|
||||
}, nil
|
||||
}
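// Illustrative sketch (not part of the original lldb source): transactional
// memory built from a MemFiler wrapped in a RollbackFiler, as suggested by the
// doc comment above. The same MemFiler serves as the wrapped Filer and as the
// io.WriterAt receiving the committed writes; the checkpoint callback simply
// truncates it to the final size.
func exampleRollbackFiler() error {
	backing := NewMemFiler()
	r, err := NewRollbackFiler(
		backing,
		func(sz int64) error { return backing.Truncate(sz) },
		backing,
	)
	if err != nil {
		return err
	}

	if err := r.BeginUpdate(); err != nil {
		return err
	}
	if _, err := r.WriteAt([]byte("committed"), 0); err != nil {
		return err
	}
	// EndUpdate dumps the dirty pages through writerAt and then calls checkpoint.
	return r.EndUpdate()
}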
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) BeginUpdate() (err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
parent := r.f
|
||||
if r.tlevel != 0 {
|
||||
parent = r.bitFiler
|
||||
}
|
||||
r.bitFiler, err = newBitFiler(parent)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
r.tlevel++
|
||||
return
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
//
|
||||
// Close will return an error if not invoked at nesting level 0. However, to
|
||||
// allow emergency closing from e.g. a signal handler, if Close is invoked
|
||||
// within one or more open transactions, it rolls back any uncommitted open
|
||||
// transactions and performs the Close operation.
|
||||
//
|
||||
// IOW: Regardless of the transaction nesting level the Close is always
|
||||
// performed but any uncommitted transaction data are lost.
|
||||
func (r *RollbackFiler) Close() (err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if r.closed {
|
||||
return &ErrPERM{r.f.Name() + ": Already closed"}
|
||||
}
|
||||
|
||||
r.closed = true
|
||||
if err = r.f.Close(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.tlevel != 0 {
|
||||
err = &ErrPERM{r.f.Name() + ": Close inside an open transaction"}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) EndUpdate() (err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if r.tlevel == 0 {
|
||||
return &ErrPERM{r.f.Name() + " : EndUpdate outside of a transaction"}
|
||||
}
|
||||
|
||||
sz, err := r.size() // Cannot call .Size() -> deadlock
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
r.tlevel--
|
||||
bf := r.bitFiler
|
||||
parent := bf.parent
|
||||
w := r.writerAt
|
||||
if r.tlevel != 0 {
|
||||
w = parent
|
||||
}
|
||||
nwr, err := bf.dumpDirty(w)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
switch {
|
||||
case r.tlevel == 0:
|
||||
r.bitFiler = nil
|
||||
if nwr == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
return r.checkpoint(sz)
|
||||
default:
|
||||
r.bitFiler = parent.(*bitFiler)
|
||||
sz, _ := bf.Size() // bitFiler.Size() never returns err != nil
|
||||
return parent.Truncate(sz)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) Name() string {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
|
||||
return r.f.Name()
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) PunchHole(off, size int64) error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if r.tlevel == 0 {
|
||||
return &ErrPERM{r.f.Name() + ": PunchHole outside of a transaction"}
|
||||
}
|
||||
|
||||
if off < 0 {
|
||||
return &ErrINVAL{r.f.Name() + ": PunchHole off", off}
|
||||
}
|
||||
|
||||
if size < 0 || off+size > r.bitFiler.size {
|
||||
return &ErrINVAL{r.f.Name() + ": PunchHole size", size}
|
||||
}
|
||||
|
||||
return r.bitFiler.PunchHole(off, size)
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
r.inCallbackMu.RLock()
|
||||
defer r.inCallbackMu.RUnlock()
|
||||
if !r.inCallback {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
}
|
||||
if r.tlevel == 0 {
|
||||
return r.f.ReadAt(b, off)
|
||||
}
|
||||
|
||||
return r.bitFiler.ReadAt(b, off)
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) Rollback() (err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if r.tlevel == 0 {
|
||||
return &ErrPERM{r.f.Name() + ": Rollback outside of a transaction"}
|
||||
}
|
||||
|
||||
if r.tlevel > 1 {
|
||||
r.bitFiler = r.bitFiler.parent.(*bitFiler)
|
||||
}
|
||||
r.tlevel--
|
||||
if f := r.afterRollback; f != nil {
|
||||
r.inCallbackMu.Lock()
|
||||
r.inCallback = true
|
||||
r.inCallbackMu.Unlock()
|
||||
defer func() {
|
||||
r.inCallbackMu.Lock()
|
||||
r.inCallback = false
|
||||
r.inCallbackMu.Unlock()
|
||||
}()
|
||||
return f()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *RollbackFiler) size() (sz int64, err error) {
|
||||
if r.tlevel == 0 {
|
||||
return r.f.Size()
|
||||
}
|
||||
|
||||
return r.bitFiler.Size()
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) Size() (sz int64, err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
return r.size()
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) Sync() error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
return r.f.Sync()
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) Truncate(size int64) error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if r.tlevel == 0 {
|
||||
return &ErrPERM{r.f.Name() + ": Truncate outside of a transaction"}
|
||||
}
|
||||
|
||||
return r.bitFiler.Truncate(size)
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if r.tlevel == 0 {
|
||||
return 0, &ErrPERM{r.f.Name() + ": WriteAt outside of a transaction"}
|
||||
}
|
||||
|
||||
return r.bitFiler.WriteAt(b, off)
|
||||
}
|
27
vendor/github.com/cznic/sortutil/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2014 The sortutil Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the names of the authors nor the names of the
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
271
vendor/github.com/cznic/sortutil/sortutil.go
generated
vendored
Normal file
@ -0,0 +1,271 @@
|
||||
// Copyright 2014 The sortutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package sortutil provides utilities supplementing the standard 'sort' package.
|
||||
//
|
||||
// Changelog
|
||||
//
|
||||
// 2015-06-17: Added utils for math/big.{Int,Rat}.
|
||||
package sortutil
|
||||
|
||||
import (
	"math/big"
	"sort"
)
|
||||
|
||||
// BigIntSlice attaches the methods of sort.Interface to []*big.Int, sorting in increasing order.
|
||||
type BigIntSlice []*big.Int
|
||||
|
||||
func (s BigIntSlice) Len() int { return len(s) }
|
||||
func (s BigIntSlice) Less(i, j int) bool { return s[i].Cmp(s[j]) < 0 }
|
||||
func (s BigIntSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// Sort is a convenience method.
|
||||
func (s BigIntSlice) Sort() {
|
||||
sort.Sort(s)
|
||||
}
|
||||
|
||||
// SearchBigInts searches for x in a sorted slice of *big.Int and returns the index
|
||||
// as specified by sort.Search. The slice must be sorted in ascending order.
|
||||
func SearchBigInts(a []*big.Int, x *big.Int) int {
|
||||
return sort.Search(len(a), func(i int) bool { return a[i].Cmp(x) >= 0 })
|
||||
}
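// Illustrative sketch (not part of the original sortutil source): sorting a
// BigIntSlice and locating an element with SearchBigInts, mirroring the
// sort.SearchInts convention.
func exampleSearchBigInts() int {
	s := BigIntSlice{big.NewInt(7), big.NewInt(2)}
	s.Sort()                               // s is now [2 7]
	return SearchBigInts(s, big.NewInt(7)) // 1, the index of the first element >= 7
}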
|
||||
|
||||
// BigRatSlice attaches the methods of sort.Interface to []*big.Rat, sorting in increasing order.
|
||||
type BigRatSlice []*big.Rat
|
||||
|
||||
func (s BigRatSlice) Len() int { return len(s) }
|
||||
func (s BigRatSlice) Less(i, j int) bool { return s[i].Cmp(s[j]) < 0 }
|
||||
func (s BigRatSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// Sort is a convenience method.
|
||||
func (s BigRatSlice) Sort() {
|
||||
sort.Sort(s)
|
||||
}
|
||||
|
||||
// SearchBigRats searches for x in a sorted slice of *big.Rat and returns the index
|
||||
// as specified by sort.Search. The slice must be sorted in ascending order.
|
||||
func SearchBigRats(a []*big.Rat, x *big.Rat) int {
	return sort.Search(len(a), func(i int) bool { return a[i].Cmp(x) >= 0 })
}

// ByteSlice attaches the methods of sort.Interface to []byte, sorting in increasing order.
type ByteSlice []byte

func (s ByteSlice) Len() int           { return len(s) }
func (s ByteSlice) Less(i, j int) bool { return s[i] < s[j] }
func (s ByteSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// Sort is a convenience method.
func (s ByteSlice) Sort() {
	sort.Sort(s)
}

// SearchBytes searches for x in a sorted slice of bytes and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchBytes(a []byte, x byte) int {
	return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}

// Float32Slice attaches the methods of sort.Interface to []float32, sorting in increasing order.
type Float32Slice []float32

func (s Float32Slice) Len() int           { return len(s) }
func (s Float32Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s Float32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// Sort is a convenience method.
func (s Float32Slice) Sort() {
	sort.Sort(s)
}

// SearchFloat32s searches for x in a sorted slice of float32 and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchFloat32s(a []float32, x float32) int {
	return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}

// Int8Slice attaches the methods of sort.Interface to []int8, sorting in increasing order.
type Int8Slice []int8

func (s Int8Slice) Len() int           { return len(s) }
func (s Int8Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s Int8Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// Sort is a convenience method.
func (s Int8Slice) Sort() {
	sort.Sort(s)
}

// SearchInt8s searches for x in a sorted slice of int8 and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchInt8s(a []int8, x int8) int {
	return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}

// Int16Slice attaches the methods of sort.Interface to []int16, sorting in increasing order.
type Int16Slice []int16

func (s Int16Slice) Len() int           { return len(s) }
func (s Int16Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s Int16Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// Sort is a convenience method.
func (s Int16Slice) Sort() {
	sort.Sort(s)
}

// SearchInt16s searches for x in a sorted slice of int16 and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchInt16s(a []int16, x int16) int {
	return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}

// Int32Slice attaches the methods of sort.Interface to []int32, sorting in increasing order.
type Int32Slice []int32

func (s Int32Slice) Len() int           { return len(s) }
func (s Int32Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s Int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// Sort is a convenience method.
func (s Int32Slice) Sort() {
	sort.Sort(s)
}

// SearchInt32s searches for x in a sorted slice of int32 and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchInt32s(a []int32, x int32) int {
	return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}

// Int64Slice attaches the methods of sort.Interface to []int64, sorting in increasing order.
type Int64Slice []int64

func (s Int64Slice) Len() int           { return len(s) }
func (s Int64Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s Int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// Sort is a convenience method.
func (s Int64Slice) Sort() {
	sort.Sort(s)
}

// SearchInt64s searches for x in a sorted slice of int64 and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchInt64s(a []int64, x int64) int {
	return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}

// UintSlice attaches the methods of sort.Interface to []uint, sorting in increasing order.
type UintSlice []uint

func (s UintSlice) Len() int           { return len(s) }
func (s UintSlice) Less(i, j int) bool { return s[i] < s[j] }
func (s UintSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// Sort is a convenience method.
func (s UintSlice) Sort() {
	sort.Sort(s)
}

// SearchUints searches for x in a sorted slice of uints and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchUints(a []uint, x uint) int {
	return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}

// Uint16Slice attaches the methods of sort.Interface to []uint16, sorting in increasing order.
type Uint16Slice []uint16

func (s Uint16Slice) Len() int           { return len(s) }
func (s Uint16Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s Uint16Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// Sort is a convenience method.
func (s Uint16Slice) Sort() {
	sort.Sort(s)
}

// SearchUint16s searches for x in a sorted slice of uint16 and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchUint16s(a []uint16, x uint16) int {
	return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}

// Uint32Slice attaches the methods of sort.Interface to []uint32, sorting in increasing order.
type Uint32Slice []uint32

func (s Uint32Slice) Len() int           { return len(s) }
func (s Uint32Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s Uint32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// Sort is a convenience method.
func (s Uint32Slice) Sort() {
	sort.Sort(s)
}

// SearchUint32s searches for x in a sorted slice of uint32 and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchUint32s(a []uint32, x uint32) int {
	return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}

// Uint64Slice attaches the methods of sort.Interface to []uint64, sorting in increasing order.
type Uint64Slice []uint64

func (s Uint64Slice) Len() int           { return len(s) }
func (s Uint64Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s Uint64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// Sort is a convenience method.
func (s Uint64Slice) Sort() {
	sort.Sort(s)
}

// SearchUint64s searches for x in a sorted slice of uint64 and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchUint64s(a []uint64, x uint64) int {
	return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}

// RuneSlice attaches the methods of sort.Interface to []rune, sorting in increasing order.
type RuneSlice []rune

func (s RuneSlice) Len() int           { return len(s) }
func (s RuneSlice) Less(i, j int) bool { return s[i] < s[j] }
func (s RuneSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// Sort is a convenience method.
func (s RuneSlice) Sort() {
	sort.Sort(s)
}

// SearchRunes searches for x in a sorted slice of runes and returns the index
// as specified by sort.Search. The slice must be sorted in ascending order.
func SearchRunes(a []rune, x rune) int {
	return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}

// Dedupe returns n, the number of distinct elements in data. The resulting
// elements are sorted in elements [0, n) or data[:n] for a slice.
func Dedupe(data sort.Interface) (n int) {
	if n = data.Len(); n < 2 {
		return n
	}

	sort.Sort(data)
	a, b := 0, 1
	for b < n {
		if data.Less(a, b) {
			a++
			if a != b {
				data.Swap(a, b)
			}
		}
		b++
	}
	return a + 1
}
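// Illustrative sketch, not part of the vendored file: Dedupe sorts data in
// place and reports how many distinct elements ended up at the front. The
// example assumes "fmt" is imported in this package; the function name is
// arbitrary.
func exampleDedupe() {
	s := Int8Slice{3, 1, 3, 2, 1}
	n := Dedupe(s)
	fmt.Println(s[:n]) // [1 2 3]
}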
27
vendor/github.com/cznic/strutil/LICENSE
generated
vendored
Normal file
27
vendor/github.com/cznic/strutil/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
Copyright (c) 2014 The strutil Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
645
vendor/github.com/cznic/strutil/strutil.go
generated
vendored
Normal file
645
vendor/github.com/cznic/strutil/strutil.go
generated
vendored
Normal file
@ -0,0 +1,645 @@
// Copyright (c) 2014 The sortutil Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package strutil collects utils supplemental to the standard strings package.
package strutil

import (
	"bytes"
	"encoding/base32"
	"encoding/base64"
	"fmt"
	"io"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"sync"
)

// Base32ExtDecode decodes base32 extended (RFC 4648) text to binary data.
func Base32ExtDecode(text []byte) (data []byte, err error) {
	n := base32.HexEncoding.DecodedLen(len(text))
	data = make([]byte, n)
	decoder := base32.NewDecoder(base32.HexEncoding, bytes.NewBuffer(text))
	if n, err = decoder.Read(data); err != nil {
		n = 0
	}
	data = data[:n]
	return
}

// Base32ExtEncode encodes binary data to base32 extended (RFC 4648) encoded text.
func Base32ExtEncode(data []byte) (text []byte) {
	n := base32.HexEncoding.EncodedLen(len(data))
	buf := bytes.NewBuffer(make([]byte, 0, n))
	encoder := base32.NewEncoder(base32.HexEncoding, buf)
	encoder.Write(data)
	encoder.Close()
	if buf.Len() != n {
		panic("internal error")
	}
	return buf.Bytes()
}

// Base64Decode decodes base64 text to binary data.
func Base64Decode(text []byte) (data []byte, err error) {
	n := base64.StdEncoding.DecodedLen(len(text))
	data = make([]byte, n)
	decoder := base64.NewDecoder(base64.StdEncoding, bytes.NewBuffer(text))
	if n, err = decoder.Read(data); err != nil {
		n = 0
	}
	data = data[:n]
	return
}

// Base64Encode encodes binary data to base64 encoded text.
func Base64Encode(data []byte) (text []byte) {
	n := base64.StdEncoding.EncodedLen(len(data))
	buf := bytes.NewBuffer(make([]byte, 0, n))
	encoder := base64.NewEncoder(base64.StdEncoding, buf)
	encoder.Write(data)
	encoder.Close()
	if buf.Len() != n {
		panic("internal error")
	}
	return buf.Bytes()
}

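// Illustrative sketch, not part of the vendored file: a round trip through the
// Base64 helpers above, using the "fmt" import already present in this file.
func exampleBase64RoundTrip() {
	text := Base64Encode([]byte("hello, syncthing"))
	data, err := Base64Decode(text)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", data) // prints: hello, syncthing
}
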
// Formatter is an io.Writer extended by a fmt.Printf like function Format
type Formatter interface {
	io.Writer
	Format(format string, args ...interface{}) (n int, errno error)
}

type indentFormatter struct {
	io.Writer
	indent      []byte
	indentLevel int
	state       int
}

const (
	st0 = iota
	stBOL
	stPERC
	stBOLPERC
)

// IndentFormatter returns a new Formatter which interprets %i and %u in the
// Format() format string as indent and undent commands. The commands can
// nest. The Formatter writes to io.Writer 'w' and inserts one 'indent'
// string per current indent level value.
// Behaviour of commands reaching negative indent levels is undefined.
// IndentFormatter(os.Stdout, "\t").Format("abc%d%%e%i\nx\ny\n%uz\n", 3)
// output:
// abc3%e
// x
// y
// z
// The Go quoted string literal form of the above is:
// "abc%%e\n\tx\n\tx\nz\n"
// The commands can be scattered between separate invocations of Format(),
// i.e. the formatter keeps track of the indent level and knows if it is
// positioned on start of a line and should emit indentation(s).
// The same output as above can be produced by e.g.:
// f := IndentFormatter(os.Stdout, " ")
// f.Format("abc%d%%e%i\nx\n", 3)
// f.Format("y\n%uz\n")
func IndentFormatter(w io.Writer, indent string) Formatter {
	return &indentFormatter{w, []byte(indent), 0, stBOL}
}

func (f *indentFormatter) format(flat bool, format string, args ...interface{}) (n int, errno error) {
	buf := []byte{}
	for i := 0; i < len(format); i++ {
		c := format[i]
		switch f.state {
		case st0:
			switch c {
			case '\n':
				cc := c
				if flat && f.indentLevel != 0 {
					cc = ' '
				}
				buf = append(buf, cc)
				f.state = stBOL
			case '%':
				f.state = stPERC
			default:
				buf = append(buf, c)
			}
		case stBOL:
			switch c {
			case '\n':
				cc := c
				if flat && f.indentLevel != 0 {
					cc = ' '
				}
				buf = append(buf, cc)
			case '%':
				f.state = stBOLPERC
			default:
				if !flat {
					for i := 0; i < f.indentLevel; i++ {
						buf = append(buf, f.indent...)
					}
				}
				buf = append(buf, c)
				f.state = st0
			}
		case stBOLPERC:
			switch c {
			case 'i':
				f.indentLevel++
				f.state = stBOL
			case 'u':
				f.indentLevel--
				f.state = stBOL
			default:
				if !flat {
					for i := 0; i < f.indentLevel; i++ {
						buf = append(buf, f.indent...)
					}
				}
				buf = append(buf, '%', c)
				f.state = st0
			}
		case stPERC:
			switch c {
			case 'i':
				f.indentLevel++
				f.state = st0
			case 'u':
				f.indentLevel--
				f.state = st0
			default:
				buf = append(buf, '%', c)
				f.state = st0
			}
		default:
			panic("unexpected state")
		}
	}
	switch f.state {
	case stPERC, stBOLPERC:
		buf = append(buf, '%')
	}
	return f.Write([]byte(fmt.Sprintf(string(buf), args...)))
}

func (f *indentFormatter) Format(format string, args ...interface{}) (n int, errno error) {
	return f.format(false, format, args...)
}

type flatFormatter indentFormatter

// FlatFormatter returns a newly created Formatter with the same functionality as the one returned
// by IndentFormatter except it allows a newline in the 'format' string argument of Format
// to pass through iff indent level is currently zero.
//
// If indent level is non-zero then such new lines are changed to a space character.
// There is no indent string, the %i and %u format verbs are used solely to determine the indent level.
//
// The FlatFormatter is intended for flattening of normally nested structure textual representation to
// a one top level structure per line form.
// FlatFormatter(os.Stdout).Format("abc%d%%e%i\nx\ny\n%uz\n", 3)
// output in the form of a Go quoted string literal:
// "abc3%e x y z\n"
func FlatFormatter(w io.Writer) Formatter {
	return (*flatFormatter)(IndentFormatter(w, "").(*indentFormatter))
}

func (f *flatFormatter) Format(format string, args ...interface{}) (n int, errno error) {
	return (*indentFormatter)(f).format(true, format, args...)
}

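// Illustrative sketch, not part of the vendored file: %i raises and %u lowers
// the indent level, so every line started while the level is non-zero picks up
// one copy of the indent string. Writes go to a bytes.Buffer here to keep the
// example self-contained; the function name is arbitrary.
func exampleIndentFormatter() string {
	var b bytes.Buffer
	f := IndentFormatter(&b, "\t")
	f.Format("case %q:%i\nreturn %d\n%u", "a", 1)
	return b.String() // "case \"a\":\n\treturn 1\n"
}
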
// Pool handles aligning of strings having equal values to the same string instance.
// Intended use is to conserve some memory e.g. where a large number of identically valued strings
// with non-identical backing arrays may exist in several semantically distinct instances of some structs.
// Pool is *not* concurrent access safe. It doesn't handle common prefix/suffix aligning,
// e.g. having s1 == "abc" and s2 == "bc", s2 is not automatically aligned as s1[1:].
type Pool struct {
	pool map[string]string
}

// NewPool returns a newly created Pool.
func NewPool() *Pool {
	return &Pool{map[string]string{}}
}

// Align returns a string with the same value as its argument. It guarantees that
// all aligned strings share a single instance in memory.
func (p *Pool) Align(s string) string {
	if a, ok := p.pool[s]; ok {
		return a
	}

	s = StrPack(s)
	p.pool[s] = s
	return s
}

// Count returns the number of items in the pool.
func (p *Pool) Count() int {
	return len(p.pool)
}

// GoPool is a concurrent access safe version of Pool.
type GoPool struct {
	pool map[string]string
	rwm  *sync.RWMutex
}

// NewGoPool returns a newly created GoPool.
func NewGoPool() (p *GoPool) {
	return &GoPool{map[string]string{}, &sync.RWMutex{}}
}

// Align returns a string with the same value as its argument. It guarantees that
// all aligned strings share a single instance in memory.
func (p *GoPool) Align(s string) (y string) {
	if s != "" {
		p.rwm.RLock()               // R++
		if a, ok := p.pool[s]; ok { // found
			p.rwm.RUnlock() // R--
			return a
		}

		p.rwm.RUnlock() // R--
		// not found but with a race condition, retry within a write lock
		p.rwm.Lock()                // W++
		defer p.rwm.Unlock()        // W--
		if a, ok := p.pool[s]; ok { // done in a race
			return a
		}

		// we won
		s = StrPack(s)
		p.pool[s] = s
		return s
	}

	return
}

// Count returns the number of items in the pool.
func (p *GoPool) Count() int {
	return len(p.pool)
}

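// Illustrative sketch, not part of the vendored file: equal strings handed to
// Align come back sharing a single pooled instance, however many distinct
// backing arrays the callers started with.
func examplePool() {
	p := NewPool()
	a := p.Align(string([]byte("device-id")))
	b := p.Align(string([]byte("device-id")))
	fmt.Println(a == b, p.Count()) // true 1
}
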
// Dict is a string <-> id bijection. Dict is *not* concurrent access safe for assigning new ids
// to strings not yet contained in the bijection.
// Id for an empty string is guaranteed to be 0,
// thus Id for any non empty string is guaranteed to be non zero.
type Dict struct {
	si map[string]int
	is []string
}

// NewDict returns a newly created Dict.
func NewDict() (d *Dict) {
	d = &Dict{map[string]int{}, []string{}}
	d.Id("")
	return
}

// Count returns the number of items in the dict.
func (d *Dict) Count() int {
	return len(d.is)
}

// Id maps string s to its numeric identifier.
func (d *Dict) Id(s string) (y int) {
	if y, ok := d.si[s]; ok {
		return y
	}

	s = StrPack(s)
	y = len(d.is)
	d.si[s] = y
	d.is = append(d.is, s)
	return
}

// S maps an id to its string value and ok == true. Id values not contained in the bijection
// return "", false.
func (d *Dict) S(id int) (s string, ok bool) {
	if id >= len(d.is) {
		return "", false
	}
	return d.is[id], true
}

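// Illustrative sketch, not part of the vendored file: ids are handed out in
// insertion order and the empty string is always id 0, so the first real
// string gets id 1.
func exampleDict() {
	d := NewDict()
	id := d.Id("folder-label")
	s, ok := d.S(id)
	fmt.Println(id, s, ok) // 1 folder-label true
}
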
// GoDict is a concurrent access safe version of Dict.
type GoDict struct {
	si  map[string]int
	is  []string
	rwm *sync.RWMutex
}

// NewGoDict returns a newly created GoDict.
func NewGoDict() (d *GoDict) {
	d = &GoDict{map[string]int{}, []string{}, &sync.RWMutex{}}
	d.Id("")
	return
}

// Count returns the number of items in the dict.
func (d *GoDict) Count() int {
	return len(d.is)
}

// Id maps string s to its numeric identifier. The implementation prefers returning
// an existing id over assigning a new one.
func (d *GoDict) Id(s string) (y int) {
	d.rwm.RLock()             // R++
	if y, ok := d.si[s]; ok { // found
		d.rwm.RUnlock() // R--
		return y
	}

	d.rwm.RUnlock() // R--

	// not found but with a race condition
	d.rwm.Lock()              // W++ recheck with write lock
	defer d.rwm.Unlock()      // W--
	if y, ok := d.si[s]; ok { // some other goroutine won already
		return y
	}

	// a race free not found state => insert the string
	s = StrPack(s)
	y = len(d.is)
	d.si[s] = y
	d.is = append(d.is, s)
	return
}

// S maps an id to its string value and ok == true. Id values not contained in the bijection
// return "", false.
func (d *GoDict) S(id int) (s string, ok bool) {
	d.rwm.RLock()         // R++
	defer d.rwm.RUnlock() // R--
	if id >= len(d.is) {
		return "", false
	}
	return d.is[id], true
}

// StrPack returns a new instance of s which is tightly packed in memory.
// It is intended for avoiding the situation where having a live reference
// to a string slice over an unreferenced bigger underlying string keeps the bigger one
// in memory anyway - it can't be GCed.
func StrPack(s string) string {
	return string([]byte(s))
}

// JoinFields returns strings in flds joined by sep. Flds may contain arbitrary
// bytes, including the sep as they are safely escaped. JoinFields panics if
// sep is the backslash character or if len(sep) != 1.
func JoinFields(flds []string, sep string) string {
	if len(sep) != 1 || sep == "\\" {
		panic("invalid separator")
	}

	a := make([]string, len(flds))
	for i, v := range flds {
		v = strings.Replace(v, "\\", "\\0", -1)
		a[i] = strings.Replace(v, sep, "\\1", -1)
	}
	return strings.Join(a, sep)
}

// SplitFields splits s, which must be produced by JoinFields using the same
// sep, into flds. SplitFields panics if sep is the backslash character or if
// len(sep) != 1.
func SplitFields(s, sep string) (flds []string) {
	if len(sep) != 1 || sep == "\\" {
		panic("invalid separator")
	}

	a := strings.Split(s, sep)
	r := make([]string, len(a))
	for i, v := range a {
		v = strings.Replace(v, "\\1", sep, -1)
		r[i] = strings.Replace(v, "\\0", "\\", -1)
	}
	return r
}

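// Illustrative sketch, not part of the vendored file: separators and
// backslashes inside a field survive the join/split round trip because they
// are rewritten to escape sequences before joining.
func exampleJoinSplitFields() {
	joined := JoinFields([]string{"alpha", "b|c", `d\e`}, "|")
	fmt.Println(SplitFields(joined, "|")) // [alpha b|c d\e]
}
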
// PrettyPrintHooks allow customizing the result of PrettyPrint for types
// listed in the map value.
type PrettyPrintHooks map[reflect.Type]func(f Formatter, v interface{}, prefix, suffix string)

// PrettyString returns the output of PrettyPrint as a string.
func PrettyString(v interface{}, prefix, suffix string, hooks PrettyPrintHooks) string {
	var b bytes.Buffer
	PrettyPrint(&b, v, prefix, suffix, hooks)
	return b.String()
}

// PrettyPrint pretty prints v to w. Zero values and unexported struct fields
// are omitted.
func PrettyPrint(w io.Writer, v interface{}, prefix, suffix string, hooks PrettyPrintHooks) {
	if v == nil {
		return
	}

	f := IndentFormatter(w, "· ")

	defer func() {
		if e := recover(); e != nil {
			f.Format("\npanic: %v", e)
		}
	}()

	prettyPrint(nil, f, prefix, suffix, v, hooks)
}

func prettyPrint(protect map[interface{}]struct{}, sf Formatter, prefix, suffix string, v interface{}, hooks PrettyPrintHooks) {
	if v == nil {
		return
	}

	rt := reflect.TypeOf(v)
	if handler := hooks[rt]; handler != nil {
		handler(sf, v, prefix, suffix)
		return
	}

	rv := reflect.ValueOf(v)
	switch rt.Kind() {
	case reflect.Slice:
		if rv.Len() == 0 {
			return
		}

		sf.Format("%s[]%T{ // len %d%i\n", prefix, rv.Index(0).Interface(), rv.Len())
		for i := 0; i < rv.Len(); i++ {
			prettyPrint(protect, sf, fmt.Sprintf("%d: ", i), ",\n", rv.Index(i).Interface(), hooks)
		}
		suffix = strings.Replace(suffix, "%", "%%", -1)
		sf.Format("%u}" + suffix)
	case reflect.Array:
		if reflect.Zero(rt).Interface() == rv.Interface() {
			return
		}

		sf.Format("%s[%d]%T{%i\n", prefix, rv.Len(), rv.Index(0).Interface())
		for i := 0; i < rv.Len(); i++ {
			prettyPrint(protect, sf, fmt.Sprintf("%d: ", i), ",\n", rv.Index(i).Interface(), hooks)
		}
		suffix = strings.Replace(suffix, "%", "%%", -1)
		sf.Format("%u}" + suffix)
	case reflect.Struct:
		if rt.NumField() == 0 {
			return
		}

		if reflect.DeepEqual(reflect.Zero(rt).Interface(), rv.Interface()) {
			return
		}

		sf.Format("%s%T{%i\n", prefix, v)
		for i := 0; i < rt.NumField(); i++ {
			f := rv.Field(i)
			if !f.CanInterface() {
				continue
			}

			prettyPrint(protect, sf, fmt.Sprintf("%s: ", rt.Field(i).Name), ",\n", f.Interface(), hooks)
		}
		suffix = strings.Replace(suffix, "%", "%%", -1)
		sf.Format("%u}" + suffix)
	case reflect.Ptr:
		if rv.IsNil() {
			return
		}

		rvi := rv.Interface()
		if _, ok := protect[rvi]; ok {
			suffix = strings.Replace(suffix, "%", "%%", -1)
			sf.Format("%s&%T{ /* recursive/repetitive pointee not shown */ }"+suffix, prefix, rv.Elem().Interface())
			return
		}

		if protect == nil {
			protect = map[interface{}]struct{}{}
		}
		protect[rvi] = struct{}{}
		prettyPrint(protect, sf, prefix+"&", suffix, rv.Elem().Interface(), hooks)
	case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8:
		if v := rv.Int(); v != 0 {
			suffix = strings.Replace(suffix, "%", "%%", -1)
			sf.Format("%s%v"+suffix, prefix, v)
		}
	case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint8:
		if v := rv.Uint(); v != 0 {
			suffix = strings.Replace(suffix, "%", "%%", -1)
			sf.Format("%s%v"+suffix, prefix, v)
		}
	case reflect.Float32, reflect.Float64:
		if v := rv.Float(); v != 0 {
			suffix = strings.Replace(suffix, "%", "%%", -1)
			sf.Format("%s%v"+suffix, prefix, v)
		}
	case reflect.Complex64, reflect.Complex128:
		if v := rv.Complex(); v != 0 {
			suffix = strings.Replace(suffix, "%", "%%", -1)
			sf.Format("%s%v"+suffix, prefix, v)
		}
	case reflect.Uintptr:
		if v := rv.Uint(); v != 0 {
			suffix = strings.Replace(suffix, "%", "%%", -1)
			sf.Format("%s%v"+suffix, prefix, v)
		}
	case reflect.UnsafePointer:
		s := fmt.Sprintf("%p", rv.Interface())
		if s == "0x0" {
			return
		}

		suffix = strings.Replace(suffix, "%", "%%", -1)
		sf.Format("%s%s"+suffix, prefix, s)
	case reflect.Bool:
		if v := rv.Bool(); v {
			suffix = strings.Replace(suffix, "%", "%%", -1)
			sf.Format("%s%v"+suffix, prefix, rv.Bool())
		}
	case reflect.String:
		s := rv.Interface().(string)
		if s == "" {
			return
		}

		suffix = strings.Replace(suffix, "%", "%%", -1)
		sf.Format("%s%q"+suffix, prefix, s)
	case reflect.Chan:
		if reflect.Zero(rt).Interface() == rv.Interface() {
			return
		}

		c := rv.Cap()
		s := ""
		if c != 0 {
			s = fmt.Sprintf("// capacity: %d", c)
		}
		suffix = strings.Replace(suffix, "%", "%%", -1)
		sf.Format("%s%s %s%s"+suffix, prefix, rt.ChanDir(), rt.Elem().Name(), s)
	case reflect.Func:
		if rv.IsNil() {
			return
		}

		var in, out []string
		for i := 0; i < rt.NumIn(); i++ {
			x := reflect.Zero(rt.In(i))
			in = append(in, fmt.Sprintf("%T", x.Interface()))
		}
		if rt.IsVariadic() {
			i := len(in) - 1
			in[i] = "..." + in[i][2:]
		}
		for i := 0; i < rt.NumOut(); i++ {
			out = append(out, rt.Out(i).Name())
		}
		s := "(" + strings.Join(in, ", ") + ")"
		t := strings.Join(out, ", ")
		if len(out) > 1 {
			t = "(" + t + ")"
		}
		if t != "" {
			t = " " + t
		}
		suffix = strings.Replace(suffix, "%", "%%", -1)
		sf.Format("%sfunc%s%s { ... }"+suffix, prefix, s, t)
	case reflect.Map:
		keys := rv.MapKeys()
		if len(keys) == 0 {
			return
		}

		var buf bytes.Buffer
		nf := IndentFormatter(&buf, "· ")
		var skeys []string
		for i, k := range keys {
			prettyPrint(protect, nf, "", "", k.Interface(), hooks)
			skeys = append(skeys, fmt.Sprintf("%s%10d", buf.Bytes(), i))
			buf.Reset()
		}
		sort.Strings(skeys)
		sf.Format("%s%T{%i\n", prefix, v)
		for _, k := range skeys {
			si := strings.TrimSpace(k[len(k)-10:])
			k = k[:len(k)-10]
			n, _ := strconv.ParseUint(si, 10, 64)
			mv := rv.MapIndex(keys[n])
			prettyPrint(protect, sf, fmt.Sprintf("%s: ", k), ",\n", mv.Interface(), hooks)
		}
		suffix = strings.Replace(suffix, "%", "%%", -1)
		sf.Format("%u}" + suffix)
	}
}
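// Illustrative sketch, not part of the vendored file: zero-valued and
// unexported struct fields are omitted from the pretty printed form, as the
// PrettyPrint comment above states. The type and function names are arbitrary.
func examplePrettyString() string {
	type device struct {
		Name  string
		Addrs []string
		count int // unexported, never printed
	}
	return PrettyString(device{Name: "laptop", Addrs: []string{"dynamic"}}, "", "", nil)
}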
27
vendor/github.com/cznic/zappy/LICENSE
generated
vendored
Normal file
27
vendor/github.com/cznic/zappy/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
Copyright (c) 2014 The zappy Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38
vendor/github.com/cznic/zappy/decode.go
generated
vendored
Normal file
38
vendor/github.com/cznic/zappy/decode.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
// Copyright 2014 The zappy Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the SNAPPY-GO-LICENSE file.

package zappy

import (
	"encoding/binary"
	"errors"
)

// ErrCorrupt reports that the input is invalid.
var ErrCorrupt = errors.New("zappy: corrupt input")

// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
	v, _, err := decodedLen(src)
	return v, err
}

// decodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
	v, n := binary.Uvarint(src)
	if n == 0 {
		return 0, 0, ErrCorrupt
	}

	if uint64(int(v)) != v {
		return 0, 0, errors.New("zappy: decoded block is too large")
	}

	return int(v), n, nil
}
121
vendor/github.com/cznic/zappy/decode_cgo.go
generated
vendored
Normal file
121
vendor/github.com/cznic/zappy/decode_cgo.go
generated
vendored
Normal file
@ -0,0 +1,121 @@
// Copyright 2014 The zappy Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the SNAPPY-GO-LICENSE file.

// +build cgo,!purego

package zappy

/*

#include <stdint.h>
#include <string.h>

// supports only uint32 encoded values
int uvarint(unsigned int* n, uint8_t* src, int len) {
	int r = 0;
	unsigned int v = 0;
	unsigned int s = 0;
	while ((len-- != 0) && (++r <= 5)) {
		uint8_t b = *src++;
		v = v | ((b&0x7f)<<s);
		if (b < 0x80) {
			*n = v;
			return r;
		}

		s += 7;
	}
	return -1;
}

int varint(int* n, uint8_t* src, int len) {
	unsigned int u;
	int i = uvarint(&u, src, len);
	int x = u>>1;
	if ((u&1) != 0)
		x = ~x;
	*n = x;
	return i;
}

int decode(int s, int len_src, uint8_t* src, int len_dst, uint8_t* dst) {
	int d = 0;
	int length;
	while (s < len_src) {
		int n, i = varint(&n, src+s, len_src-s);
		if (i <= 0) {
			return -1;
		}

		s += i;
		if (n >= 0) {
			length = n+1;
			if ((length > len_dst-d) || (length > len_src-s))
				return -1;

			memcpy(dst+d, src+s, length);
			d += length;
			s += length;
			continue;
		}

		length = -n;
		int offset;
		i = uvarint((unsigned int*)(&offset), src+s, len_src-s);
		if (i <= 0)
			return -1;

		s += i;
		if (s > len_src)
			return -1;

		int end = d+length;
		if ((offset > d) || (end > len_dst))
			return -1;

		for( ; d < end; d++)
			*(dst+d) = *(dst+d-offset);
	}
	return d;
}

*/
import "C"

func puregoDecode() bool { return false }

// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of buf if buf was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil buf.
func Decode(buf, src []byte) ([]byte, error) {
	dLen, s, err := decodedLen(src)
	if err != nil {
		return nil, err
	}

	if dLen == 0 {
		if len(src) == 1 {
			return nil, nil
		}

		return nil, ErrCorrupt
	}

	if len(buf) < dLen {
		buf = make([]byte, dLen)
	}

	d := int(C.decode(C.int(s), C.int(len(src)), (*C.uint8_t)(&src[0]), C.int(len(buf)), (*C.uint8_t)(&buf[0])))
	if d != dLen {
		return nil, ErrCorrupt
	}

	return buf[:d], nil
}
89
vendor/github.com/cznic/zappy/decode_nocgo.go
generated
vendored
Normal file
89
vendor/github.com/cznic/zappy/decode_nocgo.go
generated
vendored
Normal file
@ -0,0 +1,89 @@
// Copyright 2014 The zappy Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the SNAPPY-GO-LICENSE file.

// +build !cgo purego

package zappy

import (
	"encoding/binary"
)

func puregoDecode() bool { return true }

// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of buf if buf was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil buf.
func Decode(buf, src []byte) ([]byte, error) {
	dLen, s, err := decodedLen(src)
	if err != nil {
		return nil, err
	}

	if dLen == 0 {
		if len(src) == 1 {
			return nil, nil
		}

		return nil, ErrCorrupt
	}

	if len(buf) < dLen {
		buf = make([]byte, dLen)
	}

	var d, offset, length int
	for s < len(src) {
		n, i := binary.Varint(src[s:])
		if i <= 0 {
			return nil, ErrCorrupt
		}

		s += i
		if n >= 0 {
			length = int(n + 1)
			if length > len(buf)-d || length > len(src)-s {
				return nil, ErrCorrupt
			}

			copy(buf[d:], src[s:s+length])
			d += length
			s += length
			continue
		}

		length = int(-n)
		off64, i := binary.Uvarint(src[s:])
		if i <= 0 {
			return nil, ErrCorrupt
		}

		offset = int(off64)
		s += i
		if s > len(src) {
			return nil, ErrCorrupt
		}

		end := d + length
		if offset > d || end > len(buf) {
			return nil, ErrCorrupt
		}

		for s, v := range buf[d-offset : end-offset] {
			buf[d+s] = v
		}
		d = end
	}
	if d != dLen {
		return nil, ErrCorrupt
	}

	return buf[:d], nil
}
37
vendor/github.com/cznic/zappy/encode.go
generated
vendored
Normal file
37
vendor/github.com/cznic/zappy/encode.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
// Copyright 2014 The zappy Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the SNAPPY-GO-LICENSE file.

package zappy

import (
	"encoding/binary"
)

// We limit how far copy back-references can go, the same as the snappy C++
// code.
const maxOffset = 1 << 20

// emitCopy writes a copy chunk and returns the number of bytes written.
func emitCopy(dst []byte, offset, length int) (n int) {
	n = binary.PutVarint(dst, int64(-length))
	n += binary.PutUvarint(dst[n:], uint64(offset))
	return
}

// emitLiteral writes a literal chunk and returns the number of bytes written.
func emitLiteral(dst, lit []byte) (n int) {
	n = binary.PutVarint(dst, int64(len(lit)-1))
	n += copy(dst[n:], lit)
	return
}

// MaxEncodedLen returns the maximum length of a zappy block, given its
// uncompressed length.
func MaxEncodedLen(srcLen int) int {
	return 10 + srcLen
}
140
vendor/github.com/cznic/zappy/encode_cgo.go
generated
vendored
Normal file
140
vendor/github.com/cznic/zappy/encode_cgo.go
generated
vendored
Normal file
@ -0,0 +1,140 @@
// Copyright 2014 The zappy Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the SNAPPY-GO-LICENSE file.

// +build cgo,!purego

package zappy

/*

#include <stdint.h>
#include <string.h>

#define MAXOFFSET 1<<20

int putUvarint(uint8_t* buf, unsigned int x) {
	int i = 1;
	for (; x >= 0x80; i++) {
		*buf++ = x|0x80;
		x >>= 7;
	}
	*buf = x;
	return i;
}

int putVarint(uint8_t* buf, int x) {
	unsigned int ux = x << 1;
	if (x < 0)
		ux = ~ux;
	return putUvarint(buf, ux);
}

int emitLiteral(uint8_t* dst, uint8_t* lit, int len_lit) {
	int n = putVarint(dst, len_lit-1);
	memcpy(dst+n, lit, len_lit);
	return n+len_lit;
}

int emitCopy(uint8_t* dst, int off, int len) {
	int n = putVarint(dst, -len);
	return n+putUvarint(dst+n, (unsigned int)off);
}

int encode(int d, uint8_t* dst, uint8_t* src, int len_src) {
	int table[1<<12];
	int s = 0;
	int t = 0;
	int lit = 0;
	int lim = 0;
	memset(table, 0, sizeof(table));
	for (lim = len_src-3; s < lim; ) {
		// Update the hash table.
		uint32_t b0 = src[s];
		uint32_t b1 = src[s+1];
		uint32_t b2 = src[s+2];
		uint32_t b3 = src[s+3];
		uint32_t h = b0 | (b1<<8) | (b2<<16) | (b3<<24);
		uint32_t i;
more:
		i = (h*0x1e35a7bd)>>20;
		t = table[i];
		table[i] = s;
		// If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.
		if ((t == 0) || (s-t >= MAXOFFSET) || (b0 != src[t]) || (b1 != src[t+1]) || (b2 != src[t+2]) || (b3 != src[t+3])) {
			s++;
			if (s >= lim)
				break;

			b0 = b1;
			b1 = b2;
			b2 = b3;
			b3 = src[s+3];
			h = (h>>8) | ((b3)<<24);
			goto more;
		}

		// Otherwise, we have a match. First, emit any pending literal bytes.
		if (lit != s) {
			d += emitLiteral(dst+d, src+lit, s-lit);
		}
		// Extend the match to be as long as possible.
		int s0 = s;
		s += 4;
		t += 4;
		while ((s < len_src) && (src[s] == src[t])) {
			s++;
			t++;
		}
		d += emitCopy(dst+d, s-t, s-s0);
		lit = s;
	}
	// Emit any final pending literal bytes and return.
	if (lit != len_src) {
		d += emitLiteral(dst+d, src+lit, len_src-lit);
	}
	return d;
}

*/
import "C"

import (
	"encoding/binary"
	"fmt"
	"math"
)

func puregoEncode() bool { return false }

// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of buf if buf was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil buf.
func Encode(buf, src []byte) ([]byte, error) {
	if n := MaxEncodedLen(len(src)); len(buf) < n {
		buf = make([]byte, n)
	}

	if len(src) > math.MaxInt32 {
		return nil, fmt.Errorf("zappy.Encode: too long data: %d bytes", len(src))
	}

	// The block starts with the varint-encoded length of the decompressed bytes.
	d := binary.PutUvarint(buf, uint64(len(src)))

	// Return early if src is short.
	if len(src) <= 4 {
		if len(src) != 0 {
			d += emitLiteral(buf[d:], src)
		}
		return buf[:d], nil
	}

	d = int(C.encode(C.int(d), (*C.uint8_t)(&buf[0]), (*C.uint8_t)(&src[0]), C.int(len(src))))
	return buf[:d], nil
}
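// Illustrative sketch, not part of the vendored files: a round trip through
// the public zappy API. MaxEncodedLen sizes the scratch buffer and DecodedLen
// reads back the uvarint length header that Encode writes first; "fmt" is
// already imported in this file and the function name is arbitrary.
func exampleRoundTrip() {
	src := make([]byte, 0, 1000)
	for i := 0; i < 100; i++ {
		src = append(src, "syncthing "...)
	}

	enc, err := Encode(make([]byte, MaxEncodedLen(len(src))), src)
	if err != nil {
		panic(err)
	}

	dLen, _ := DecodedLen(enc) // 1000, from the length header
	dec, err := Decode(make([]byte, dLen), enc)
	if err != nil {
		panic(err)
	}

	fmt.Println(len(enc) < len(src), string(dec) == string(src)) // true true
}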
Some files were not shown because too many files have changed in this diff