- Modifications so that development is easier.
- Striving for clone && bootstrap && test simplicity.
- Modifications strive to use Go 1.7 from the repo directory.
- Moved libs to the src directory where Go expects them.
parent aa2999bd96
commit 175b6037d9
9  script/env (new executable file)
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+## Set all environmental variables here.
+
+export ROOTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
+export GOPATH=$ROOTDIR/.gopath
+export GOPATH=$GOPATH:$ROOTDIR/vendor/:$ROOTDIR/.vendor/go17/usr/local/go/
+export GOROOT=$ROOTDIR/.vendor/go17/usr/local/go/
+export GOCMD=$ROOTDIR/.vendor/go17/usr/local/go/bin/go
2  test.sh
@@ -7,7 +7,7 @@ retval=0
 for testsuite in base mysql sql
 do
 pushd go/${testsuite} > /dev/null;
-go test $*;
+$GOCMD test $*;
 [ $? -ne 0 ] && retval=1
 popd > /dev/null;
 done
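A minimal sketch of the clone, bootstrap, and test flow this commit is aiming for, assuming script/env and test.sh sit at the repository root as shown above; the repository URL is a placeholder, not taken from this commit:

```bash
# Hypothetical flow; <repo-url> is a placeholder.
git clone <repo-url> repo && cd repo
source script/env   # exports ROOTDIR, GOPATH, GOROOT and GOCMD, pointing at the bundled Go 1.7
./test.sh -v        # each test suite is run with "$GOCMD test" after this change
```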
5  vendor/github.com/BurntSushi/toml/.gitignore (generated, vendored)
@@ -1,5 +0,0 @@
12  vendor/github.com/BurntSushi/toml/.travis.yml (generated, vendored)
@@ -1,12 +0,0 @@
3  vendor/github.com/BurntSushi/toml/COMPATIBLE (generated, vendored)
@@ -1,3 +0,0 @@
14  vendor/github.com/BurntSushi/toml/COPYING (generated, vendored)
@@ -1,14 +0,0 @@
19  vendor/github.com/BurntSushi/toml/Makefile (generated, vendored)
@@ -1,19 +0,0 @@
220  vendor/github.com/BurntSushi/toml/README.md (generated, vendored)
@@ -1,220 +0,0 @@
14  vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING (generated, vendored)
@@ -1,14 +0,0 @@
14  vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md (generated, vendored)
@@ -1,14 +0,0 @@
90  vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go (generated, vendored)
@@ -1,90 +0,0 @@
14  vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING (generated, vendored)
@@ -1,14 +0,0 @@
14  vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md (generated, vendored)
@@ -1,14 +0,0 @@
131  vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go (generated, vendored)
@@ -1,131 +0,0 @@
14  vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING (generated, vendored)
@@ -1,14 +0,0 @@
22  vendor/github.com/BurntSushi/toml/cmd/tomlv/README.md (generated, vendored)
@@ -1,22 +0,0 @@
61  vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go (generated, vendored)
@@ -1,61 +0,0 @@
472  vendor/github.com/BurntSushi/toml/decode.go (generated, vendored)
@@ -1,472 +0,0 @@
99  vendor/github.com/BurntSushi/toml/decode_meta.go (generated, vendored)
@@ -1,99 +0,0 @@
540  vendor/github.com/BurntSushi/toml/decode_test.go (generated, vendored)
@@ -1,540 +0,0 @@
27
vendor/github.com/BurntSushi/toml/doc.go
generated
vendored
27
vendor/github.com/BurntSushi/toml/doc.go
generated
vendored
@ -1,27 +0,0 @@
/*
Package toml provides facilities for decoding and encoding TOML configuration
files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.

The specification implemented: https://github.com/mojombo/toml

The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the
type of each key in a TOML document.

Testing

There are two important types of tests used for this package. The first is
contained inside '*_test.go' files and uses the standard Go unit testing
framework. These tests are primarily devoted to holistically testing the
decoder and encoder.

The second type of testing is used to verify the implementation's adherence
to the TOML specification. These tests have been factored into their own
project: https://github.com/BurntSushi/toml-test

The reason the tests are in a separate project is so that they can be used by
any implementation of TOML. Namely, it is language agnostic.
*/
package toml
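The package comment mentions querying the set of keys in a document via the MetaData type. A minimal sketch of that use, assuming the MetaData.Keys and MetaData.Type methods of this package; the function name and config snippet are illustrative:

func exampleMetaDataKeys() {
	var doc map[string]interface{}
	md, err := Decode("[server]\nhost = \"example.com\"\nport = 8080\n", &doc)
	if err != nil {
		log.Fatal(err)
	}
	for _, key := range md.Keys() {
		// Prints each defined key together with its TOML type,
		// e.g. "server" -> Hash, "server.host" -> String.
		fmt.Printf("%s -> %s\n", key, md.Type(key...))
	}
}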
515
vendor/github.com/BurntSushi/toml/encode.go
generated
vendored
@ -1,515 +0,0 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type tomlEncodeError struct{ error }
|
||||
|
||||
var (
|
||||
errArrayMixedElementTypes = errors.New(
|
||||
"can't encode array with mixed element types")
|
||||
errArrayNilElement = errors.New(
|
||||
"can't encode array with nil element")
|
||||
errNonString = errors.New(
|
||||
"can't encode a map with non-string key type")
|
||||
errAnonNonStruct = errors.New(
|
||||
"can't encode an anonymous field that is not a struct")
|
||||
errArrayNoTable = errors.New(
|
||||
"TOML array element can't contain a table")
|
||||
errNoKey = errors.New(
|
||||
"top-level values must be a Go map or struct")
|
||||
errAnything = errors.New("") // used in testing
|
||||
)
|
||||
|
||||
var quotedReplacer = strings.NewReplacer(
|
||||
"\t", "\\t",
|
||||
"\n", "\\n",
|
||||
"\r", "\\r",
|
||||
"\"", "\\\"",
|
||||
"\\", "\\\\",
|
||||
)
|
||||
|
||||
// Encoder controls the encoding of Go values to a TOML document to some
|
||||
// io.Writer.
|
||||
//
|
||||
// The indentation level can be controlled with the Indent field.
|
||||
type Encoder struct {
|
||||
// A single indentation level. By default it is two spaces.
|
||||
Indent string
|
||||
|
||||
// hasWritten is whether we have written any output to w yet.
|
||||
hasWritten bool
|
||||
w *bufio.Writer
|
||||
}
|
||||
|
||||
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
|
||||
// given. By default, a single indentation level is 2 spaces.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{
|
||||
w: bufio.NewWriter(w),
|
||||
Indent: " ",
|
||||
}
|
||||
}
|
||||
|
||||
// Encode writes a TOML representation of the Go value to the underlying
|
||||
// io.Writer. If the value given cannot be encoded to a valid TOML document,
|
||||
// then an error is returned.
|
||||
//
|
||||
// The mapping between Go values and TOML values should be precisely the same
|
||||
// as for the Decode* functions. Similarly, the TextMarshaler interface is
|
||||
// supported by encoding the resulting bytes as strings. (If you want to write
|
||||
// arbitrary binary data then you will need to use something like base64 since
|
||||
// TOML does not have any binary types.)
|
||||
//
|
||||
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
|
||||
// sub-hashes are encoded first.
|
||||
//
|
||||
// If a Go map is encoded, then its keys are sorted alphabetically for
|
||||
// deterministic output. More control over this behavior may be provided if
|
||||
// there is demand for it.
|
||||
//
|
||||
// Encoding Go values without a corresponding TOML representation---like map
|
||||
// types with non-string keys---will cause an error to be returned. Similarly
|
||||
// for mixed arrays/slices, arrays/slices with nil elements, embedded
|
||||
// non-struct types and nested slices containing maps or structs.
|
||||
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
|
||||
// and so is []map[string][]string.)
|
||||
func (enc *Encoder) Encode(v interface{}) error {
|
||||
rv := eindirect(reflect.ValueOf(v))
|
||||
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
|
||||
return err
|
||||
}
|
||||
return enc.w.Flush()
|
||||
}
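As a usage sketch of the Encoder above (assuming only the exported NewEncoder, Indent and Encode shown in this file, plus bytes, fmt and log imports), note how the plain Host key is emitted before the [Addr] sub-table, matching the ordering rules in the Encode comment:

func exampleEncoderUsage() {
	type Addr struct{ Port int }
	type Conf struct {
		Host string
		Addr Addr
	}
	var buf bytes.Buffer
	enc := NewEncoder(&buf)
	enc.Indent = "    " // four spaces instead of the default two
	if err := enc.Encode(Conf{Host: "example.com", Addr: Addr{Port: 8080}}); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
	// Host = "example.com"
	//
	// [Addr]
	//     Port = 8080
}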
|
||||
|
||||
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
if terr, ok := r.(tomlEncodeError); ok {
|
||||
err = terr.error
|
||||
return
|
||||
}
|
||||
panic(r)
|
||||
}
|
||||
}()
|
||||
enc.encode(key, rv)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) encode(key Key, rv reflect.Value) {
|
||||
// Special case. Time needs to be in ISO8601 format.
|
||||
// Special case. If we can marshal the type to text, then we use that.
// Basically, this prevents the encoder from handling these types as
|
||||
// generic structs (or whatever the underlying type of a TextMarshaler is).
|
||||
switch rv.Interface().(type) {
|
||||
case time.Time, TextMarshaler:
|
||||
enc.keyEqElement(key, rv)
|
||||
return
|
||||
}
|
||||
|
||||
k := rv.Kind()
|
||||
switch k {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
|
||||
reflect.Uint64,
|
||||
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
|
||||
enc.keyEqElement(key, rv)
|
||||
case reflect.Array, reflect.Slice:
|
||||
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
|
||||
enc.eArrayOfTables(key, rv)
|
||||
} else {
|
||||
enc.keyEqElement(key, rv)
|
||||
}
|
||||
case reflect.Interface:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.encode(key, rv.Elem())
|
||||
case reflect.Map:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.eTable(key, rv)
|
||||
case reflect.Ptr:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.encode(key, rv.Elem())
|
||||
case reflect.Struct:
|
||||
enc.eTable(key, rv)
|
||||
default:
|
||||
panic(e("Unsupported type for key '%s': %s", key, k))
|
||||
}
|
||||
}
|
||||
|
||||
// eElement encodes any value that can be an array element (primitives and
|
||||
// arrays).
|
||||
func (enc *Encoder) eElement(rv reflect.Value) {
|
||||
switch v := rv.Interface().(type) {
|
||||
case time.Time:
|
||||
// Special case time.Time as a primitive. Has to come before
|
||||
// TextMarshaler below because time.Time implements
|
||||
// encoding.TextMarshaler, but we need to always use UTC.
|
||||
enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z"))
|
||||
return
|
||||
case TextMarshaler:
|
||||
// Special case. Use text marshaler if it's available for this value.
|
||||
if s, err := v.MarshalText(); err != nil {
|
||||
encPanic(err)
|
||||
} else {
|
||||
enc.writeQuoted(string(s))
|
||||
}
|
||||
return
|
||||
}
|
||||
switch rv.Kind() {
|
||||
case reflect.Bool:
|
||||
enc.wf(strconv.FormatBool(rv.Bool()))
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
enc.wf(strconv.FormatInt(rv.Int(), 10))
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16,
|
||||
reflect.Uint32, reflect.Uint64:
|
||||
enc.wf(strconv.FormatUint(rv.Uint(), 10))
|
||||
case reflect.Float32:
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
|
||||
case reflect.Float64:
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
|
||||
case reflect.Array, reflect.Slice:
|
||||
enc.eArrayOrSliceElement(rv)
|
||||
case reflect.Interface:
|
||||
enc.eElement(rv.Elem())
|
||||
case reflect.String:
|
||||
enc.writeQuoted(rv.String())
|
||||
default:
|
||||
panic(e("Unexpected primitive type: %s", rv.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
// By the TOML spec, all floats must have a decimal with at least one
|
||||
// number on either side.
|
||||
func floatAddDecimal(fstr string) string {
|
||||
if !strings.Contains(fstr, ".") {
|
||||
return fstr + ".0"
|
||||
}
|
||||
return fstr
|
||||
}
|
||||
|
||||
func (enc *Encoder) writeQuoted(s string) {
|
||||
enc.wf("\"%s\"", quotedReplacer.Replace(s))
|
||||
}
|
||||
|
||||
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
|
||||
length := rv.Len()
|
||||
enc.wf("[")
|
||||
for i := 0; i < length; i++ {
|
||||
elem := rv.Index(i)
|
||||
enc.eElement(elem)
|
||||
if i != length-1 {
|
||||
enc.wf(", ")
|
||||
}
|
||||
}
|
||||
enc.wf("]")
|
||||
}
|
||||
|
||||
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
|
||||
if len(key) == 0 {
|
||||
encPanic(errNoKey)
|
||||
}
|
||||
panicIfInvalidKey(key, true)
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
trv := rv.Index(i)
|
||||
if isNil(trv) {
|
||||
continue
|
||||
}
|
||||
enc.newline()
|
||||
enc.wf("%s[[%s]]", enc.indentStr(key), key.String())
|
||||
enc.newline()
|
||||
enc.eMapOrStruct(key, trv)
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
|
||||
if len(key) == 1 {
|
||||
// Output an extra new line between top-level tables.
|
||||
// (The newline isn't written if nothing else has been written though.)
|
||||
enc.newline()
|
||||
}
|
||||
if len(key) > 0 {
|
||||
panicIfInvalidKey(key, true)
|
||||
enc.wf("%s[%s]", enc.indentStr(key), key.String())
|
||||
enc.newline()
|
||||
}
|
||||
enc.eMapOrStruct(key, rv)
|
||||
}
|
||||
|
||||
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
|
||||
switch rv := eindirect(rv); rv.Kind() {
|
||||
case reflect.Map:
|
||||
enc.eMap(key, rv)
|
||||
case reflect.Struct:
|
||||
enc.eStruct(key, rv)
|
||||
default:
|
||||
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
|
||||
rt := rv.Type()
|
||||
if rt.Key().Kind() != reflect.String {
|
||||
encPanic(errNonString)
|
||||
}
|
||||
|
||||
// Sort keys so that we have deterministic output. And write keys directly
|
||||
// underneath this key first, before writing sub-structs or sub-maps.
|
||||
var mapKeysDirect, mapKeysSub []string
|
||||
for _, mapKey := range rv.MapKeys() {
|
||||
k := mapKey.String()
|
||||
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
|
||||
mapKeysSub = append(mapKeysSub, k)
|
||||
} else {
|
||||
mapKeysDirect = append(mapKeysDirect, k)
|
||||
}
|
||||
}
|
||||
|
||||
var writeMapKeys = func(mapKeys []string) {
|
||||
sort.Strings(mapKeys)
|
||||
for _, mapKey := range mapKeys {
|
||||
mrv := rv.MapIndex(reflect.ValueOf(mapKey))
|
||||
if isNil(mrv) {
|
||||
// Don't write anything for nil fields.
|
||||
continue
|
||||
}
|
||||
enc.encode(key.add(mapKey), mrv)
|
||||
}
|
||||
}
|
||||
writeMapKeys(mapKeysDirect)
|
||||
writeMapKeys(mapKeysSub)
|
||||
}
|
||||
|
||||
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
|
||||
// Write keys for fields directly under this key first, because if we write
|
||||
// a field that creates a new table, then all keys under it will be in that
|
||||
// table (not the one we're writing here).
|
||||
rt := rv.Type()
|
||||
var fieldsDirect, fieldsSub [][]int
|
||||
var addFields func(rt reflect.Type, rv reflect.Value, start []int)
|
||||
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
|
||||
for i := 0; i < rt.NumField(); i++ {
|
||||
f := rt.Field(i)
|
||||
// skip unexported fields
|
||||
if f.PkgPath != "" {
|
||||
continue
|
||||
}
|
||||
frv := rv.Field(i)
|
||||
if f.Anonymous {
|
||||
frv := eindirect(frv)
|
||||
t := frv.Type()
|
||||
if t.Kind() != reflect.Struct {
|
||||
encPanic(errAnonNonStruct)
|
||||
}
|
||||
addFields(t, frv, f.Index)
|
||||
} else if typeIsHash(tomlTypeOfGo(frv)) {
|
||||
fieldsSub = append(fieldsSub, append(start, f.Index...))
|
||||
} else {
|
||||
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
|
||||
}
|
||||
}
|
||||
}
|
||||
addFields(rt, rv, nil)
|
||||
|
||||
var writeFields = func(fields [][]int) {
|
||||
for _, fieldIndex := range fields {
|
||||
sft := rt.FieldByIndex(fieldIndex)
|
||||
sf := rv.FieldByIndex(fieldIndex)
|
||||
if isNil(sf) {
|
||||
// Don't write anything for nil fields.
|
||||
continue
|
||||
}
|
||||
|
||||
keyName := sft.Tag.Get("toml")
|
||||
if keyName == "-" {
|
||||
continue
|
||||
}
|
||||
if keyName == "" {
|
||||
keyName = sft.Name
|
||||
}
|
||||
enc.encode(key.add(keyName), sf)
|
||||
}
|
||||
}
|
||||
writeFields(fieldsDirect)
|
||||
writeFields(fieldsSub)
|
||||
}
|
||||
|
||||
// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
// which means no concrete TOML type could be found. It is used, among other
// things, to determine whether the types of array elements are mixed (which
// is forbidden).
|
||||
func tomlTypeOfGo(rv reflect.Value) tomlType {
|
||||
if isNil(rv) || !rv.IsValid() {
|
||||
return nil
|
||||
}
|
||||
switch rv.Kind() {
|
||||
case reflect.Bool:
|
||||
return tomlBool
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
|
||||
reflect.Uint64:
|
||||
return tomlInteger
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return tomlFloat
|
||||
case reflect.Array, reflect.Slice:
|
||||
if typeEqual(tomlHash, tomlArrayType(rv)) {
|
||||
return tomlArrayHash
|
||||
} else {
|
||||
return tomlArray
|
||||
}
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
return tomlTypeOfGo(rv.Elem())
|
||||
case reflect.String:
|
||||
return tomlString
|
||||
case reflect.Map:
|
||||
return tomlHash
|
||||
case reflect.Struct:
|
||||
switch rv.Interface().(type) {
|
||||
case time.Time:
|
||||
return tomlDatetime
|
||||
case TextMarshaler:
|
||||
return tomlString
|
||||
default:
|
||||
return tomlHash
|
||||
}
|
||||
default:
|
||||
panic("unexpected reflect.Kind: " + rv.Kind().String())
|
||||
}
|
||||
}
|
||||
|
||||
// tomlArrayType returns the element type of a TOML array. The type returned
|
||||
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
|
||||
// slice). This function may also panic if it finds a type that cannot be
|
||||
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
|
||||
// nested arrays of tables).
|
||||
func tomlArrayType(rv reflect.Value) tomlType {
|
||||
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
firstType := tomlTypeOfGo(rv.Index(0))
|
||||
if firstType == nil {
|
||||
encPanic(errArrayNilElement)
|
||||
}
|
||||
|
||||
rvlen := rv.Len()
|
||||
for i := 1; i < rvlen; i++ {
|
||||
elem := rv.Index(i)
|
||||
switch elemType := tomlTypeOfGo(elem); {
|
||||
case elemType == nil:
|
||||
encPanic(errArrayNilElement)
|
||||
case !typeEqual(firstType, elemType):
|
||||
encPanic(errArrayMixedElementTypes)
|
||||
}
|
||||
}
|
||||
// If we have a nested array, then we must make sure that the nested
|
||||
// array contains ONLY primitives.
|
||||
// This checks arbitrarily nested arrays.
|
||||
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
|
||||
nest := tomlArrayType(eindirect(rv.Index(0)))
|
||||
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
|
||||
encPanic(errArrayNoTable)
|
||||
}
|
||||
}
|
||||
return firstType
|
||||
}
|
||||
|
||||
func (enc *Encoder) newline() {
|
||||
if enc.hasWritten {
|
||||
enc.wf("\n")
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
|
||||
if len(key) == 0 {
|
||||
encPanic(errNoKey)
|
||||
}
|
||||
panicIfInvalidKey(key, false)
|
||||
enc.wf("%s%s = ", enc.indentStr(key), key[len(key)-1])
|
||||
enc.eElement(val)
|
||||
enc.newline()
|
||||
}
|
||||
|
||||
func (enc *Encoder) wf(format string, v ...interface{}) {
|
||||
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
|
||||
encPanic(err)
|
||||
}
|
||||
enc.hasWritten = true
|
||||
}
|
||||
|
||||
func (enc *Encoder) indentStr(key Key) string {
|
||||
return strings.Repeat(enc.Indent, len(key)-1)
|
||||
}
|
||||
|
||||
func encPanic(err error) {
|
||||
panic(tomlEncodeError{err})
|
||||
}
|
||||
|
||||
func eindirect(v reflect.Value) reflect.Value {
|
||||
switch v.Kind() {
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
return eindirect(v.Elem())
|
||||
default:
|
||||
return v
|
||||
}
|
||||
}
|
||||
|
||||
func isNil(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return rv.IsNil()
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func panicIfInvalidKey(key Key, hash bool) {
|
||||
if hash {
|
||||
for _, k := range key {
|
||||
if !isValidTableName(k) {
|
||||
encPanic(e("Key '%s' is not a valid table name. Table names "+
|
||||
"cannot contain '[', ']' or '.'.", key.String()))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if !isValidKeyName(key[len(key)-1]) {
|
||||
encPanic(e("Key '%s' is not a name. Key names "+
|
||||
"cannot contain whitespace.", key.String()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isValidTableName(s string) bool {
|
||||
if len(s) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, r := range s {
|
||||
if r == '[' || r == ']' || r == '.' {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func isValidKeyName(s string) bool {
|
||||
if len(s) == 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
506
vendor/github.com/BurntSushi/toml/encode_test.go
generated
vendored
@ -1,506 +0,0 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestEncodeRoundTrip(t *testing.T) {
|
||||
type Config struct {
|
||||
Age int
|
||||
Cats []string
|
||||
Pi float64
|
||||
Perfection []int
|
||||
DOB time.Time
|
||||
Ipaddress net.IP
|
||||
}
|
||||
|
||||
var inputs = Config{
|
||||
13,
|
||||
[]string{"one", "two", "three"},
|
||||
3.145,
|
||||
[]int{11, 2, 3, 4},
|
||||
time.Now(),
|
||||
net.ParseIP("192.168.59.254"),
|
||||
}
|
||||
|
||||
var firstBuffer bytes.Buffer
|
||||
e := NewEncoder(&firstBuffer)
|
||||
err := e.Encode(inputs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var outputs Config
|
||||
if _, err := Decode(firstBuffer.String(), &outputs); err != nil {
|
||||
log.Printf("Could not decode:\n-----\n%s\n-----\n",
|
||||
firstBuffer.String())
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// could test each value individually, but I'm lazy
|
||||
var secondBuffer bytes.Buffer
|
||||
e2 := NewEncoder(&secondBuffer)
|
||||
err = e2.Encode(outputs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if firstBuffer.String() != secondBuffer.String() {
|
||||
t.Error(
|
||||
firstBuffer.String(),
|
||||
"\n\n is not identical to\n\n",
|
||||
secondBuffer.String())
|
||||
}
|
||||
}
|
||||
|
||||
// XXX(burntsushi)
|
||||
// I think these tests probably should be removed. They are good, but they
|
||||
// ought to be obsolete by toml-test.
|
||||
func TestEncode(t *testing.T) {
|
||||
type Embedded struct {
|
||||
Int int `toml:"_int"`
|
||||
}
|
||||
type NonStruct int
|
||||
|
||||
date := time.Date(2014, 5, 11, 20, 30, 40, 0, time.FixedZone("IST", 3600))
|
||||
dateStr := "2014-05-11T19:30:40Z"
|
||||
|
||||
tests := map[string]struct {
|
||||
input interface{}
|
||||
wantOutput string
|
||||
wantError error
|
||||
}{
|
||||
"bool field": {
|
||||
input: struct {
|
||||
BoolTrue bool
|
||||
BoolFalse bool
|
||||
}{true, false},
|
||||
wantOutput: "BoolTrue = true\nBoolFalse = false\n",
|
||||
},
|
||||
"int fields": {
|
||||
input: struct {
|
||||
Int int
|
||||
Int8 int8
|
||||
Int16 int16
|
||||
Int32 int32
|
||||
Int64 int64
|
||||
}{1, 2, 3, 4, 5},
|
||||
wantOutput: "Int = 1\nInt8 = 2\nInt16 = 3\nInt32 = 4\nInt64 = 5\n",
|
||||
},
|
||||
"uint fields": {
|
||||
input: struct {
|
||||
Uint uint
|
||||
Uint8 uint8
|
||||
Uint16 uint16
|
||||
Uint32 uint32
|
||||
Uint64 uint64
|
||||
}{1, 2, 3, 4, 5},
|
||||
wantOutput: "Uint = 1\nUint8 = 2\nUint16 = 3\nUint32 = 4" +
|
||||
"\nUint64 = 5\n",
|
||||
},
|
||||
"float fields": {
|
||||
input: struct {
|
||||
Float32 float32
|
||||
Float64 float64
|
||||
}{1.5, 2.5},
|
||||
wantOutput: "Float32 = 1.5\nFloat64 = 2.5\n",
|
||||
},
|
||||
"string field": {
|
||||
input: struct{ String string }{"foo"},
|
||||
wantOutput: "String = \"foo\"\n",
|
||||
},
|
||||
"string field and unexported field": {
|
||||
input: struct {
|
||||
String string
|
||||
unexported int
|
||||
}{"foo", 0},
|
||||
wantOutput: "String = \"foo\"\n",
|
||||
},
|
||||
"datetime field in UTC": {
|
||||
input: struct{ Date time.Time }{date},
|
||||
wantOutput: fmt.Sprintf("Date = %s\n", dateStr),
|
||||
},
|
||||
"datetime field as primitive": {
|
||||
// Using a map here to fail if isStructOrMap() returns true for
|
||||
// time.Time.
|
||||
input: map[string]interface{}{
|
||||
"Date": date,
|
||||
"Int": 1,
|
||||
},
|
||||
wantOutput: fmt.Sprintf("Date = %s\nInt = 1\n", dateStr),
|
||||
},
|
||||
"array fields": {
|
||||
input: struct {
|
||||
IntArray0 [0]int
|
||||
IntArray3 [3]int
|
||||
}{[0]int{}, [3]int{1, 2, 3}},
|
||||
wantOutput: "IntArray0 = []\nIntArray3 = [1, 2, 3]\n",
|
||||
},
|
||||
"slice fields": {
|
||||
input: struct{ IntSliceNil, IntSlice0, IntSlice3 []int }{
|
||||
nil, []int{}, []int{1, 2, 3},
|
||||
},
|
||||
wantOutput: "IntSlice0 = []\nIntSlice3 = [1, 2, 3]\n",
|
||||
},
|
||||
"datetime slices": {
|
||||
input: struct{ DatetimeSlice []time.Time }{
|
||||
[]time.Time{date, date},
|
||||
},
|
||||
wantOutput: fmt.Sprintf("DatetimeSlice = [%s, %s]\n",
|
||||
dateStr, dateStr),
|
||||
},
|
||||
"nested arrays and slices": {
|
||||
input: struct {
|
||||
SliceOfArrays [][2]int
|
||||
ArrayOfSlices [2][]int
|
||||
SliceOfArraysOfSlices [][2][]int
|
||||
ArrayOfSlicesOfArrays [2][][2]int
|
||||
SliceOfMixedArrays [][2]interface{}
|
||||
ArrayOfMixedSlices [2][]interface{}
|
||||
}{
|
||||
[][2]int{{1, 2}, {3, 4}},
|
||||
[2][]int{{1, 2}, {3, 4}},
|
||||
[][2][]int{
|
||||
{
|
||||
{1, 2}, {3, 4},
|
||||
},
|
||||
{
|
||||
{5, 6}, {7, 8},
|
||||
},
|
||||
},
|
||||
[2][][2]int{
|
||||
{
|
||||
{1, 2}, {3, 4},
|
||||
},
|
||||
{
|
||||
{5, 6}, {7, 8},
|
||||
},
|
||||
},
|
||||
[][2]interface{}{
|
||||
{1, 2}, {"a", "b"},
|
||||
},
|
||||
[2][]interface{}{
|
||||
{1, 2}, {"a", "b"},
|
||||
},
|
||||
},
|
||||
wantOutput: `SliceOfArrays = [[1, 2], [3, 4]]
|
||||
ArrayOfSlices = [[1, 2], [3, 4]]
|
||||
SliceOfArraysOfSlices = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
|
||||
ArrayOfSlicesOfArrays = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
|
||||
SliceOfMixedArrays = [[1, 2], ["a", "b"]]
|
||||
ArrayOfMixedSlices = [[1, 2], ["a", "b"]]
|
||||
`,
|
||||
},
|
||||
"empty slice": {
|
||||
input: struct{ Empty []interface{} }{[]interface{}{}},
|
||||
wantOutput: "Empty = []\n",
|
||||
},
|
||||
"(error) slice with element type mismatch (string and integer)": {
|
||||
input: struct{ Mixed []interface{} }{[]interface{}{1, "a"}},
|
||||
wantError: errArrayMixedElementTypes,
|
||||
},
|
||||
"(error) slice with element type mismatch (integer and float)": {
|
||||
input: struct{ Mixed []interface{} }{[]interface{}{1, 2.5}},
|
||||
wantError: errArrayMixedElementTypes,
|
||||
},
|
||||
"slice with elems of differing Go types, same TOML types": {
|
||||
input: struct {
|
||||
MixedInts []interface{}
|
||||
MixedFloats []interface{}
|
||||
}{
|
||||
[]interface{}{
|
||||
int(1), int8(2), int16(3), int32(4), int64(5),
|
||||
uint(1), uint8(2), uint16(3), uint32(4), uint64(5),
|
||||
},
|
||||
[]interface{}{float32(1.5), float64(2.5)},
|
||||
},
|
||||
wantOutput: "MixedInts = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n" +
|
||||
"MixedFloats = [1.5, 2.5]\n",
|
||||
},
|
||||
"(error) slice w/ element type mismatch (one is nested array)": {
|
||||
input: struct{ Mixed []interface{} }{
|
||||
[]interface{}{1, []interface{}{2}},
|
||||
},
|
||||
wantError: errArrayMixedElementTypes,
|
||||
},
|
||||
"(error) slice with 1 nil element": {
|
||||
input: struct{ NilElement1 []interface{} }{[]interface{}{nil}},
|
||||
wantError: errArrayNilElement,
|
||||
},
|
||||
"(error) slice with 1 nil element (and other non-nil elements)": {
|
||||
input: struct{ NilElement []interface{} }{
|
||||
[]interface{}{1, nil},
|
||||
},
|
||||
wantError: errArrayNilElement,
|
||||
},
|
||||
"simple map": {
|
||||
input: map[string]int{"a": 1, "b": 2},
|
||||
wantOutput: "a = 1\nb = 2\n",
|
||||
},
|
||||
"map with interface{} value type": {
|
||||
input: map[string]interface{}{"a": 1, "b": "c"},
|
||||
wantOutput: "a = 1\nb = \"c\"\n",
|
||||
},
|
||||
"map with interface{} value type, some of which are structs": {
|
||||
input: map[string]interface{}{
|
||||
"a": struct{ Int int }{2},
|
||||
"b": 1,
|
||||
},
|
||||
wantOutput: "b = 1\n\n[a]\n Int = 2\n",
|
||||
},
|
||||
"nested map": {
|
||||
input: map[string]map[string]int{
|
||||
"a": {"b": 1},
|
||||
"c": {"d": 2},
|
||||
},
|
||||
wantOutput: "[a]\n b = 1\n\n[c]\n d = 2\n",
|
||||
},
|
||||
"nested struct": {
|
||||
input: struct{ Struct struct{ Int int } }{
|
||||
struct{ Int int }{1},
|
||||
},
|
||||
wantOutput: "[Struct]\n Int = 1\n",
|
||||
},
|
||||
"nested struct and non-struct field": {
|
||||
input: struct {
|
||||
Struct struct{ Int int }
|
||||
Bool bool
|
||||
}{struct{ Int int }{1}, true},
|
||||
wantOutput: "Bool = true\n\n[Struct]\n Int = 1\n",
|
||||
},
|
||||
"2 nested structs": {
|
||||
input: struct{ Struct1, Struct2 struct{ Int int } }{
|
||||
struct{ Int int }{1}, struct{ Int int }{2},
|
||||
},
|
||||
wantOutput: "[Struct1]\n Int = 1\n\n[Struct2]\n Int = 2\n",
|
||||
},
|
||||
"deeply nested structs": {
|
||||
input: struct {
|
||||
Struct1, Struct2 struct{ Struct3 *struct{ Int int } }
|
||||
}{
|
||||
struct{ Struct3 *struct{ Int int } }{&struct{ Int int }{1}},
|
||||
struct{ Struct3 *struct{ Int int } }{nil},
|
||||
},
|
||||
wantOutput: "[Struct1]\n [Struct1.Struct3]\n Int = 1" +
|
||||
"\n\n[Struct2]\n",
|
||||
},
|
||||
"nested struct with nil struct elem": {
|
||||
input: struct {
|
||||
Struct struct{ Inner *struct{ Int int } }
|
||||
}{
|
||||
struct{ Inner *struct{ Int int } }{nil},
|
||||
},
|
||||
wantOutput: "[Struct]\n",
|
||||
},
|
||||
"nested struct with no fields": {
|
||||
input: struct {
|
||||
Struct struct{ Inner struct{} }
|
||||
}{
|
||||
struct{ Inner struct{} }{struct{}{}},
|
||||
},
|
||||
wantOutput: "[Struct]\n [Struct.Inner]\n",
|
||||
},
|
||||
"struct with tags": {
|
||||
input: struct {
|
||||
Struct struct {
|
||||
Int int `toml:"_int"`
|
||||
} `toml:"_struct"`
|
||||
Bool bool `toml:"_bool"`
|
||||
}{
|
||||
struct {
|
||||
Int int `toml:"_int"`
|
||||
}{1}, true,
|
||||
},
|
||||
wantOutput: "_bool = true\n\n[_struct]\n _int = 1\n",
|
||||
},
|
||||
"embedded struct": {
|
||||
input: struct{ Embedded }{Embedded{1}},
|
||||
wantOutput: "_int = 1\n",
|
||||
},
|
||||
"embedded *struct": {
|
||||
input: struct{ *Embedded }{&Embedded{1}},
|
||||
wantOutput: "_int = 1\n",
|
||||
},
|
||||
"nested embedded struct": {
|
||||
input: struct {
|
||||
Struct struct{ Embedded } `toml:"_struct"`
|
||||
}{struct{ Embedded }{Embedded{1}}},
|
||||
wantOutput: "[_struct]\n _int = 1\n",
|
||||
},
|
||||
"nested embedded *struct": {
|
||||
input: struct {
|
||||
Struct struct{ *Embedded } `toml:"_struct"`
|
||||
}{struct{ *Embedded }{&Embedded{1}}},
|
||||
wantOutput: "[_struct]\n _int = 1\n",
|
||||
},
|
||||
"array of tables": {
|
||||
input: struct {
|
||||
Structs []*struct{ Int int } `toml:"struct"`
|
||||
}{
|
||||
[]*struct{ Int int }{{1}, {3}},
|
||||
},
|
||||
wantOutput: "[[struct]]\n Int = 1\n\n[[struct]]\n Int = 3\n",
|
||||
},
|
||||
"array of tables order": {
|
||||
input: map[string]interface{}{
|
||||
"map": map[string]interface{}{
|
||||
"zero": 5,
|
||||
"arr": []map[string]int{
|
||||
map[string]int{
|
||||
"friend": 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantOutput: "[map]\n zero = 5\n\n [[map.arr]]\n friend = 5\n",
|
||||
},
|
||||
"(error) top-level slice": {
|
||||
input: []struct{ Int int }{{1}, {2}, {3}},
|
||||
wantError: errNoKey,
|
||||
},
|
||||
"(error) slice of slice": {
|
||||
input: struct {
|
||||
Slices [][]struct{ Int int }
|
||||
}{
|
||||
[][]struct{ Int int }{{{1}}, {{2}}, {{3}}},
|
||||
},
|
||||
wantError: errArrayNoTable,
|
||||
},
|
||||
"(error) map no string key": {
|
||||
input: map[int]string{1: ""},
|
||||
wantError: errNonString,
|
||||
},
|
||||
"(error) anonymous non-struct": {
|
||||
input: struct{ NonStruct }{5},
|
||||
wantError: errAnonNonStruct,
|
||||
},
|
||||
"(error) empty key name": {
|
||||
input: map[string]int{"": 1},
|
||||
wantError: errAnything,
|
||||
},
|
||||
"(error) empty map name": {
|
||||
input: map[string]interface{}{
|
||||
"": map[string]int{"v": 1},
|
||||
},
|
||||
wantError: errAnything,
|
||||
},
|
||||
}
|
||||
for label, test := range tests {
|
||||
encodeExpected(t, label, test.input, test.wantOutput, test.wantError)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeNestedTableArrays(t *testing.T) {
|
||||
type song struct {
|
||||
Name string `toml:"name"`
|
||||
}
|
||||
type album struct {
|
||||
Name string `toml:"name"`
|
||||
Songs []song `toml:"songs"`
|
||||
}
|
||||
type springsteen struct {
|
||||
Albums []album `toml:"albums"`
|
||||
}
|
||||
value := springsteen{
|
||||
[]album{
|
||||
{"Born to Run",
|
||||
[]song{{"Jungleland"}, {"Meeting Across the River"}}},
|
||||
{"Born in the USA",
|
||||
[]song{{"Glory Days"}, {"Dancing in the Dark"}}},
|
||||
},
|
||||
}
|
||||
expected := `[[albums]]
|
||||
name = "Born to Run"
|
||||
|
||||
[[albums.songs]]
|
||||
name = "Jungleland"
|
||||
|
||||
[[albums.songs]]
|
||||
name = "Meeting Across the River"
|
||||
|
||||
[[albums]]
|
||||
name = "Born in the USA"
|
||||
|
||||
[[albums.songs]]
|
||||
name = "Glory Days"
|
||||
|
||||
[[albums.songs]]
|
||||
name = "Dancing in the Dark"
|
||||
`
|
||||
encodeExpected(t, "nested table arrays", value, expected, nil)
|
||||
}
|
||||
|
||||
func TestEncodeArrayHashWithNormalHashOrder(t *testing.T) {
|
||||
type Alpha struct {
|
||||
V int
|
||||
}
|
||||
type Beta struct {
|
||||
V int
|
||||
}
|
||||
type Conf struct {
|
||||
V int
|
||||
A Alpha
|
||||
B []Beta
|
||||
}
|
||||
|
||||
val := Conf{
|
||||
V: 1,
|
||||
A: Alpha{2},
|
||||
B: []Beta{{3}},
|
||||
}
|
||||
expected := "V = 1\n\n[A]\n V = 2\n\n[[B]]\n V = 3\n"
|
||||
encodeExpected(t, "array hash with normal hash order", val, expected, nil)
|
||||
}
|
||||
|
||||
func encodeExpected(
|
||||
t *testing.T, label string, val interface{}, wantStr string, wantErr error,
|
||||
) {
|
||||
var buf bytes.Buffer
|
||||
enc := NewEncoder(&buf)
|
||||
err := enc.Encode(val)
|
||||
if err != wantErr {
|
||||
if wantErr != nil {
|
||||
if wantErr == errAnything && err != nil {
|
||||
return
|
||||
}
|
||||
t.Errorf("%s: want Encode error %v, got %v", label, wantErr, err)
|
||||
} else {
|
||||
t.Errorf("%s: Encode failed: %s", label, err)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if got := buf.String(); wantStr != got {
|
||||
t.Errorf("%s: want\n-----\n%q\n-----\nbut got\n-----\n%q\n-----\n",
|
||||
label, wantStr, got)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleEncoder_Encode() {
|
||||
date, _ := time.Parse(time.RFC822, "14 Mar 10 18:00 UTC")
|
||||
var config = map[string]interface{}{
|
||||
"date": date,
|
||||
"counts": []int{1, 1, 2, 3, 5, 8},
|
||||
"hash": map[string]string{
|
||||
"key1": "val1",
|
||||
"key2": "val2",
|
||||
},
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
if err := NewEncoder(buf).Encode(config); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Println(buf.String())
|
||||
|
||||
// Output:
|
||||
// counts = [1, 1, 2, 3, 5, 8]
|
||||
// date = 2010-03-14T18:00:00Z
|
||||
//
|
||||
// [hash]
|
||||
// key1 = "val1"
|
||||
// key2 = "val2"
|
||||
}
|
19
vendor/github.com/BurntSushi/toml/encoding_types.go
generated
vendored
@ -1,19 +0,0 @@
// +build go1.2

package toml

// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.

import (
	"encoding"
)

// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler

// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler
18
vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
generated
vendored
@ -1,18 +0,0 @@
// +build !go1.2

package toml

// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.

// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
	MarshalText() (text []byte, err error)
}

// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextUnmarshaler interface {
	UnmarshalText(text []byte) error
}
|
734
vendor/github.com/BurntSushi/toml/lex.go
generated
vendored
@ -1,734 +0,0 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type itemType int
|
||||
|
||||
const (
|
||||
itemError itemType = iota
|
||||
itemNIL // used in the parser to indicate no type
|
||||
itemEOF
|
||||
itemText
|
||||
itemString
|
||||
itemBool
|
||||
itemInteger
|
||||
itemFloat
|
||||
itemDatetime
|
||||
itemArray // the start of an array
|
||||
itemArrayEnd
|
||||
itemTableStart
|
||||
itemTableEnd
|
||||
itemArrayTableStart
|
||||
itemArrayTableEnd
|
||||
itemKeyStart
|
||||
itemCommentStart
|
||||
)
|
||||
|
||||
const (
|
||||
eof = 0
|
||||
tableStart = '['
|
||||
tableEnd = ']'
|
||||
arrayTableStart = '['
|
||||
arrayTableEnd = ']'
|
||||
tableSep = '.'
|
||||
keySep = '='
|
||||
arrayStart = '['
|
||||
arrayEnd = ']'
|
||||
arrayValTerm = ','
|
||||
commentStart = '#'
|
||||
stringStart = '"'
|
||||
stringEnd = '"'
|
||||
)
|
||||
|
||||
type stateFn func(lx *lexer) stateFn
|
||||
|
||||
type lexer struct {
|
||||
input string
|
||||
start int
|
||||
pos int
|
||||
width int
|
||||
line int
|
||||
state stateFn
|
||||
items chan item
|
||||
|
||||
// A stack of state functions used to maintain context.
|
||||
// The idea is to reuse parts of the state machine in various places.
|
||||
// For example, values can appear at the top level or within arbitrarily
|
||||
// nested arrays. The last state on the stack is used after a value has
|
||||
// been lexed. Similarly for comments.
|
||||
stack []stateFn
|
||||
}
|
||||
|
||||
type item struct {
|
||||
typ itemType
|
||||
val string
|
||||
line int
|
||||
}
|
||||
|
||||
func (lx *lexer) nextItem() item {
|
||||
for {
|
||||
select {
|
||||
case item := <-lx.items:
|
||||
return item
|
||||
default:
|
||||
lx.state = lx.state(lx)
|
||||
}
|
||||
}
|
||||
}
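nextItem above is how callers drive the lexer: each call runs state functions until one of them emits an item. A minimal driver sketch using only lex, nextItem and the item types defined in this file (the function name and input are hypothetical, for illustration):

func exampleDriveLexer() {
	lx := lex("answer = 42\n")
	for {
		it := lx.nextItem()
		if it.typ == itemEOF || it.typ == itemError {
			break
		}
		// Prints items such as (KeyStart, ), (Text, answer), (Integer, 42).
		fmt.Println(it)
	}
}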
|
||||
|
||||
func lex(input string) *lexer {
|
||||
lx := &lexer{
|
||||
input: input + "\n",
|
||||
state: lexTop,
|
||||
line: 1,
|
||||
items: make(chan item, 10),
|
||||
stack: make([]stateFn, 0, 10),
|
||||
}
|
||||
return lx
|
||||
}
|
||||
|
||||
func (lx *lexer) push(state stateFn) {
|
||||
lx.stack = append(lx.stack, state)
|
||||
}
|
||||
|
||||
func (lx *lexer) pop() stateFn {
|
||||
if len(lx.stack) == 0 {
|
||||
return lx.errorf("BUG in lexer: no states to pop.")
|
||||
}
|
||||
last := lx.stack[len(lx.stack)-1]
|
||||
lx.stack = lx.stack[0 : len(lx.stack)-1]
|
||||
return last
|
||||
}
|
||||
|
||||
func (lx *lexer) current() string {
|
||||
return lx.input[lx.start:lx.pos]
|
||||
}
|
||||
|
||||
func (lx *lexer) emit(typ itemType) {
|
||||
lx.items <- item{typ, lx.current(), lx.line}
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
func (lx *lexer) emitTrim(typ itemType) {
|
||||
lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
func (lx *lexer) next() (r rune) {
|
||||
if lx.pos >= len(lx.input) {
|
||||
lx.width = 0
|
||||
return eof
|
||||
}
|
||||
|
||||
if lx.input[lx.pos] == '\n' {
|
||||
lx.line++
|
||||
}
|
||||
r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:])
|
||||
lx.pos += lx.width
|
||||
return r
|
||||
}
|
||||
|
||||
// ignore skips over the pending input before this point.
|
||||
func (lx *lexer) ignore() {
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
// backup steps back one rune. Can be called only once per call of next.
|
||||
func (lx *lexer) backup() {
|
||||
lx.pos -= lx.width
|
||||
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
|
||||
lx.line--
|
||||
}
|
||||
}
|
||||
|
||||
// accept consumes the next rune if it's equal to `valid`.
|
||||
func (lx *lexer) accept(valid rune) bool {
|
||||
if lx.next() == valid {
|
||||
return true
|
||||
}
|
||||
lx.backup()
|
||||
return false
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next rune in the input.
|
||||
func (lx *lexer) peek() rune {
|
||||
r := lx.next()
|
||||
lx.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
// errorf stops all lexing by emitting an error and returning `nil`.
|
||||
// Note that any value that is a character is escaped if it's a special
|
||||
// character (new lines, tabs, etc.).
|
||||
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
|
||||
lx.items <- item{
|
||||
itemError,
|
||||
fmt.Sprintf(format, values...),
|
||||
lx.line,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// lexTop consumes elements at the top level of TOML data.
|
||||
func lexTop(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isWhitespace(r) || isNL(r) {
|
||||
return lexSkip(lx, lexTop)
|
||||
}
|
||||
|
||||
switch r {
|
||||
case commentStart:
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
case tableStart:
|
||||
return lexTableStart
|
||||
case eof:
|
||||
if lx.pos > lx.start {
|
||||
return lx.errorf("Unexpected EOF.")
|
||||
}
|
||||
lx.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
|
||||
// At this point, the only valid item can be a key, so we back up
|
||||
// and let the key lexer do the rest.
|
||||
lx.backup()
|
||||
lx.push(lexTopEnd)
|
||||
return lexKeyStart
|
||||
}
|
||||
|
||||
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
|
||||
// or a table.) It must see only whitespace, and will turn back to lexTop
|
||||
// upon a new line. If it sees EOF, it will quit the lexer successfully.
|
||||
func lexTopEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == commentStart:
|
||||
// a comment will read to a new line for us.
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
case isWhitespace(r):
|
||||
return lexTopEnd
|
||||
case isNL(r):
|
||||
lx.ignore()
|
||||
return lexTop
|
||||
case r == eof:
|
||||
lx.ignore()
|
||||
return lexTop
|
||||
}
|
||||
return lx.errorf("Expected a top-level item to end with a new line, "+
|
||||
"comment or EOF, but got %q instead.", r)
|
||||
}
|
||||
|
||||
// lexTable lexes the beginning of a table. Namely, it makes sure that
|
||||
// it starts with a character other than '.' and ']'.
|
||||
// It assumes that '[' has already been consumed.
|
||||
// It also handles the case that this is an item in an array of tables.
|
||||
// e.g., '[[name]]'.
|
||||
func lexTableStart(lx *lexer) stateFn {
|
||||
if lx.peek() == arrayTableStart {
|
||||
lx.next()
|
||||
lx.emit(itemArrayTableStart)
|
||||
lx.push(lexArrayTableEnd)
|
||||
} else {
|
||||
lx.emit(itemTableStart)
|
||||
lx.push(lexTableEnd)
|
||||
}
|
||||
return lexTableNameStart
|
||||
}
|
||||
|
||||
func lexTableEnd(lx *lexer) stateFn {
|
||||
lx.emit(itemTableEnd)
|
||||
return lexTopEnd
|
||||
}
|
||||
|
||||
func lexArrayTableEnd(lx *lexer) stateFn {
|
||||
if r := lx.next(); r != arrayTableEnd {
|
||||
return lx.errorf("Expected end of table array name delimiter %q, "+
|
||||
"but got %q instead.", arrayTableEnd, r)
|
||||
}
|
||||
lx.emit(itemArrayTableEnd)
|
||||
return lexTopEnd
|
||||
}
|
||||
|
||||
func lexTableNameStart(lx *lexer) stateFn {
|
||||
switch lx.next() {
|
||||
case tableEnd, eof:
|
||||
return lx.errorf("Unexpected end of table. (Tables cannot " +
|
||||
"be empty.)")
|
||||
case tableSep:
|
||||
return lx.errorf("Unexpected table separator. (Tables cannot " +
|
||||
"be empty.)")
|
||||
}
|
||||
return lexTableName
|
||||
}
|
||||
|
||||
// lexTableName lexes the name of a table. It assumes that at least one
|
||||
// valid character for the table has already been read.
|
||||
func lexTableName(lx *lexer) stateFn {
|
||||
switch lx.peek() {
|
||||
case eof:
|
||||
return lx.errorf("Unexpected end of table name %q.", lx.current())
|
||||
case tableStart:
|
||||
return lx.errorf("Table names cannot contain %q or %q.",
|
||||
tableStart, tableEnd)
|
||||
case tableEnd:
|
||||
lx.emit(itemText)
|
||||
lx.next()
|
||||
return lx.pop()
|
||||
case tableSep:
|
||||
lx.emit(itemText)
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lexTableNameStart
|
||||
}
|
||||
lx.next()
|
||||
return lexTableName
|
||||
}
|
||||
|
||||
// lexKeyStart consumes a key name up until the first non-whitespace character.
|
||||
// lexKeyStart will ignore whitespace.
|
||||
func lexKeyStart(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
switch {
|
||||
case r == keySep:
|
||||
return lx.errorf("Unexpected key separator %q.", keySep)
|
||||
case isWhitespace(r) || isNL(r):
|
||||
lx.next()
|
||||
return lexSkip(lx, lexKeyStart)
|
||||
}
|
||||
|
||||
lx.ignore()
|
||||
lx.emit(itemKeyStart)
|
||||
lx.next()
|
||||
return lexKey
|
||||
}
|
||||
|
||||
// lexKey consumes the text of a key. Assumes that the first character (which
|
||||
// is not whitespace) has already been consumed.
|
||||
func lexKey(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
|
||||
// Keys cannot contain a '#' character.
|
||||
if r == commentStart {
|
||||
return lx.errorf("Key cannot contain a '#' character.")
|
||||
}
|
||||
|
||||
// XXX: Possible divergence from spec?
|
||||
// "Keys start with the first non-whitespace character and end with the
|
||||
// last non-whitespace character before the equals sign."
|
||||
// Note here that whitespace is either a tab or a space.
|
||||
// But we'll call it quits if we see a new line too.
|
||||
if isNL(r) {
|
||||
lx.emitTrim(itemText)
|
||||
return lexKeyEnd
|
||||
}
|
||||
|
||||
// Let's also call it quits if we see an equals sign.
|
||||
if r == keySep {
|
||||
lx.emitTrim(itemText)
|
||||
return lexKeyEnd
|
||||
}
|
||||
|
||||
lx.next()
|
||||
return lexKey
|
||||
}
|
||||
|
||||
// lexKeyEnd consumes the end of a key (up to the key separator).
|
||||
// Assumes that any whitespace after a key has been consumed.
|
||||
func lexKeyEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if r == keySep {
|
||||
return lexSkip(lx, lexValue)
|
||||
}
|
||||
return lx.errorf("Expected key separator %q, but got %q instead.",
|
||||
keySep, r)
|
||||
}
|
||||
|
||||
// lexValue starts the consumption of a value anywhere a value is expected.
|
||||
// lexValue will ignore whitespace.
|
||||
// After a value is lexed, the last state on the stack is popped and returned.
|
||||
func lexValue(lx *lexer) stateFn {
|
||||
// We allow whitespace to precede a value, but NOT new lines.
|
||||
// In array syntax, the array states are responsible for ignoring new lines.
|
||||
r := lx.next()
|
||||
if isWhitespace(r) {
|
||||
return lexSkip(lx, lexValue)
|
||||
}
|
||||
|
||||
switch {
|
||||
case r == arrayStart:
|
||||
lx.ignore()
|
||||
lx.emit(itemArray)
|
||||
return lexArrayValue
|
||||
case r == stringStart:
|
||||
lx.ignore() // ignore the '"'
|
||||
return lexString
|
||||
case r == 't':
|
||||
return lexTrue
|
||||
case r == 'f':
|
||||
return lexFalse
|
||||
case r == '-':
|
||||
return lexNumberStart
|
||||
case isDigit(r):
|
||||
lx.backup() // avoid an extra state and use the same as above
|
||||
return lexNumberOrDateStart
|
||||
case r == '.': // special error case, be kind to users
|
||||
return lx.errorf("Floats must start with a digit, not '.'.")
|
||||
}
|
||||
return lx.errorf("Expected value but found %q instead.", r)
|
||||
}
|
||||
|
||||
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
|
||||
// have already been consumed. All whitespace and new lines are ignored.
|
||||
func lexArrayValue(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r) || isNL(r):
|
||||
return lexSkip(lx, lexArrayValue)
|
||||
case r == commentStart:
|
||||
lx.push(lexArrayValue)
|
||||
return lexCommentStart
|
||||
case r == arrayValTerm:
|
||||
return lx.errorf("Unexpected array value terminator %q.",
|
||||
arrayValTerm)
|
||||
case r == arrayEnd:
|
||||
return lexArrayEnd
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.push(lexArrayValueEnd)
|
||||
return lexValue
|
||||
}
|
||||
|
||||
// lexArrayValueEnd consumes the cruft between values of an array. Namely,
|
||||
// it ignores whitespace and expects either a ',' or a ']'.
|
||||
func lexArrayValueEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r) || isNL(r):
|
||||
return lexSkip(lx, lexArrayValueEnd)
|
||||
case r == commentStart:
|
||||
lx.push(lexArrayValueEnd)
|
||||
return lexCommentStart
|
||||
case r == arrayValTerm:
|
||||
lx.ignore()
|
||||
return lexArrayValue // move on to the next value
|
||||
case r == arrayEnd:
|
||||
return lexArrayEnd
|
||||
}
|
||||
return lx.errorf("Expected an array value terminator %q or an array "+
|
||||
"terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r)
|
||||
}
|
||||
|
||||
// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has
|
||||
// just been consumed.
|
||||
func lexArrayEnd(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
lx.emit(itemArrayEnd)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexString consumes the inner contents of a string. It assumes that the
|
||||
// beginning '"' has already been consumed and ignored.
|
||||
func lexString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isNL(r):
|
||||
return lx.errorf("Strings cannot contain new lines.")
|
||||
case r == '\\':
|
||||
return lexStringEscape
|
||||
case r == stringEnd:
|
||||
lx.backup()
|
||||
lx.emit(itemString)
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
}
|
||||
return lexString
|
||||
}
|
||||
|
||||
// lexStringEscape consumes an escaped character. It assumes that the preceding
|
||||
// '\\' has already been consumed.
|
||||
func lexStringEscape(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch r {
|
||||
case 'b':
|
||||
fallthrough
|
||||
case 't':
|
||||
fallthrough
|
||||
case 'n':
|
||||
fallthrough
|
||||
case 'f':
|
||||
fallthrough
|
||||
case 'r':
|
||||
fallthrough
|
||||
case '"':
|
||||
fallthrough
|
||||
case '/':
|
||||
fallthrough
|
||||
case '\\':
|
||||
return lexString
|
||||
case 'u':
|
||||
return lexStringUnicode
|
||||
}
|
||||
return lx.errorf("Invalid escape character %q. Only the following "+
|
||||
"escape characters are allowed: "+
|
||||
"\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, and \\uXXXX.", r)
|
||||
}
|
||||
|
||||
// lexStringUnicode consumes four hexadecimal digits following '\u'. It assumes
// that the '\u' has already been consumed.
|
||||
func lexStringUnicode(lx *lexer) stateFn {
|
||||
var r rune
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
r = lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf("Expected four hexadecimal digits after '\\x', "+
|
||||
"but got '%s' instead.", lx.current())
|
||||
}
|
||||
}
|
||||
return lexString
|
||||
}
|
||||
|
||||
// lexNumberOrDateStart consumes either a (positive) integer, float or datetime.
|
||||
// It assumes that NO negative sign has been consumed.
|
||||
func lexNumberOrDateStart(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if !isDigit(r) {
|
||||
if r == '.' {
|
||||
return lx.errorf("Floats must start with a digit, not '.'.")
|
||||
} else {
|
||||
return lx.errorf("Expected a digit but got %q.", r)
|
||||
}
|
||||
}
|
||||
return lexNumberOrDate
|
||||
}
|
||||
|
||||
// lexNumberOrDate consumes either a (positive) integer, float or datetime.
|
||||
func lexNumberOrDate(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == '-':
|
||||
if lx.pos-lx.start != 5 {
|
||||
return lx.errorf("All ISO8601 dates must be in full Zulu form.")
|
||||
}
|
||||
return lexDateAfterYear
|
||||
case isDigit(r):
|
||||
return lexNumberOrDate
|
||||
case r == '.':
|
||||
return lexFloatStart
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemInteger)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format.
|
||||
// It assumes that "YYYY-" has already been consumed.
|
||||
func lexDateAfterYear(lx *lexer) stateFn {
|
||||
formats := []rune{
|
||||
// digits are '0'.
|
||||
// everything else is direct equality.
|
||||
'0', '0', '-', '0', '0',
|
||||
'T',
|
||||
'0', '0', ':', '0', '0', ':', '0', '0',
|
||||
'Z',
|
||||
}
|
||||
for _, f := range formats {
|
||||
r := lx.next()
|
||||
if f == '0' {
|
||||
if !isDigit(r) {
|
||||
return lx.errorf("Expected digit in ISO8601 datetime, "+
|
||||
"but found %q instead.", r)
|
||||
}
|
||||
} else if f != r {
|
||||
return lx.errorf("Expected %q in ISO8601 datetime, "+
|
||||
"but found %q instead.", f, r)
|
||||
}
|
||||
}
|
||||
lx.emit(itemDatetime)
|
||||
return lx.pop()
|
||||
}
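lexDateAfterYear validates the remainder of a Zulu timestamp against a template in which '0' stands for any digit and every other rune must match literally. The same idea in isolation, as a hypothetical standalone helper outside the lexer:

// matchesTemplate reports whether s fits a template where '0' accepts any
// ASCII digit and every other rune must appear verbatim.
func matchesTemplate(s, template string) bool {
	rs, ts := []rune(s), []rune(template)
	if len(rs) != len(ts) {
		return false
	}
	for i, f := range ts {
		if f == '0' {
			if rs[i] < '0' || rs[i] > '9' {
				return false
			}
		} else if rs[i] != f {
			return false
		}
	}
	return true
}

// matchesTemplate("05-11T20:30:40Z", "00-00T00:00:00Z") == true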
|
||||
|
||||
// lexNumberStart consumes either an integer or a float. It assumes that a
|
||||
// negative sign has already been read, but that *no* digits have been consumed.
|
||||
// lexNumberStart will move to the appropriate integer or float states.
|
||||
func lexNumberStart(lx *lexer) stateFn {
|
||||
// we MUST see a digit. Even floats have to start with a digit.
|
||||
r := lx.next()
|
||||
if !isDigit(r) {
|
||||
if r == '.' {
|
||||
return lx.errorf("Floats must start with a digit, not '.'.")
|
||||
} else {
|
||||
return lx.errorf("Expected a digit but got %q.", r)
|
||||
}
|
||||
}
|
||||
return lexNumber
|
||||
}
|
||||
|
||||
// lexNumber consumes an integer or a float after seeing the first digit.
|
||||
func lexNumber(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isDigit(r):
|
||||
return lexNumber
|
||||
case r == '.':
|
||||
return lexFloatStart
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemInteger)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexFloatStart starts the consumption of digits of a float after a '.'.
|
||||
// Namely, at least one digit is required.
|
||||
func lexFloatStart(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if !isDigit(r) {
|
||||
return lx.errorf("Floats must have a digit after the '.', but got "+
|
||||
"%q instead.", r)
|
||||
}
|
||||
return lexFloat
|
||||
}
|
||||
|
||||
// lexFloat consumes the digits of a float after a '.'.
|
||||
// Assumes that one digit has been consumed after a '.' already.
|
||||
func lexFloat(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isDigit(r) {
|
||||
return lexFloat
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemFloat)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexConst consumes the s[1:] in s. It assumes that s[0] has already been
|
||||
// consumed.
|
||||
func lexConst(lx *lexer, s string) stateFn {
|
||||
for i := range s[1:] {
|
||||
if r := lx.next(); r != rune(s[i+1]) {
|
||||
return lx.errorf("Expected %q, but found %q instead.", s[:i+1],
|
||||
s[:i]+string(r))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// lexTrue consumes the "rue" in "true". It assumes that 't' has already
|
||||
// been consumed.
|
||||
func lexTrue(lx *lexer) stateFn {
|
||||
if fn := lexConst(lx, "true"); fn != nil {
|
||||
return fn
|
||||
}
|
||||
lx.emit(itemBool)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexFalse consumes the "alse" in "false". It assumes that 'f' has already
|
||||
// been consumed.
|
||||
func lexFalse(lx *lexer) stateFn {
|
||||
if fn := lexConst(lx, "false"); fn != nil {
|
||||
return fn
|
||||
}
|
||||
lx.emit(itemBool)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexCommentStart begins the lexing of a comment. It will emit
|
||||
// itemCommentStart and consume no characters, passing control to lexComment.
|
||||
func lexCommentStart(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
lx.emit(itemCommentStart)
|
||||
return lexComment
|
||||
}
|
||||
|
||||
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
|
||||
// It will consume *up to* the first new line character, and pass control
|
||||
// back to the last state on the stack.
|
||||
func lexComment(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
if isNL(r) || r == eof {
|
||||
lx.emit(itemText)
|
||||
return lx.pop()
|
||||
}
|
||||
lx.next()
|
||||
return lexComment
|
||||
}
|
||||
|
||||
// lexSkip ignores all slurped input and moves on to the next state.
|
||||
func lexSkip(lx *lexer, nextState stateFn) stateFn {
|
||||
return func(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
return nextState
|
||||
}
|
||||
}
|
||||
|
||||
// isWhitespace returns true if `r` is a whitespace character according
|
||||
// to the spec.
|
||||
func isWhitespace(r rune) bool {
|
||||
return r == '\t' || r == ' '
|
||||
}
|
||||
|
||||
func isNL(r rune) bool {
|
||||
return r == '\n' || r == '\r'
|
||||
}
|
||||
|
||||
func isDigit(r rune) bool {
|
||||
return r >= '0' && r <= '9'
|
||||
}
|
||||
|
||||
func isHexadecimal(r rune) bool {
|
||||
return (r >= '0' && r <= '9') ||
|
||||
(r >= 'a' && r <= 'f') ||
|
||||
(r >= 'A' && r <= 'F')
|
||||
}
|
||||
|
||||
func (itype itemType) String() string {
|
||||
switch itype {
|
||||
case itemError:
|
||||
return "Error"
|
||||
case itemNIL:
|
||||
return "NIL"
|
||||
case itemEOF:
|
||||
return "EOF"
|
||||
case itemText:
|
||||
return "Text"
|
||||
case itemString:
|
||||
return "String"
|
||||
case itemBool:
|
||||
return "Bool"
|
||||
case itemInteger:
|
||||
return "Integer"
|
||||
case itemFloat:
|
||||
return "Float"
|
||||
case itemDatetime:
|
||||
return "DateTime"
|
||||
case itemTableStart:
|
||||
return "TableStart"
|
||||
case itemTableEnd:
|
||||
return "TableEnd"
|
||||
case itemKeyStart:
|
||||
return "KeyStart"
|
||||
case itemArray:
|
||||
return "Array"
|
||||
case itemArrayEnd:
|
||||
return "ArrayEnd"
|
||||
case itemCommentStart:
|
||||
return "CommentStart"
|
||||
}
|
||||
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
|
||||
}
|
||||
|
||||
func (item item) String() string {
|
||||
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
|
||||
}
|
417
vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
@ -1,417 +0,0 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type parser struct {
|
||||
mapping map[string]interface{}
|
||||
types map[string]tomlType
|
||||
lx *lexer
|
||||
|
||||
// A list of keys in the order that they appear in the TOML data.
|
||||
ordered []Key
|
||||
|
||||
// the full key for the current hash in scope
|
||||
context Key
|
||||
|
||||
// the base key name for everything except hashes
|
||||
currentKey string
|
||||
|
||||
// rough approximation of line number
|
||||
approxLine int
|
||||
|
||||
// A map of 'key.group.names' to whether they were created implicitly.
|
||||
implicits map[string]bool
|
||||
}
|
||||
|
||||
type parseError string
|
||||
|
||||
func (pe parseError) Error() string {
|
||||
return string(pe)
|
||||
}
|
||||
|
||||
func parse(data string) (p *parser, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
var ok bool
|
||||
if err, ok = r.(parseError); ok {
|
||||
return
|
||||
}
|
||||
panic(r)
|
||||
}
|
||||
}()
|
||||
|
||||
p = &parser{
|
||||
mapping: make(map[string]interface{}),
|
||||
types: make(map[string]tomlType),
|
||||
lx: lex(data),
|
||||
ordered: make([]Key, 0),
|
||||
implicits: make(map[string]bool),
|
||||
}
|
||||
for {
|
||||
item := p.next()
|
||||
if item.typ == itemEOF {
|
||||
break
|
||||
}
|
||||
p.topLevel(item)
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
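Like safeEncode in encode.go earlier in this diff, parse converts panics of a known error type back into an ordinary returned error, so deeply nested helpers can call panicf without threading errors upward. The pattern in isolation, as a minimal sketch with hypothetical names:

type demoError string

func (e demoError) Error() string { return string(e) }

// runSafely turns a demoError panic raised anywhere below f back into a
// returned error; any other panic is treated as a bug and re-raised.
func runSafely(f func()) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if de, ok := r.(demoError); ok {
				err = de
				return
			}
			panic(r)
		}
	}()
	f()
	return nil
}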
|
||||
|
||||
func (p *parser) panicf(format string, v ...interface{}) {
|
||||
msg := fmt.Sprintf("Near line %d, key '%s': %s",
|
||||
p.approxLine, p.current(), fmt.Sprintf(format, v...))
|
||||
panic(parseError(msg))
|
||||
}
|
||||
|
||||
func (p *parser) next() item {
|
||||
it := p.lx.nextItem()
|
||||
if it.typ == itemError {
|
||||
p.panicf("Near line %d: %s", it.line, it.val)
|
||||
}
|
||||
return it
|
||||
}
|
||||
|
||||
func (p *parser) bug(format string, v ...interface{}) {
|
||||
log.Fatalf("BUG: %s\n\n", fmt.Sprintf(format, v...))
|
||||
}
|
||||
|
||||
func (p *parser) expect(typ itemType) item {
|
||||
it := p.next()
|
||||
p.assertEqual(typ, it.typ)
|
||||
return it
|
||||
}
|
||||
|
||||
func (p *parser) assertEqual(expected, got itemType) {
|
||||
if expected != got {
|
||||
p.bug("Expected '%s' but got '%s'.", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *parser) topLevel(item item) {
|
||||
switch item.typ {
|
||||
case itemCommentStart:
|
||||
p.approxLine = item.line
|
||||
p.expect(itemText)
|
||||
case itemTableStart:
|
||||
kg := p.expect(itemText)
|
||||
p.approxLine = kg.line
|
||||
|
||||
key := make(Key, 0)
|
||||
for ; kg.typ == itemText; kg = p.next() {
|
||||
key = append(key, kg.val)
|
||||
}
|
||||
p.assertEqual(itemTableEnd, kg.typ)
|
||||
|
||||
p.establishContext(key, false)
|
||||
p.setType("", tomlHash)
|
||||
p.ordered = append(p.ordered, key)
|
||||
case itemArrayTableStart:
|
||||
kg := p.expect(itemText)
|
||||
p.approxLine = kg.line
|
||||
|
||||
key := make(Key, 0)
|
||||
for ; kg.typ == itemText; kg = p.next() {
|
||||
key = append(key, kg.val)
|
||||
}
|
||||
p.assertEqual(itemArrayTableEnd, kg.typ)
|
||||
|
||||
p.establishContext(key, true)
|
||||
p.setType("", tomlArrayHash)
|
||||
p.ordered = append(p.ordered, key)
|
||||
case itemKeyStart:
|
||||
kname := p.expect(itemText)
|
||||
p.currentKey = kname.val
|
||||
p.approxLine = kname.line
|
||||
|
||||
val, typ := p.value(p.next())
|
||||
p.setValue(p.currentKey, val)
|
||||
p.setType(p.currentKey, typ)
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
|
||||
p.currentKey = ""
|
||||
default:
|
||||
p.bug("Unexpected type at top level: %s", item.typ)
|
||||
}
|
||||
}
|
||||
|
||||
// value translates an expected value from the lexer into a Go value wrapped
|
||||
// as an empty interface.
|
||||
func (p *parser) value(it item) (interface{}, tomlType) {
|
||||
switch it.typ {
|
||||
case itemString:
|
||||
return p.replaceUnicode(replaceEscapes(it.val)), p.typeOfPrimitive(it)
|
||||
case itemBool:
|
||||
switch it.val {
|
||||
case "true":
|
||||
return true, p.typeOfPrimitive(it)
|
||||
case "false":
|
||||
return false, p.typeOfPrimitive(it)
|
||||
}
|
||||
p.bug("Expected boolean value, but got '%s'.", it.val)
|
||||
case itemInteger:
|
||||
num, err := strconv.ParseInt(it.val, 10, 64)
|
||||
if err != nil {
|
||||
// See comment below for floats describing why we make a
|
||||
// distinction between a bug and a user error.
|
||||
if e, ok := err.(*strconv.NumError); ok &&
|
||||
e.Err == strconv.ErrRange {
|
||||
|
||||
p.panicf("Integer '%s' is out of the range of 64-bit "+
|
||||
"signed integers.", it.val)
|
||||
} else {
|
||||
p.bug("Expected integer value, but got '%s'.", it.val)
|
||||
}
|
||||
}
|
||||
return num, p.typeOfPrimitive(it)
|
||||
case itemFloat:
|
||||
num, err := strconv.ParseFloat(it.val, 64)
|
||||
if err != nil {
|
||||
// Distinguish float values. Normally, it'd be a bug if the lexer
|
||||
// provides an invalid float, but it's possible that the float is
|
||||
// out of range of valid values (which the lexer cannot determine).
|
||||
// So mark the former as a bug but the latter as a legitimate user
|
||||
// error.
|
||||
//
|
||||
// This is also true for integers.
|
||||
if e, ok := err.(*strconv.NumError); ok &&
|
||||
e.Err == strconv.ErrRange {
|
||||
|
||||
p.panicf("Float '%s' is out of the range of 64-bit "+
|
||||
"IEEE-754 floating-point numbers.", it.val)
|
||||
} else {
|
||||
p.bug("Expected float value, but got '%s'.", it.val)
|
||||
}
|
||||
}
|
||||
return num, p.typeOfPrimitive(it)
|
||||
case itemDatetime:
|
||||
t, err := time.Parse("2006-01-02T15:04:05Z", it.val)
|
||||
if err != nil {
|
||||
p.bug("Expected Zulu formatted DateTime, but got '%s'.", it.val)
|
||||
}
|
||||
return t, p.typeOfPrimitive(it)
|
||||
case itemArray:
|
||||
array := make([]interface{}, 0)
|
||||
types := make([]tomlType, 0)
|
||||
|
||||
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
|
||||
if it.typ == itemCommentStart {
|
||||
p.expect(itemText)
|
||||
continue
|
||||
}
|
||||
|
||||
val, typ := p.value(it)
|
||||
array = append(array, val)
|
||||
types = append(types, typ)
|
||||
}
|
||||
return array, p.typeOfArray(types)
|
||||
}
|
||||
p.bug("Unexpected value type: %s", it.typ)
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// establishContext sets the current context of the parser,
|
||||
// where the context is either a hash or an array of hashes. Which one is
|
||||
// set depends on the value of the `array` parameter.
|
||||
//
|
||||
// Establishing the context also makes sure that the key isn't a duplicate, and
|
||||
// will create implicit hashes automatically.
|
||||
func (p *parser) establishContext(key Key, array bool) {
|
||||
var ok bool
|
||||
|
||||
// Always start at the top level and drill down for our context.
|
||||
hashContext := p.mapping
|
||||
keyContext := make(Key, 0)
|
||||
|
||||
// We only need implicit hashes for key[0:-1]
|
||||
for _, k := range key[0 : len(key)-1] {
|
||||
_, ok = hashContext[k]
|
||||
keyContext = append(keyContext, k)
|
||||
|
||||
// No key? Make an implicit hash and move on.
|
||||
if !ok {
|
||||
p.addImplicit(keyContext)
|
||||
hashContext[k] = make(map[string]interface{})
|
||||
}
|
||||
|
||||
// If the hash context is actually an array of tables, then set
|
||||
// the hash context to the last element in that array.
|
||||
//
|
||||
// Otherwise, it better be a table, since this MUST be a key group (by
|
||||
// virtue of it not being the last element in a key).
|
||||
switch t := hashContext[k].(type) {
|
||||
case []map[string]interface{}:
|
||||
hashContext = t[len(t)-1]
|
||||
case map[string]interface{}:
|
||||
hashContext = t
|
||||
default:
|
||||
p.panicf("Key '%s' was already created as a hash.", keyContext)
|
||||
}
|
||||
}
|
||||
|
||||
p.context = keyContext
|
||||
if array {
|
||||
// If this is the first element for this array, then allocate a new
|
||||
// list of tables for it.
|
||||
k := key[len(key)-1]
|
||||
if _, ok := hashContext[k]; !ok {
|
||||
hashContext[k] = make([]map[string]interface{}, 0, 5)
|
||||
}
|
||||
|
||||
// Add a new table. But make sure the key hasn't already been used
|
||||
// for something else.
|
||||
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
|
||||
hashContext[k] = append(hash, make(map[string]interface{}))
|
||||
} else {
|
||||
p.panicf("Key '%s' was already created and cannot be used as "+
|
||||
"an array.", keyContext)
|
||||
}
|
||||
} else {
|
||||
p.setValue(key[len(key)-1], make(map[string]interface{}))
|
||||
}
|
||||
p.context = append(p.context, key[len(key)-1])
|
||||
}
|
||||
|
||||
// setValue sets the given key to the given value in the current context.
|
||||
// It will make sure that the key hasn't already been defined, account for
|
||||
// implicit key groups.
|
||||
func (p *parser) setValue(key string, value interface{}) {
|
||||
var tmpHash interface{}
|
||||
var ok bool
|
||||
|
||||
hash := p.mapping
|
||||
keyContext := make(Key, 0)
|
||||
for _, k := range p.context {
|
||||
keyContext = append(keyContext, k)
|
||||
if tmpHash, ok = hash[k]; !ok {
|
||||
p.bug("Context for key '%s' has not been established.", keyContext)
|
||||
}
|
||||
switch t := tmpHash.(type) {
|
||||
case []map[string]interface{}:
|
||||
// The context is a table of hashes. Pick the most recent table
|
||||
// defined as the current hash.
|
||||
hash = t[len(t)-1]
|
||||
case map[string]interface{}:
|
||||
hash = t
|
||||
default:
|
||||
p.bug("Expected hash to have type 'map[string]interface{}', but "+
|
||||
"it has '%T' instead.", tmpHash)
|
||||
}
|
||||
}
|
||||
keyContext = append(keyContext, key)
|
||||
|
||||
if _, ok := hash[key]; ok {
|
||||
// Typically, if the given key has already been set, then we have
|
||||
// to raise an error since duplicate keys are disallowed. However,
|
||||
// it's possible that a key was previously defined implicitly. In this
|
||||
// case, it is allowed to be redefined concretely. (See the
|
||||
// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
|
||||
//
|
||||
// But we have to make sure to stop marking it as an implicit. (So that
|
||||
// another redefinition provokes an error.)
|
||||
//
|
||||
// Note that since it has already been defined (as a hash), we don't
|
||||
// want to overwrite it. So our business is done.
|
||||
if p.isImplicit(keyContext) {
|
||||
p.removeImplicit(keyContext)
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, we have a concrete key trying to override a previous
|
||||
// key, which is *always* wrong.
|
||||
p.panicf("Key '%s' has already been defined.", keyContext)
|
||||
}
|
||||
hash[key] = value
|
||||
}
|
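The implicit-table handling described in the comments above (and exercised by `tests/valid/implicit-and-explicit-after.toml` in `toml-test`) can be observed through the package's exported `Decode` function. A minimal, standalone sketch, assuming that public API rather than the internal parser:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	// [a.b.c] implicitly creates the tables "a" and "a.b"; defining [a]
	// afterwards is still legal because setValue clears the implicit marker
	// instead of reporting a duplicate key.
	const doc = `
[a.b.c]
answer = 42

[a]
better = 43
`
	var v map[string]interface{}
	if _, err := toml.Decode(doc, &v); err != nil {
		fmt.Println("unexpected error:", err)
		return
	}
	fmt.Println(v["a"])
}
```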
||||
|
||||
// setType sets the type of a particular value at a given key.
|
||||
// It should be called immediately AFTER setValue.
|
||||
//
|
||||
// Note that if `key` is empty, then the type given will be applied to the
|
||||
// current context (which is either a table or an array of tables).
|
||||
func (p *parser) setType(key string, typ tomlType) {
|
||||
keyContext := make(Key, 0, len(p.context)+1)
|
||||
for _, k := range p.context {
|
||||
keyContext = append(keyContext, k)
|
||||
}
|
||||
if len(key) > 0 { // allow type setting for hashes
|
||||
keyContext = append(keyContext, key)
|
||||
}
|
||||
p.types[keyContext.String()] = typ
|
||||
}
|
||||
|
||||
// addImplicit sets the given Key as having been created implicitly.
|
||||
func (p *parser) addImplicit(key Key) {
|
||||
p.implicits[key.String()] = true
|
||||
}
|
||||
|
||||
// removeImplicit stops tagging the given key as having been implicitly created.
|
||||
func (p *parser) removeImplicit(key Key) {
|
||||
p.implicits[key.String()] = false
|
||||
}
|
||||
|
||||
// isImplicit returns true if the key group pointed to by the key was created
|
||||
// implicitly.
|
||||
func (p *parser) isImplicit(key Key) bool {
|
||||
return p.implicits[key.String()]
|
||||
}
|
||||
|
||||
// current returns the full key name of the current context.
|
||||
func (p *parser) current() string {
|
||||
if len(p.currentKey) == 0 {
|
||||
return p.context.String()
|
||||
}
|
||||
if len(p.context) == 0 {
|
||||
return p.currentKey
|
||||
}
|
||||
return fmt.Sprintf("%s.%s", p.context, p.currentKey)
|
||||
}
|
||||
|
||||
func replaceEscapes(s string) string {
|
||||
return strings.NewReplacer(
|
||||
"\\b", "\u0008",
|
||||
"\\t", "\u0009",
|
||||
"\\n", "\u000A",
|
||||
"\\f", "\u000C",
|
||||
"\\r", "\u000D",
|
||||
"\\\"", "\u0022",
|
||||
"\\/", "\u002F",
|
||||
"\\\\", "\u005C",
|
||||
).Replace(s)
|
||||
}
|
||||
|
||||
func (p *parser) replaceUnicode(s string) string {
|
||||
indexEsc := func() int {
|
||||
return strings.Index(s, "\\u")
|
||||
}
|
||||
for i := indexEsc(); i != -1; i = indexEsc() {
|
||||
asciiBytes := s[i+2 : i+6]
|
||||
s = strings.Replace(s, s[i:i+6], p.asciiEscapeToUnicode(asciiBytes), -1)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (p *parser) asciiEscapeToUnicode(s string) string {
|
||||
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
|
||||
if err != nil {
|
||||
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
|
||||
"lexer claims it's OK: %s", s, err)
|
||||
}
|
||||
|
||||
// BUG(burntsushi)
|
||||
// I honestly don't understand how this works. I can't seem
|
||||
// to find a way to make this fail. I figured this would fail on invalid
|
||||
// UTF-8 characters like U+DCFF, but it doesn't.
|
||||
r := string(rune(hex))
|
||||
if !utf8.ValidString(r) {
|
||||
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
|
||||
}
|
||||
return string(r)
|
||||
}
|
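The `BUG(burntsushi)` comment above can be explained by Go's conversion rules rather than by the surrounding code: `string(rune(x))` maps any invalid code point, including surrogate halves such as U+DCFF, to the replacement character U+FFFD, which is itself valid UTF-8, so the `utf8.ValidString` guard never fails. A minimal sketch (not part of the vendored file) illustrating this:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	// Converting a surrogate half to a string yields U+FFFD, so the
	// ValidString check in asciiEscapeToUnicode can never trip.
	r := string(rune(0xDCFF))
	fmt.Printf("%+q %v\n", r, utf8.ValidString(r)) // "\ufffd" true
}
```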
1 vendor/github.com/BurntSushi/toml/session.vim (generated, vendored)
@@ -1 +0,0 @@
|
||||
au BufWritePost *.go silent!make tags > /dev/null 2>&1
|
85 vendor/github.com/BurntSushi/toml/type_check.go (generated, vendored)
@@ -1,85 +0,0 @@
|
||||
package toml
|
||||
|
||||
// tomlType represents any Go type that corresponds to a TOML type.
|
||||
// While the first draft of the TOML spec has a simplistic type system that
|
||||
// probably doesn't need this level of sophistication, we seem to be militating
|
||||
// toward adding real composite types.
|
||||
type tomlType interface {
|
||||
typeString() string
|
||||
}
|
||||
|
||||
// typeEqual accepts any two types and returns true if they are equal.
|
||||
func typeEqual(t1, t2 tomlType) bool {
|
||||
if t1 == nil || t2 == nil {
|
||||
return false
|
||||
}
|
||||
return t1.typeString() == t2.typeString()
|
||||
}
|
||||
|
||||
func typeIsHash(t tomlType) bool {
|
||||
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
|
||||
}
|
||||
|
||||
type tomlBaseType string
|
||||
|
||||
func (btype tomlBaseType) typeString() string {
|
||||
return string(btype)
|
||||
}
|
||||
|
||||
func (btype tomlBaseType) String() string {
|
||||
return btype.typeString()
|
||||
}
|
||||
|
||||
var (
|
||||
tomlInteger tomlBaseType = "Integer"
|
||||
tomlFloat tomlBaseType = "Float"
|
||||
tomlDatetime tomlBaseType = "Datetime"
|
||||
tomlString tomlBaseType = "String"
|
||||
tomlBool tomlBaseType = "Bool"
|
||||
tomlArray tomlBaseType = "Array"
|
||||
tomlHash tomlBaseType = "Hash"
|
||||
tomlArrayHash tomlBaseType = "ArrayHash"
|
||||
)
|
||||
|
||||
// typeOfPrimitive returns a tomlType of any primitive value in TOML.
|
||||
// Primitive values are: Integer, Float, Datetime, String and Bool.
|
||||
//
|
||||
// Passing a lexer item other than the following will cause a BUG message
|
||||
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
|
||||
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
|
||||
switch lexItem.typ {
|
||||
case itemInteger:
|
||||
return tomlInteger
|
||||
case itemFloat:
|
||||
return tomlFloat
|
||||
case itemDatetime:
|
||||
return tomlDatetime
|
||||
case itemString:
|
||||
return tomlString
|
||||
case itemBool:
|
||||
return tomlBool
|
||||
}
|
||||
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// typeOfArray returns a tomlType for an array given a list of types of its
|
||||
// values.
|
||||
//
|
||||
// In the current spec, if an array is homogeneous, then its type is always
|
||||
// "Array". If the array is not homogeneous, an error is generated.
|
||||
func (p *parser) typeOfArray(types []tomlType) tomlType {
|
||||
// Empty arrays are cool.
|
||||
if len(types) == 0 {
|
||||
return tomlArray
|
||||
}
|
||||
|
||||
theType := types[0]
|
||||
for _, t := range types[1:] {
|
||||
if !typeEqual(theType, t) {
|
||||
p.panicf("Array contains values of type '%s' and '%s', but arrays "+
|
||||
"must be homogeneous.", theType, t)
|
||||
}
|
||||
}
|
||||
return tomlArray
|
||||
}
|
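As a concrete illustration of the homogeneity rule enforced by `typeOfArray`, decoding an array with mixed element types is expected to fail. A small sketch, again assuming the package's exported `Decode` function:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}
	// Mixed Integer/String elements violate the homogeneity check above.
	if _, err := toml.Decode(`mixed = [1, "two"]`, &v); err != nil {
		fmt.Println("got expected error:", err)
	}
}
```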
241 vendor/github.com/BurntSushi/toml/type_fields.go (generated, vendored)
@@ -1,241 +0,0 @@
|
||||
package toml
|
||||
|
||||
// Struct field handling is adapted from code in encoding/json:
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the Go distribution.
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A field represents a single field found in a struct.
|
||||
type field struct {
|
||||
name string // the name of the field (`toml` tag included)
|
||||
tag bool // whether field has a `toml` tag
|
||||
index []int // represents the depth of an anonymous field
|
||||
typ reflect.Type // the type of the field
|
||||
}
|
||||
|
||||
// byName sorts field by name, breaking ties with depth,
|
||||
// then breaking ties with "name came from toml tag", then
|
||||
// breaking ties with index sequence.
|
||||
type byName []field
|
||||
|
||||
func (x byName) Len() int { return len(x) }
|
||||
|
||||
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byName) Less(i, j int) bool {
|
||||
if x[i].name != x[j].name {
|
||||
return x[i].name < x[j].name
|
||||
}
|
||||
if len(x[i].index) != len(x[j].index) {
|
||||
return len(x[i].index) < len(x[j].index)
|
||||
}
|
||||
if x[i].tag != x[j].tag {
|
||||
return x[i].tag
|
||||
}
|
||||
return byIndex(x).Less(i, j)
|
||||
}
|
||||
|
||||
// byIndex sorts field by index sequence.
|
||||
type byIndex []field
|
||||
|
||||
func (x byIndex) Len() int { return len(x) }
|
||||
|
||||
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byIndex) Less(i, j int) bool {
|
||||
for k, xik := range x[i].index {
|
||||
if k >= len(x[j].index) {
|
||||
return false
|
||||
}
|
||||
if xik != x[j].index[k] {
|
||||
return xik < x[j].index[k]
|
||||
}
|
||||
}
|
||||
return len(x[i].index) < len(x[j].index)
|
||||
}
|
||||
|
||||
// typeFields returns a list of fields that TOML should recognize for the given
|
||||
// type. The algorithm is breadth-first search over the set of structs to
|
||||
// include - the top struct and then any reachable anonymous structs.
|
||||
func typeFields(t reflect.Type) []field {
|
||||
// Anonymous fields to explore at the current level and the next.
|
||||
current := []field{}
|
||||
next := []field{{typ: t}}
|
||||
|
||||
// Count of queued names for current level and the next.
|
||||
count := map[reflect.Type]int{}
|
||||
nextCount := map[reflect.Type]int{}
|
||||
|
||||
// Types already visited at an earlier level.
|
||||
visited := map[reflect.Type]bool{}
|
||||
|
||||
// Fields found.
|
||||
var fields []field
|
||||
|
||||
for len(next) > 0 {
|
||||
current, next = next, current[:0]
|
||||
count, nextCount = nextCount, map[reflect.Type]int{}
|
||||
|
||||
for _, f := range current {
|
||||
if visited[f.typ] {
|
||||
continue
|
||||
}
|
||||
visited[f.typ] = true
|
||||
|
||||
// Scan f.typ for fields to include.
|
||||
for i := 0; i < f.typ.NumField(); i++ {
|
||||
sf := f.typ.Field(i)
|
||||
if sf.PkgPath != "" { // unexported
|
||||
continue
|
||||
}
|
||||
name := sf.Tag.Get("toml")
|
||||
if name == "-" {
|
||||
continue
|
||||
}
|
||||
index := make([]int, len(f.index)+1)
|
||||
copy(index, f.index)
|
||||
index[len(f.index)] = i
|
||||
|
||||
ft := sf.Type
|
||||
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
|
||||
// Follow pointer.
|
||||
ft = ft.Elem()
|
||||
}
|
||||
|
||||
// Record found field and index sequence.
|
||||
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
|
||||
tagged := name != ""
|
||||
if name == "" {
|
||||
name = sf.Name
|
||||
}
|
||||
fields = append(fields, field{name, tagged, index, ft})
|
||||
if count[f.typ] > 1 {
|
||||
// If there were multiple instances, add a second,
|
||||
// so that the annihilation code will see a duplicate.
|
||||
// It only cares about the distinction between 1 or 2,
|
||||
// so don't bother generating any more copies.
|
||||
fields = append(fields, fields[len(fields)-1])
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Record new anonymous struct to explore in next round.
|
||||
nextCount[ft]++
|
||||
if nextCount[ft] == 1 {
|
||||
f := field{name: ft.Name(), index: index, typ: ft}
|
||||
next = append(next, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(byName(fields))
|
||||
|
||||
// Delete all fields that are hidden by the Go rules for embedded fields,
|
||||
// except that fields with TOML tags are promoted.
|
||||
|
||||
// The fields are sorted in primary order of name, secondary order
|
||||
// of field index length. Loop over names; for each name, delete
|
||||
// hidden fields by choosing the one dominant field that survives.
|
||||
out := fields[:0]
|
||||
for advance, i := 0, 0; i < len(fields); i += advance {
|
||||
// One iteration per name.
|
||||
// Find the sequence of fields with the name of this first field.
|
||||
fi := fields[i]
|
||||
name := fi.name
|
||||
for advance = 1; i+advance < len(fields); advance++ {
|
||||
fj := fields[i+advance]
|
||||
if fj.name != name {
|
||||
break
|
||||
}
|
||||
}
|
||||
if advance == 1 { // Only one field with this name
|
||||
out = append(out, fi)
|
||||
continue
|
||||
}
|
||||
dominant, ok := dominantField(fields[i : i+advance])
|
||||
if ok {
|
||||
out = append(out, dominant)
|
||||
}
|
||||
}
|
||||
|
||||
fields = out
|
||||
sort.Sort(byIndex(fields))
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// dominantField looks through the fields, all of which are known to
|
||||
// have the same name, to find the single field that dominates the
|
||||
// others using Go's embedding rules, modified by the presence of
|
||||
// TOML tags. If there are multiple top-level fields, the boolean
|
||||
// will be false: This condition is an error in Go and we skip all
|
||||
// the fields.
|
||||
func dominantField(fields []field) (field, bool) {
|
||||
// The fields are sorted in increasing index-length order. The winner
|
||||
// must therefore be one with the shortest index length. Drop all
|
||||
// longer entries, which is easy: just truncate the slice.
|
||||
length := len(fields[0].index)
|
||||
tagged := -1 // Index of first tagged field.
|
||||
for i, f := range fields {
|
||||
if len(f.index) > length {
|
||||
fields = fields[:i]
|
||||
break
|
||||
}
|
||||
if f.tag {
|
||||
if tagged >= 0 {
|
||||
// Multiple tagged fields at the same level: conflict.
|
||||
// Return no field.
|
||||
return field{}, false
|
||||
}
|
||||
tagged = i
|
||||
}
|
||||
}
|
||||
if tagged >= 0 {
|
||||
return fields[tagged], true
|
||||
}
|
||||
// All remaining fields have the same length. If there's more than one,
|
||||
// we have a conflict (two fields named "X" at the same level) and we
|
||||
// return no field.
|
||||
if len(fields) > 1 {
|
||||
return field{}, false
|
||||
}
|
||||
return fields[0], true
|
||||
}
|
||||
|
||||
var fieldCache struct {
|
||||
sync.RWMutex
|
||||
m map[reflect.Type][]field
|
||||
}
|
||||
|
||||
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
|
||||
func cachedTypeFields(t reflect.Type) []field {
|
||||
fieldCache.RLock()
|
||||
f := fieldCache.m[t]
|
||||
fieldCache.RUnlock()
|
||||
if f != nil {
|
||||
return f
|
||||
}
|
||||
|
||||
// Compute fields without lock.
|
||||
// Might duplicate effort but won't hold other computations back.
|
||||
f = typeFields(t)
|
||||
if f == nil {
|
||||
f = []field{}
|
||||
}
|
||||
|
||||
fieldCache.Lock()
|
||||
if fieldCache.m == nil {
|
||||
fieldCache.m = map[reflect.Type][]field{}
|
||||
}
|
||||
fieldCache.m[t] = f
|
||||
fieldCache.Unlock()
|
||||
return f
|
||||
}
|
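Since the field-resolution code above is adapted from `encoding/json` (as its header comment notes), the same dominance rules can be observed with the standard library. A minimal sketch using `encoding/json` rather than the vendored package, under that assumption:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Inner struct {
	Name string // depth 1: hidden by the shallower field below
}

type Outer struct {
	Inner
	Name string // depth 0: dominates, mirroring dominantField above
}

func main() {
	var o Outer
	if err := json.Unmarshal([]byte(`{"Name":"top"}`), &o); err != nil {
		fmt.Println(err)
		return
	}
	// The shallower Outer.Name wins; the promoted Inner.Name stays empty.
	fmt.Printf("Outer.Name=%q Inner.Name=%q\n", o.Name, o.Inner.Name)
}
```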
8 vendor/github.com/go-sql-driver/mysql/.gitignore (generated, vendored)
@@ -1,8 +0,0 @@
|
||||
.DS_Store
|
||||
.DS_Store?
|
||||
._*
|
||||
.Spotlight-V100
|
||||
.Trashes
|
||||
Icon?
|
||||
ehthumbs.db
|
||||
Thumbs.db
|
10 vendor/github.com/go-sql-driver/mysql/.travis.yml (generated, vendored)
@@ -1,10 +0,0 @@
|
||||
sudo: false
|
||||
language: go
|
||||
go:
|
||||
- 1.2
|
||||
- 1.3
|
||||
- 1.4
|
||||
- tip
|
||||
|
||||
before_script:
|
||||
- mysql -e 'create database gotest;'
|
46 vendor/github.com/go-sql-driver/mysql/AUTHORS (generated, vendored)
@@ -1,46 +0,0 @@
|
||||
# This is the official list of Go-MySQL-Driver authors for copyright purposes.
|
||||
|
||||
# If you are submitting a patch, please add your name or the name of the
|
||||
# organization which holds the copyright to this list in alphabetical order.
|
||||
|
||||
# Names should be added to this file as
|
||||
# Name <email address>
|
||||
# The email address is not required for organizations.
|
||||
# Please keep the list sorted.
|
||||
|
||||
|
||||
# Individual Persons
|
||||
|
||||
Aaron Hopkins <go-sql-driver at die.net>
|
||||
Arne Hormann <arnehormann at gmail.com>
|
||||
Carlos Nieto <jose.carlos at menteslibres.net>
|
||||
Chris Moos <chris at tech9computers.com>
|
||||
DisposaBoy <disposaboy at dby.me>
|
||||
Frederick Mayle <frederickmayle at gmail.com>
|
||||
Gustavo Kristic <gkristic at gmail.com>
|
||||
Hanno Braun <mail at hannobraun.com>
|
||||
Henri Yandell <flamefew at gmail.com>
|
||||
Hirotaka Yamamoto <ymmt2005 at gmail.com>
|
||||
INADA Naoki <songofacandy at gmail.com>
|
||||
James Harr <james.harr at gmail.com>
|
||||
Jian Zhen <zhenjl at gmail.com>
|
||||
Joshua Prunier <joshua.prunier at gmail.com>
|
||||
Julien Schmidt <go-sql-driver at julienschmidt.com>
|
||||
Kamil Dziedzic <kamil at klecza.pl>
|
||||
Leonardo YongUk Kim <dalinaum at gmail.com>
|
||||
Lucas Liu <extrafliu at gmail.com>
|
||||
Luke Scott <luke at webconnex.com>
|
||||
Michael Woolnough <michael.woolnough at gmail.com>
|
||||
Nicola Peduzzi <thenikso at gmail.com>
|
||||
Runrioter Wung <runrioter at gmail.com>
|
||||
Soroush Pour <me at soroushjp.com>
|
||||
Stan Putrya <root.vagner at gmail.com>
|
||||
Xiaobing Jiang <s7v7nislands at gmail.com>
|
||||
Xiuming Chen <cc at cxm.cc>
|
||||
Julien Lefevre <julien.lefevr at gmail.com>
|
||||
|
||||
# Organizations
|
||||
|
||||
Barracuda Networks, Inc.
|
||||
Google Inc.
|
||||
Stripe Inc.
|
92 vendor/github.com/go-sql-driver/mysql/CHANGELOG.md (generated, vendored)
@@ -1,92 +0,0 @@
|
||||
## HEAD
|
||||
|
||||
Changes:
|
||||
|
||||
- Go 1.1 is no longer supported
|
||||
- Use decimals field from MySQL to format time types (#249)
|
||||
- Buffer optimizations (#269)
|
||||
- TLS ServerName defaults to the host (#283)
|
||||
|
||||
Bugfixes:
|
||||
|
||||
- Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
|
||||
- Fixed handling of queries without columns and rows (#255)
|
||||
- Fixed a panic when SetKeepAlive() failed (#298)
|
||||
|
||||
New Features:
|
||||
- Support for returning table alias on Columns() (#289)
|
||||
- Placeholder interpolation, which can be activated with the DSN parameter `interpolateParams=true` (#309, #318)
|
||||
|
||||
|
||||
## Version 1.2 (2014-06-03)
|
||||
|
||||
Changes:
|
||||
|
||||
- We switched back to a "rolling release". `go get` installs the current master branch again
|
||||
- Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver
|
||||
- Exported errors to allow easy checking from application code
|
||||
- Enabled TCP Keepalives on TCP connections
|
||||
- Optimized INFILE handling (better buffer size calculation, lazy init, ...)
|
||||
- The DSN parser also checks for a missing separating slash
|
||||
- Faster binary date / datetime to string formatting
|
||||
- Also exported the MySQLWarning type
|
||||
- mysqlConn.Close returns the first error encountered instead of ignoring all errors
|
||||
- writePacket() automatically writes the packet size to the header
|
||||
- readPacket() uses an iterative approach instead of a recursive one to merge split packets
|
||||
|
||||
New Features:
|
||||
|
||||
- `RegisterDial` allows the usage of a custom dial function to establish the network connection
|
||||
- Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter
|
||||
- Logging of critical errors is configurable with `SetLogger`
|
||||
- Google CloudSQL support
|
||||
|
||||
Bugfixes:
|
||||
|
||||
- Allow more than 32 parameters in prepared statements
|
||||
- Various old_password fixes
|
||||
- Fixed TestConcurrent test to pass Go's race detection
|
||||
- Fixed appendLengthEncodedInteger for large numbers
|
||||
- Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo)
|
||||
|
||||
|
||||
## Version 1.1 (2013-11-02)
|
||||
|
||||
Changes:
|
||||
|
||||
- Go-MySQL-Driver now requires Go 1.1
|
||||
- Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore
|
||||
- Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors
|
||||
- `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")`
|
||||
- DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'.
|
||||
- Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries
|
||||
- Optimized the buffer for reading
|
||||
- stmt.Query now caches column metadata
|
||||
- New Logo
|
||||
- Changed the copyright header to include all contributors
|
||||
- Improved the LOAD INFILE documentation
|
||||
- The driver struct is now exported to make the driver directly accessible
|
||||
- Refactored the driver tests
|
||||
- Added more benchmarks and moved all to a separate file
|
||||
- Other small refactoring
|
||||
|
||||
New Features:
|
||||
|
||||
- Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure
|
||||
- Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs
|
||||
- Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used
|
||||
|
||||
Bugfixes:
|
||||
|
||||
- Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
|
||||
- Convert to DB timezone when inserting `time.Time`
|
||||
- Split packets (more than 16MB) are now merged correctly
|
||||
- Fixed false positive `io.EOF` errors when the data was fully read
|
||||
- Avoid panics on reuse of closed connections
|
||||
- Fixed empty string producing false nil values
|
||||
- Fixed sign byte for positive TIME fields
|
||||
|
||||
|
||||
## Version 1.0 (2013-05-14)
|
||||
|
||||
Initial Release
|
40 vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md (generated, vendored)
@@ -1,40 +0,0 @@
|
||||
# Contributing Guidelines
|
||||
|
||||
## Reporting Issues
|
||||
|
||||
Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed).
|
||||
|
||||
Please provide the following minimum information:
|
||||
* Your Go-MySQL-Driver version (or git SHA)
|
||||
* Your Go version (run `go version` in your console)
|
||||
* A detailed issue description
|
||||
* Error Log if present
|
||||
* If possible, a short example
|
||||
|
||||
|
||||
## Contributing Code
|
||||
|
||||
By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file.
|
||||
Don't forget to add yourself to the AUTHORS file.
|
||||
|
||||
### Pull Requests Checklist
|
||||
|
||||
Please check the following points before submitting your pull request:
|
||||
- [x] Code compiles correctly
|
||||
- [x] Created tests, if possible
|
||||
- [x] All tests pass
|
||||
- [x] Extended the README / documentation, if necessary
|
||||
- [x] Added yourself to the AUTHORS file
|
||||
|
||||
### Code Review
|
||||
|
||||
Everyone is invited to review and comment on pull requests.
|
||||
If it looks fine to you, comment with "LGTM" (Looks good to me).
|
||||
|
||||
If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes.
|
||||
|
||||
Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM".
|
||||
|
||||
## Development Ideas
|
||||
|
||||
If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page.
|
373 vendor/github.com/go-sql-driver/mysql/LICENSE (generated, vendored)
@@ -1,373 +0,0 @@
|
||||
Mozilla Public License Version 2.0
|
||||
==================================
|
||||
|
||||
1. Definitions
|
||||
--------------
|
||||
|
||||
1.1. "Contributor"
|
||||
means each individual or legal entity that creates, contributes to
|
||||
the creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
means the combination of the Contributions of others (if any) used
|
||||
by a Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
means Source Code Form to which the initial Contributor has attached
|
||||
the notice in Exhibit A, the Executable Form of such Source Code
|
||||
Form, and Modifications of such Source Code Form, in each case
|
||||
including portions thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
(a) that the initial Contributor has attached the notice described
|
||||
in Exhibit B to the Covered Software; or
|
||||
|
||||
(b) that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the
|
||||
terms of a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
means a work that combines Covered Software with other material, in
|
||||
a separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
means having the right to grant, to the maximum extent possible,
|
||||
whether at the time of the initial grant or subsequently, any and
|
||||
all of the rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
means any of the following:
|
||||
|
||||
(a) any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered
|
||||
Software; or
|
||||
|
||||
(b) any new file in Source Code Form that contains any Covered
|
||||
Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the
|
||||
License, by the making, using, selling, offering for sale, having
|
||||
made, import, or transfer of either its Contributions or its
|
||||
Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
means either the GNU General Public License, Version 2.0, the GNU
|
||||
Lesser General Public License, Version 2.1, the GNU Affero General
|
||||
Public License, Version 3.0, or any later versions of those
|
||||
licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that
|
||||
controls, is controlled by, or is under common control with You. For
|
||||
purposes of this definition, "control" means (a) the power, direct
|
||||
or indirect, to cause the direction or management of such entity,
|
||||
whether by contract or otherwise, or (b) ownership of more than
|
||||
fifty percent (50%) of the outstanding shares or beneficial
|
||||
ownership of such entity.
|
||||
|
||||
2. License Grants and Conditions
|
||||
--------------------------------
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
(a) under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
(b) under Patent Claims of such Contributor to make, use, sell, offer
|
||||
for sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
(a) for any code that a Contributor has removed from Covered Software;
|
||||
or
|
||||
|
||||
(b) for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
(c) under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights
|
||||
to grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
|
||||
in Section 2.1.
|
||||
|
||||
3. Responsibilities
|
||||
-------------------
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
(a) such Covered Software must also be made available in Source Code
|
||||
Form, as described in Section 3.1, and You must inform recipients of
|
||||
the Executable Form how they can obtain a copy of such Source Code
|
||||
Form by reasonable means in a timely manner, at a charge no more
|
||||
than the cost of distribution to the recipient; and
|
||||
|
||||
(b) You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter
|
||||
the recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
||||
the Larger Work may, at their option, further distribute the Covered
|
||||
Software under the terms of either this License or such Secondary
|
||||
License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices
|
||||
(including copyright notices, patent notices, disclaimers of warranty,
|
||||
or limitations of liability) contained within the Source Code Form of
|
||||
the Covered Software, except that You may alter any license notices to
|
||||
the extent required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on
|
||||
behalf of any Contributor. You must make it absolutely clear that any
|
||||
such warranty, support, indemnity, or liability obligation is offered by
|
||||
You alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
---------------------------------------------------
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this
|
||||
License with respect to some or all of the Covered Software due to
|
||||
statute, judicial order, or regulation then You must: (a) comply with
|
||||
the terms of this License to the maximum extent possible; and (b)
|
||||
describe the limitations and the code they affect. Such description must
|
||||
be placed in a text file included with all distributions of the Covered
|
||||
Software under this License. Except to the extent prohibited by statute
|
||||
or regulation, such description must be sufficiently detailed for a
|
||||
recipient of ordinary skill to be able to understand it.
|
||||
|
||||
5. Termination
|
||||
--------------
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically
|
||||
if You fail to comply with any of its terms. However, if You become
|
||||
compliant, then the rights granted under this License from a particular
|
||||
Contributor are reinstated (a) provisionally, unless and until such
|
||||
Contributor explicitly and finally terminates Your grants, and (b) on an
|
||||
ongoing basis, if such Contributor fails to notify You of the
|
||||
non-compliance by some reasonable means prior to 60 days after You have
|
||||
come back into compliance. Moreover, Your grants from a particular
|
||||
Contributor are reinstated on an ongoing basis if such Contributor
|
||||
notifies You of the non-compliance by some reasonable means, this is the
|
||||
first time You have received notice of non-compliance with this License
|
||||
from such Contributor, and You become compliant prior to 30 days after
|
||||
Your receipt of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions,
|
||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||
directly or indirectly infringes any patent, then the rights granted to
|
||||
You by any and all Contributors for the Covered Software under Section
|
||||
2.1 of this License shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
|
||||
end user license agreements (excluding distributors and resellers) which
|
||||
have been validly granted by You or Your distributors under this License
|
||||
prior to termination shall survive termination.
|
||||
|
||||
************************************************************************
|
||||
* *
|
||||
* 6. Disclaimer of Warranty *
|
||||
* ------------------------- *
|
||||
* *
|
||||
* Covered Software is provided under this License on an "as is" *
|
||||
* basis, without warranty of any kind, either expressed, implied, or *
|
||||
* statutory, including, without limitation, warranties that the *
|
||||
* Covered Software is free of defects, merchantable, fit for a *
|
||||
* particular purpose or non-infringing. The entire risk as to the *
|
||||
* quality and performance of the Covered Software is with You. *
|
||||
* Should any Covered Software prove defective in any respect, You *
|
||||
* (not any Contributor) assume the cost of any necessary servicing, *
|
||||
* repair, or correction. This disclaimer of warranty constitutes an *
|
||||
* essential part of this License. No use of any Covered Software is *
|
||||
* authorized under this License except under this disclaimer. *
|
||||
* *
|
||||
************************************************************************
|
||||
|
||||
************************************************************************
|
||||
* *
|
||||
* 7. Limitation of Liability *
|
||||
* -------------------------- *
|
||||
* *
|
||||
* Under no circumstances and under no legal theory, whether tort *
|
||||
* (including negligence), contract, or otherwise, shall any *
|
||||
* Contributor, or anyone who distributes Covered Software as *
|
||||
* permitted above, be liable to You for any direct, indirect, *
|
||||
* special, incidental, or consequential damages of any character *
|
||||
* including, without limitation, damages for lost profits, loss of *
|
||||
* goodwill, work stoppage, computer failure or malfunction, or any *
|
||||
* and all other commercial damages or losses, even if such party *
|
||||
* shall have been informed of the possibility of such damages. This *
|
||||
* limitation of liability shall not apply to liability for death or *
|
||||
* personal injury resulting from such party's negligence to the *
|
||||
* extent applicable law prohibits such limitation. Some *
|
||||
* jurisdictions do not allow the exclusion or limitation of *
|
||||
* incidental or consequential damages, so this exclusion and *
|
||||
* limitation may not apply to You. *
|
||||
* *
|
||||
************************************************************************
|
||||
|
||||
8. Litigation
|
||||
-------------
|
||||
|
||||
Any litigation relating to this License may be brought only in the
|
||||
courts of a jurisdiction where the defendant maintains its principal
|
||||
place of business and such litigation shall be governed by laws of that
|
||||
jurisdiction, without reference to its conflict-of-law provisions.
|
||||
Nothing in this Section shall prevent a party's ability to bring
|
||||
cross-claims or counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
----------------
|
||||
|
||||
This License represents the complete agreement concerning the subject
|
||||
matter hereof. If any provision of this License is held to be
|
||||
unenforceable, such provision shall be reformed only to the extent
|
||||
necessary to make it enforceable. Any law or regulation which provides
|
||||
that the language of a contract shall be construed against the drafter
|
||||
shall not be used to construe this License against a Contributor.
|
||||
|
||||
10. Versions of the License
|
||||
---------------------------
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version
|
||||
of the License under which You originally received the Covered Software,
|
||||
or under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a
|
||||
modified version of this License if you rename the license and remove
|
||||
any references to the name of the license steward (except to note that
|
||||
such modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||
Licenses
|
||||
|
||||
If You choose to distribute Source Code Form that is Incompatible With
|
||||
Secondary Licenses under the terms of this version of the License, the
|
||||
notice described in Exhibit B of this License must be attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
-------------------------------------------
|
||||
|
||||
This Source Code Form is subject to the terms of the Mozilla Public
|
||||
License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular
|
||||
file, then You may include the notice in a location (such as a LICENSE
|
||||
file in a relevant directory) where a recipient would be likely to look
|
||||
for such a notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||
---------------------------------------------------------
|
||||
|
||||
This Source Code Form is "Incompatible With Secondary Licenses", as
|
||||
defined by the Mozilla Public License, v. 2.0.
|
386 vendor/github.com/go-sql-driver/mysql/README.md (generated, vendored)
@@ -1,386 +0,0 @@
|
||||
# Go-MySQL-Driver
|
||||
|
||||
A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) package
|
||||
|
||||
![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
|
||||
|
||||
**Latest stable Release:** [Version 1.2 (June 03, 2014)](https://github.com/go-sql-driver/mysql/releases)
|
||||
|
||||
[![Build Status](https://travis-ci.org/go-sql-driver/mysql.png?branch=master)](https://travis-ci.org/go-sql-driver/mysql)
|
||||
|
||||
---------------------------------------
|
||||
* [Features](#features)
|
||||
* [Requirements](#requirements)
|
||||
* [Installation](#installation)
|
||||
* [Usage](#usage)
|
||||
* [DSN (Data Source Name)](#dsn-data-source-name)
|
||||
* [Password](#password)
|
||||
* [Protocol](#protocol)
|
||||
* [Address](#address)
|
||||
* [Parameters](#parameters)
|
||||
* [Examples](#examples)
|
||||
* [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
|
||||
* [time.Time support](#timetime-support)
|
||||
* [Unicode support](#unicode-support)
|
||||
* [Testing / Development](#testing--development)
|
||||
* [License](#license)
|
||||
|
||||
---------------------------------------
|
||||
|
||||
## Features
|
||||
* Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
|
||||
* Native Go implementation. No C-bindings, just pure Go
|
||||
* Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](http://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
|
||||
* Automatic handling of broken connections
|
||||
* Automatic Connection Pooling *(by database/sql package)*
|
||||
* Supports queries larger than 16MB
|
||||
* Full [`sql.RawBytes`](http://golang.org/pkg/database/sql/#RawBytes) support.
|
||||
* Intelligent `LONG DATA` handling in prepared statements
|
||||
* Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
|
||||
* Optional `time.Time` parsing
|
||||
* Optional placeholder interpolation
|
||||
|
||||
## Requirements
|
||||
* Go 1.2 or higher
|
||||
* MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
|
||||
|
||||
---------------------------------------
|
||||
|
||||
## Installation
|
||||
Simply install the package to your [$GOPATH](http://code.google.com/p/go-wiki/wiki/GOPATH "GOPATH") with the [go tool](http://golang.org/cmd/go/ "go command") from a shell:
|
||||
```bash
|
||||
$ go get github.com/go-sql-driver/mysql
|
||||
```
|
||||
Make sure [Git is installed](http://git-scm.com/downloads) on your machine and in your system's `PATH`.
|
||||
|
||||
## Usage
|
||||
_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver; you can then use the full [`database/sql`](http://golang.org/pkg/database/sql) API.
|
||||
|
||||
Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
|
||||
```go
|
||||
import "database/sql"
|
||||
import _ "github.com/go-sql-driver/mysql"
|
||||
|
||||
db, err := sql.Open("mysql", "user:password@/dbname")
|
||||
```
|
||||
|
||||
[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
|
||||
|
||||
|
||||
### DSN (Data Source Name)
|
||||
|
||||
The Data Source Name has a common format, as used e.g. by [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but without a type prefix (optional parts are marked by square brackets):
|
||||
```
|
||||
[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]
|
||||
```
|
||||
|
||||
A DSN in its fullest form:
|
||||
```
|
||||
username:password@protocol(address)/dbname?param=value
|
||||
```
|
||||
|
||||
Except for the database name, all values are optional, so the minimal DSN is:
|
||||
```
|
||||
/dbname
|
||||
```
|
||||
|
||||
If you do not want to preselect a database, leave `dbname` empty:
|
||||
```
|
||||
/
|
||||
```
|
||||
This has the same effect as an empty DSN string:
|
||||
```
|
||||
|
||||
```
|
||||
|
||||
#### Password
|
||||
Passwords can consist of any character. Escaping is **not** necessary.
|
||||
|
||||
#### Protocol
|
||||
See [net.Dial](http://golang.org/pkg/net/#Dial) for more information about which networks are available.
|
||||
In general, you should use a Unix domain socket if available, and TCP otherwise, for best performance.
|
||||
|
||||
#### Address
|
||||
For TCP and UDP networks, addresses have the form `host:port`.
|
||||
If `host` is a literal IPv6 address, it must be enclosed in square brackets.
|
||||
The functions [net.JoinHostPort](http://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](http://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
|
||||
|
||||
For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
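For TCP addresses, `net.JoinHostPort` from the standard library produces the `host:port` form described above, including the square brackets an IPv6 literal requires. A brief sketch (the credentials and database name are placeholders):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// JoinHostPort adds the brackets an IPv6 literal needs: [::1]:3306.
	addr := net.JoinHostPort("::1", "3306")
	fmt.Printf("user:password@tcp(%s)/dbname\n", addr)
}
```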
|
||||
|
||||
#### Parameters
|
||||
*Parameters are case-sensitive!*
|
||||
|
||||
Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`.
|
||||
|
||||
##### `allowAllFiles`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
|
||||
`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files.
|
||||
[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
|
||||
|
||||
##### `allowCleartextPasswords`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
|
||||
`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
|
||||
|
||||
##### `allowOldPasswords`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords).
|
||||
|
||||
##### `charset`
|
||||
|
||||
```
|
||||
Type: string
|
||||
Valid Values: <name>
|
||||
Default: none
|
||||
```
|
||||
|
||||
Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the next one is tried whenever setting a charset fails. This enables, for example, support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
|
||||
|
||||
Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
|
||||
Unless you need the fallback behavior, please use `collation` instead.
|
||||
|
||||
##### `collation`
|
||||
|
||||
```
|
||||
Type: string
|
||||
Valid Values: <name>
|
||||
Default: utf8_general_ci
|
||||
```
|
||||
|
||||
Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
|
||||
|
||||
A list of valid collations for a server is retrievable with `SHOW COLLATION`.
|
||||
|
||||
##### `clientFoundRows`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
|
||||
`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
|
||||
|
||||
##### `columnsWithAlias`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
|
||||
When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
|
||||
|
||||
```
|
||||
SELECT u.id FROM users as u
|
||||
```
|
||||
|
||||
will return `u.id` instead of just `id` if `columnsWithAlias=true`.
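
A hedged sketch of the difference, assuming `db` was opened as in the Usage section with a DSN ending in `?columnsWithAlias=true` and that a `users` table exists (both assumptions for illustration):

```go
rows, err := db.Query("SELECT u.id FROM users AS u")
if err != nil {
	log.Fatal(err)
}
defer rows.Close()

cols, err := rows.Columns()
if err != nil {
	log.Fatal(err)
}
fmt.Println(cols) // prints [u.id]; without columnsWithAlias=true it would be [id]
```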
|
||||
|
||||
##### `interpolateParams`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
|
||||
If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with the given parameters. This reduces the number of round trips, since with `interpolateParams=false` the driver has to prepare a statement, execute it with the given parameters and close the statement again for every such call.
|
||||
|
||||
*This cannot be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
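
A short sketch, assuming a hypothetical table `foo` with a `name` column; the DSN itself is a placeholder:

```go
// interpolateParams=true avoids the prepare/execute/close round trips for
// calls that pass parameters directly to db.Exec or db.Query.
db, err := sql.Open("mysql", "user:password@/dbname?interpolateParams=true")
if err != nil {
	log.Fatal(err)
}
// The ? placeholder is interpolated client-side into a single query string.
_, err = db.Exec("INSERT INTO foo (name) VALUES (?)", "gopher")
if err != nil {
	log.Fatal(err)
}
```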
|
||||
|
||||
##### `loc`
|
||||
|
||||
```
|
||||
Type: string
|
||||
Valid Values: <escaped name>
|
||||
Default: UTC
|
||||
```
|
||||
|
||||
Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](http://golang.org/pkg/time/#LoadLocation) for details.
|
||||
|
||||
Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
|
||||
|
||||
Please keep in mind that parameter values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
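
For example, a small sketch of escaping the location name when building the DSN (the credentials are placeholders):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// "US/Pacific" contains a '/', so it must be query-escaped in the DSN.
	loc := url.QueryEscape("US/Pacific")
	dsn := fmt.Sprintf("user:password@/dbname?parseTime=true&loc=%s", loc)
	fmt.Println(dsn) // user:password@/dbname?parseTime=true&loc=US%2FPacific
}
```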
|
||||
|
||||
|
||||
##### `parseTime`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
|
||||
`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`.
|
||||
|
||||
|
||||
##### `strict`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
|
||||
`strict=true` enables the strict mode in which MySQL warnings are treated as errors.
|
||||
|
||||
By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes. See the [examples](#examples) for a DSN example.
|
||||
|
||||
|
||||
##### `timeout`
|
||||
|
||||
```
|
||||
Type: decimal number
|
||||
Default: OS default
|
||||
```
|
||||
|
||||
*Driver* side connection timeout. The value must be a string of decimal numbers, each with optional fraction and a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout).
|
||||
|
||||
|
||||
##### `tls`
|
||||
|
||||
```
|
||||
Type: bool / string
|
||||
Valid Values: true, false, skip-verify, <name>
|
||||
Default: false
|
||||
```
|
||||
|
||||
`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Or use a custom TLS configuration registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig) by passing its registered name as the value.
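
A sketch of registering a custom TLS configuration, assuming a CA certificate at a placeholder path and the imports `crypto/tls`, `crypto/x509`, `io/ioutil`, `log`, `database/sql` and a direct import of this package:

```go
rootCertPool := x509.NewCertPool()
pem, err := ioutil.ReadFile("/path/to/ca-cert.pem") // placeholder path
if err != nil {
	log.Fatal(err)
}
if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
	log.Fatal("failed to append CA certificate")
}
if err := mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: rootCertPool}); err != nil {
	log.Fatal(err)
}

// Reference the registered config by name in the DSN.
db, err := sql.Open("mysql", "user:password@tcp(localhost:3306)/dbname?tls=custom")
```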
|
||||
|
||||
|
||||
##### System Variables
|
||||
|
||||
All other parameters are interpreted as system variables:
|
||||
* `autocommit`: `"SET autocommit=<value>"`
|
||||
* [`time_zone`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `"SET time_zone=<value>"`
|
||||
* [`tx_isolation`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `"SET tx_isolation=<value>"`
|
||||
* `param`: `"SET <param>=<value>"`
|
||||
|
||||
*The values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!*
|
||||
|
||||
#### Examples
|
||||
```
|
||||
user@unix(/path/to/socket)/dbname
|
||||
```
|
||||
|
||||
```
|
||||
root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
|
||||
```
|
||||
|
||||
```
|
||||
user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
|
||||
```
|
||||
|
||||
Use the [strict mode](#strict) but ignore notes:
|
||||
```
|
||||
user:password@/dbname?strict=true&sql_notes=false
|
||||
```
|
||||
|
||||
TCP via IPv6:
|
||||
```
|
||||
user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
|
||||
```
|
||||
|
||||
TCP on a remote host, e.g. Amazon RDS:
|
||||
```
|
||||
id:password@tcp(your-amazonaws-uri.com:3306)/dbname
|
||||
```
|
||||
|
||||
Google Cloud SQL on App Engine:
|
||||
```
|
||||
user@cloudsql(project-id:instance-name)/dbname
|
||||
```
|
||||
|
||||
TCP using default port (3306) on localhost:
|
||||
```
|
||||
user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
|
||||
```
|
||||
|
||||
Use the default protocol (tcp) and host (localhost:3306):
|
||||
```
|
||||
user:password@/dbname
|
||||
```
|
||||
|
||||
No Database preselected:
|
||||
```
|
||||
user:password@/
|
||||
```
|
||||
|
||||
### `LOAD DATA LOCAL INFILE` support
|
||||
For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
|
||||
```go
|
||||
import "github.com/go-sql-driver/mysql"
|
||||
```
|
||||
|
||||
Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
|
||||
|
||||
To use an `io.Reader`, a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)`; the handler returns an `io.Reader` or `io.ReadCloser`. The Reader is then available under the filepath `Reader::<name>`. Choose different names for different handlers and call `DeregisterReaderHandler` when a handler is no longer needed.
|
||||
|
||||
See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
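
A hedged sketch of both variants, assuming `db` was opened as in the Usage section, a table `foo` exists, and the imports `io`, `log`, `strings` plus the direct `mysql` import shown above (file path and data are placeholders):

```go
// Whitelist a concrete file and load it.
filePath := "/home/gopher/data.csv"
mysql.RegisterLocalFile(filePath)
if _, err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo"); err != nil {
	log.Fatal(err)
}

// Alternatively, stream data from memory through a registered io.Reader.
mysql.RegisterReaderHandler("data", func() io.Reader {
	return strings.NewReader("1,hello\n2,world\n")
})
defer mysql.DeregisterReaderHandler("data")
if _, err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo"); err != nil {
	log.Fatal(err)
}
```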
|
||||
|
||||
|
||||
### `time.Time` support
|
||||
The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte`, which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
|
||||
|
||||
However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, `time.Time` being the logical equivalent in Go of `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](http://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
|
||||
|
||||
**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
|
||||
|
||||
Alternatively you can use the [`NullTime`](http://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
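
A short sketch of scanning through `NullTime`, assuming `db` was opened as in the Usage section, a direct `mysql` import, and a hypothetical table `foo` with a nullable `DATETIME` column `created_at`:

```go
var created mysql.NullTime
if err := db.QueryRow("SELECT created_at FROM foo WHERE id = ?", 1).Scan(&created); err != nil {
	log.Fatal(err)
}
if created.Valid {
	fmt.Println("created at", created.Time)
} else {
	fmt.Println("created_at is NULL")
}
```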
|
||||
|
||||
|
||||
### Unicode support
|
||||
Since version 1.1, Go-MySQL-Driver uses the collation `utf8_general_ci` by default.
|
||||
|
||||
Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
|
||||
|
||||
Version 1.0 of the driver recommended adding `&charset=utf8` (an alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is no longer necessary. Prefer the [`collation`](#collation) parameter to set a collation / charset other than the default.
|
||||
|
||||
See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
|
||||
|
||||
|
||||
## Testing / Development
|
||||
To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
|
||||
|
||||
Go-MySQL-Driver is not feature-complete yet. Your help is greatly appreciated.
|
||||
If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
|
||||
|
||||
See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details.
|
||||
|
||||
---------------------------------------
|
||||
|
||||
## License
|
||||
Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
|
||||
|
||||
Mozilla summarizes the license scope as follows:
|
||||
> MPL: The copyleft applies to any files containing MPLed code.
|
||||
|
||||
|
||||
That means:
|
||||
* You can **use** the **unchanged** source code both in private and commercially
|
||||
* When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0)
|
||||
* You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**
|
||||
|
||||
Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license.
|
||||
|
||||
You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
|
||||
|
||||
![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")
|
||||
|
19
vendor/github.com/go-sql-driver/mysql/appengine.go
generated
vendored
@ -1,19 +0,0 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// +build appengine
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"appengine/cloudsql"
|
||||
)
|
||||
|
||||
func init() {
|
||||
RegisterDial("cloudsql", cloudsql.Dial)
|
||||
}
|
136
vendor/github.com/go-sql-driver/mysql/buffer.go
generated
vendored
@ -1,136 +0,0 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import "io"
|
||||
|
||||
const defaultBufSize = 4096
|
||||
|
||||
// A buffer which is used for both reading and writing.
|
||||
// This is possible since communication on each connection is synchronous.
|
||||
// In other words, we can't write and read simultaneously on the same connection.
|
||||
// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
|
||||
// Also highly optimized for this particular use case.
|
||||
type buffer struct {
|
||||
buf []byte
|
||||
rd io.Reader
|
||||
idx int
|
||||
length int
|
||||
}
|
||||
|
||||
func newBuffer(rd io.Reader) buffer {
|
||||
var b [defaultBufSize]byte
|
||||
return buffer{
|
||||
buf: b[:],
|
||||
rd: rd,
|
||||
}
|
||||
}
|
||||
|
||||
// fill reads into the buffer until at least _need_ bytes are in it
|
||||
func (b *buffer) fill(need int) error {
|
||||
n := b.length
|
||||
|
||||
// move existing data to the beginning
|
||||
if n > 0 && b.idx > 0 {
|
||||
copy(b.buf[0:n], b.buf[b.idx:])
|
||||
}
|
||||
|
||||
// grow buffer if necessary
|
||||
// TODO: let the buffer shrink again at some point
|
||||
// Maybe keep the org buf slice and swap back?
|
||||
if need > len(b.buf) {
|
||||
// Round up to the next multiple of the default size
|
||||
newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
|
||||
copy(newBuf, b.buf)
|
||||
b.buf = newBuf
|
||||
}
|
||||
|
||||
b.idx = 0
|
||||
|
||||
for {
|
||||
nn, err := b.rd.Read(b.buf[n:])
|
||||
n += nn
|
||||
|
||||
switch err {
|
||||
case nil:
|
||||
if n < need {
|
||||
continue
|
||||
}
|
||||
b.length = n
|
||||
return nil
|
||||
|
||||
case io.EOF:
|
||||
if n >= need {
|
||||
b.length = n
|
||||
return nil
|
||||
}
|
||||
return io.ErrUnexpectedEOF
|
||||
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// returns next N bytes from buffer.
|
||||
// The returned slice is only guaranteed to be valid until the next read
|
||||
func (b *buffer) readNext(need int) ([]byte, error) {
|
||||
if b.length < need {
|
||||
// refill
|
||||
if err := b.fill(need); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
offset := b.idx
|
||||
b.idx += need
|
||||
b.length -= need
|
||||
return b.buf[offset:b.idx], nil
|
||||
}
|
||||
|
||||
// returns a buffer with the requested size.
|
||||
// If possible, a slice from the existing buffer is returned.
|
||||
// Otherwise a bigger buffer is made.
|
||||
// Only one buffer (total) can be used at a time.
|
||||
func (b *buffer) takeBuffer(length int) []byte {
|
||||
if b.length > 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// test (cheap) general case first
|
||||
if length <= defaultBufSize || length <= cap(b.buf) {
|
||||
return b.buf[:length]
|
||||
}
|
||||
|
||||
if length < maxPacketSize {
|
||||
b.buf = make([]byte, length)
|
||||
return b.buf
|
||||
}
|
||||
return make([]byte, length)
|
||||
}
|
||||
|
||||
// shortcut which can be used if the requested buffer is guaranteed to be
|
||||
// smaller than defaultBufSize
|
||||
// Only one buffer (total) can be used at a time.
|
||||
func (b *buffer) takeSmallBuffer(length int) []byte {
|
||||
if b.length == 0 {
|
||||
return b.buf[:length]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// takeCompleteBuffer returns the complete existing buffer.
|
||||
// This can be used if the necessary buffer size is unknown.
|
||||
// Only one buffer (total) can be used at a time.
|
||||
func (b *buffer) takeCompleteBuffer() []byte {
|
||||
if b.length == 0 {
|
||||
return b.buf
|
||||
}
|
||||
return nil
|
||||
}
|
250
vendor/github.com/go-sql-driver/mysql/collations.go
generated
vendored
@ -1,250 +0,0 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
const defaultCollation byte = 33 // utf8_general_ci
|
||||
|
||||
// A list of available collations mapped to the internal ID.
|
||||
// To update this map use the following MySQL query:
|
||||
// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS
|
||||
var collations = map[string]byte{
|
||||
"big5_chinese_ci": 1,
|
||||
"latin2_czech_cs": 2,
|
||||
"dec8_swedish_ci": 3,
|
||||
"cp850_general_ci": 4,
|
||||
"latin1_german1_ci": 5,
|
||||
"hp8_english_ci": 6,
|
||||
"koi8r_general_ci": 7,
|
||||
"latin1_swedish_ci": 8,
|
||||
"latin2_general_ci": 9,
|
||||
"swe7_swedish_ci": 10,
|
||||
"ascii_general_ci": 11,
|
||||
"ujis_japanese_ci": 12,
|
||||
"sjis_japanese_ci": 13,
|
||||
"cp1251_bulgarian_ci": 14,
|
||||
"latin1_danish_ci": 15,
|
||||
"hebrew_general_ci": 16,
|
||||
"tis620_thai_ci": 18,
|
||||
"euckr_korean_ci": 19,
|
||||
"latin7_estonian_cs": 20,
|
||||
"latin2_hungarian_ci": 21,
|
||||
"koi8u_general_ci": 22,
|
||||
"cp1251_ukrainian_ci": 23,
|
||||
"gb2312_chinese_ci": 24,
|
||||
"greek_general_ci": 25,
|
||||
"cp1250_general_ci": 26,
|
||||
"latin2_croatian_ci": 27,
|
||||
"gbk_chinese_ci": 28,
|
||||
"cp1257_lithuanian_ci": 29,
|
||||
"latin5_turkish_ci": 30,
|
||||
"latin1_german2_ci": 31,
|
||||
"armscii8_general_ci": 32,
|
||||
"utf8_general_ci": 33,
|
||||
"cp1250_czech_cs": 34,
|
||||
"ucs2_general_ci": 35,
|
||||
"cp866_general_ci": 36,
|
||||
"keybcs2_general_ci": 37,
|
||||
"macce_general_ci": 38,
|
||||
"macroman_general_ci": 39,
|
||||
"cp852_general_ci": 40,
|
||||
"latin7_general_ci": 41,
|
||||
"latin7_general_cs": 42,
|
||||
"macce_bin": 43,
|
||||
"cp1250_croatian_ci": 44,
|
||||
"utf8mb4_general_ci": 45,
|
||||
"utf8mb4_bin": 46,
|
||||
"latin1_bin": 47,
|
||||
"latin1_general_ci": 48,
|
||||
"latin1_general_cs": 49,
|
||||
"cp1251_bin": 50,
|
||||
"cp1251_general_ci": 51,
|
||||
"cp1251_general_cs": 52,
|
||||
"macroman_bin": 53,
|
||||
"utf16_general_ci": 54,
|
||||
"utf16_bin": 55,
|
||||
"utf16le_general_ci": 56,
|
||||
"cp1256_general_ci": 57,
|
||||
"cp1257_bin": 58,
|
||||
"cp1257_general_ci": 59,
|
||||
"utf32_general_ci": 60,
|
||||
"utf32_bin": 61,
|
||||
"utf16le_bin": 62,
|
||||
"binary": 63,
|
||||
"armscii8_bin": 64,
|
||||
"ascii_bin": 65,
|
||||
"cp1250_bin": 66,
|
||||
"cp1256_bin": 67,
|
||||
"cp866_bin": 68,
|
||||
"dec8_bin": 69,
|
||||
"greek_bin": 70,
|
||||
"hebrew_bin": 71,
|
||||
"hp8_bin": 72,
|
||||
"keybcs2_bin": 73,
|
||||
"koi8r_bin": 74,
|
||||
"koi8u_bin": 75,
|
||||
"latin2_bin": 77,
|
||||
"latin5_bin": 78,
|
||||
"latin7_bin": 79,
|
||||
"cp850_bin": 80,
|
||||
"cp852_bin": 81,
|
||||
"swe7_bin": 82,
|
||||
"utf8_bin": 83,
|
||||
"big5_bin": 84,
|
||||
"euckr_bin": 85,
|
||||
"gb2312_bin": 86,
|
||||
"gbk_bin": 87,
|
||||
"sjis_bin": 88,
|
||||
"tis620_bin": 89,
|
||||
"ucs2_bin": 90,
|
||||
"ujis_bin": 91,
|
||||
"geostd8_general_ci": 92,
|
||||
"geostd8_bin": 93,
|
||||
"latin1_spanish_ci": 94,
|
||||
"cp932_japanese_ci": 95,
|
||||
"cp932_bin": 96,
|
||||
"eucjpms_japanese_ci": 97,
|
||||
"eucjpms_bin": 98,
|
||||
"cp1250_polish_ci": 99,
|
||||
"utf16_unicode_ci": 101,
|
||||
"utf16_icelandic_ci": 102,
|
||||
"utf16_latvian_ci": 103,
|
||||
"utf16_romanian_ci": 104,
|
||||
"utf16_slovenian_ci": 105,
|
||||
"utf16_polish_ci": 106,
|
||||
"utf16_estonian_ci": 107,
|
||||
"utf16_spanish_ci": 108,
|
||||
"utf16_swedish_ci": 109,
|
||||
"utf16_turkish_ci": 110,
|
||||
"utf16_czech_ci": 111,
|
||||
"utf16_danish_ci": 112,
|
||||
"utf16_lithuanian_ci": 113,
|
||||
"utf16_slovak_ci": 114,
|
||||
"utf16_spanish2_ci": 115,
|
||||
"utf16_roman_ci": 116,
|
||||
"utf16_persian_ci": 117,
|
||||
"utf16_esperanto_ci": 118,
|
||||
"utf16_hungarian_ci": 119,
|
||||
"utf16_sinhala_ci": 120,
|
||||
"utf16_german2_ci": 121,
|
||||
"utf16_croatian_ci": 122,
|
||||
"utf16_unicode_520_ci": 123,
|
||||
"utf16_vietnamese_ci": 124,
|
||||
"ucs2_unicode_ci": 128,
|
||||
"ucs2_icelandic_ci": 129,
|
||||
"ucs2_latvian_ci": 130,
|
||||
"ucs2_romanian_ci": 131,
|
||||
"ucs2_slovenian_ci": 132,
|
||||
"ucs2_polish_ci": 133,
|
||||
"ucs2_estonian_ci": 134,
|
||||
"ucs2_spanish_ci": 135,
|
||||
"ucs2_swedish_ci": 136,
|
||||
"ucs2_turkish_ci": 137,
|
||||
"ucs2_czech_ci": 138,
|
||||
"ucs2_danish_ci": 139,
|
||||
"ucs2_lithuanian_ci": 140,
|
||||
"ucs2_slovak_ci": 141,
|
||||
"ucs2_spanish2_ci": 142,
|
||||
"ucs2_roman_ci": 143,
|
||||
"ucs2_persian_ci": 144,
|
||||
"ucs2_esperanto_ci": 145,
|
||||
"ucs2_hungarian_ci": 146,
|
||||
"ucs2_sinhala_ci": 147,
|
||||
"ucs2_german2_ci": 148,
|
||||
"ucs2_croatian_ci": 149,
|
||||
"ucs2_unicode_520_ci": 150,
|
||||
"ucs2_vietnamese_ci": 151,
|
||||
"ucs2_general_mysql500_ci": 159,
|
||||
"utf32_unicode_ci": 160,
|
||||
"utf32_icelandic_ci": 161,
|
||||
"utf32_latvian_ci": 162,
|
||||
"utf32_romanian_ci": 163,
|
||||
"utf32_slovenian_ci": 164,
|
||||
"utf32_polish_ci": 165,
|
||||
"utf32_estonian_ci": 166,
|
||||
"utf32_spanish_ci": 167,
|
||||
"utf32_swedish_ci": 168,
|
||||
"utf32_turkish_ci": 169,
|
||||
"utf32_czech_ci": 170,
|
||||
"utf32_danish_ci": 171,
|
||||
"utf32_lithuanian_ci": 172,
|
||||
"utf32_slovak_ci": 173,
|
||||
"utf32_spanish2_ci": 174,
|
||||
"utf32_roman_ci": 175,
|
||||
"utf32_persian_ci": 176,
|
||||
"utf32_esperanto_ci": 177,
|
||||
"utf32_hungarian_ci": 178,
|
||||
"utf32_sinhala_ci": 179,
|
||||
"utf32_german2_ci": 180,
|
||||
"utf32_croatian_ci": 181,
|
||||
"utf32_unicode_520_ci": 182,
|
||||
"utf32_vietnamese_ci": 183,
|
||||
"utf8_unicode_ci": 192,
|
||||
"utf8_icelandic_ci": 193,
|
||||
"utf8_latvian_ci": 194,
|
||||
"utf8_romanian_ci": 195,
|
||||
"utf8_slovenian_ci": 196,
|
||||
"utf8_polish_ci": 197,
|
||||
"utf8_estonian_ci": 198,
|
||||
"utf8_spanish_ci": 199,
|
||||
"utf8_swedish_ci": 200,
|
||||
"utf8_turkish_ci": 201,
|
||||
"utf8_czech_ci": 202,
|
||||
"utf8_danish_ci": 203,
|
||||
"utf8_lithuanian_ci": 204,
|
||||
"utf8_slovak_ci": 205,
|
||||
"utf8_spanish2_ci": 206,
|
||||
"utf8_roman_ci": 207,
|
||||
"utf8_persian_ci": 208,
|
||||
"utf8_esperanto_ci": 209,
|
||||
"utf8_hungarian_ci": 210,
|
||||
"utf8_sinhala_ci": 211,
|
||||
"utf8_german2_ci": 212,
|
||||
"utf8_croatian_ci": 213,
|
||||
"utf8_unicode_520_ci": 214,
|
||||
"utf8_vietnamese_ci": 215,
|
||||
"utf8_general_mysql500_ci": 223,
|
||||
"utf8mb4_unicode_ci": 224,
|
||||
"utf8mb4_icelandic_ci": 225,
|
||||
"utf8mb4_latvian_ci": 226,
|
||||
"utf8mb4_romanian_ci": 227,
|
||||
"utf8mb4_slovenian_ci": 228,
|
||||
"utf8mb4_polish_ci": 229,
|
||||
"utf8mb4_estonian_ci": 230,
|
||||
"utf8mb4_spanish_ci": 231,
|
||||
"utf8mb4_swedish_ci": 232,
|
||||
"utf8mb4_turkish_ci": 233,
|
||||
"utf8mb4_czech_ci": 234,
|
||||
"utf8mb4_danish_ci": 235,
|
||||
"utf8mb4_lithuanian_ci": 236,
|
||||
"utf8mb4_slovak_ci": 237,
|
||||
"utf8mb4_spanish2_ci": 238,
|
||||
"utf8mb4_roman_ci": 239,
|
||||
"utf8mb4_persian_ci": 240,
|
||||
"utf8mb4_esperanto_ci": 241,
|
||||
"utf8mb4_hungarian_ci": 242,
|
||||
"utf8mb4_sinhala_ci": 243,
|
||||
"utf8mb4_german2_ci": 244,
|
||||
"utf8mb4_croatian_ci": 245,
|
||||
"utf8mb4_unicode_520_ci": 246,
|
||||
"utf8mb4_vietnamese_ci": 247,
|
||||
}
|
||||
|
||||
// A blacklist of collations with which it is unsafe to interpolate parameters.
|
||||
// These multibyte encodings may contain 0x5c (`\`) in their trailing bytes.
|
||||
var unsafeCollations = map[byte]bool{
|
||||
1: true, // big5_chinese_ci
|
||||
13: true, // sjis_japanese_ci
|
||||
28: true, // gbk_chinese_ci
|
||||
84: true, // big5_bin
|
||||
86: true, // gb2312_bin
|
||||
87: true, // gbk_bin
|
||||
88: true, // sjis_bin
|
||||
95: true, // cp932_japanese_ci
|
||||
96: true, // cp932_bin
|
||||
}
|
403
vendor/github.com/go-sql-driver/mysql/connection.go
generated
vendored
@ -1,403 +0,0 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type mysqlConn struct {
|
||||
buf buffer
|
||||
netConn net.Conn
|
||||
affectedRows uint64
|
||||
insertId uint64
|
||||
cfg *config
|
||||
maxPacketAllowed int
|
||||
maxWriteSize int
|
||||
flags clientFlag
|
||||
status statusFlag
|
||||
sequence uint8
|
||||
parseTime bool
|
||||
strict bool
|
||||
}
|
||||
|
||||
type config struct {
|
||||
user string
|
||||
passwd string
|
||||
net string
|
||||
addr string
|
||||
dbname string
|
||||
params map[string]string
|
||||
loc *time.Location
|
||||
tls *tls.Config
|
||||
timeout time.Duration
|
||||
collation uint8
|
||||
allowAllFiles bool
|
||||
allowOldPasswords bool
|
||||
allowCleartextPasswords bool
|
||||
clientFoundRows bool
|
||||
columnsWithAlias bool
|
||||
interpolateParams bool
|
||||
}
|
||||
|
||||
// Handles parameters set in DSN after the connection is established
|
||||
func (mc *mysqlConn) handleParams() (err error) {
|
||||
for param, val := range mc.cfg.params {
|
||||
switch param {
|
||||
// Charset
|
||||
case "charset":
|
||||
charsets := strings.Split(val, ",")
|
||||
for i := range charsets {
|
||||
// ignore errors here - a charset may not exist
|
||||
err = mc.exec("SET NAMES " + charsets[i])
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// time.Time parsing
|
||||
case "parseTime":
|
||||
var isBool bool
|
||||
mc.parseTime, isBool = readBool(val)
|
||||
if !isBool {
|
||||
return errors.New("Invalid Bool value: " + val)
|
||||
}
|
||||
|
||||
// Strict mode
|
||||
case "strict":
|
||||
var isBool bool
|
||||
mc.strict, isBool = readBool(val)
|
||||
if !isBool {
|
||||
return errors.New("Invalid Bool value: " + val)
|
||||
}
|
||||
|
||||
// Compression
|
||||
case "compress":
|
||||
err = errors.New("Compression not implemented yet")
|
||||
return
|
||||
|
||||
// System Vars
|
||||
default:
|
||||
err = mc.exec("SET " + param + "=" + val + "")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) Begin() (driver.Tx, error) {
|
||||
if mc.netConn == nil {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
err := mc.exec("START TRANSACTION")
|
||||
if err == nil {
|
||||
return &mysqlTx{mc}, err
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) Close() (err error) {
|
||||
// Makes Close idempotent
|
||||
if mc.netConn != nil {
|
||||
err = mc.writeCommandPacket(comQuit)
|
||||
if err == nil {
|
||||
err = mc.netConn.Close()
|
||||
} else {
|
||||
mc.netConn.Close()
|
||||
}
|
||||
mc.netConn = nil
|
||||
}
|
||||
|
||||
mc.cfg = nil
|
||||
mc.buf.rd = nil
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
|
||||
if mc.netConn == nil {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
// Send command
|
||||
err := mc.writeCommandPacketStr(comStmtPrepare, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stmt := &mysqlStmt{
|
||||
mc: mc,
|
||||
}
|
||||
|
||||
// Read Result
|
||||
columnCount, err := stmt.readPrepareResultPacket()
|
||||
if err == nil {
|
||||
if stmt.paramCount > 0 {
|
||||
if err = mc.readUntilEOF(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if columnCount > 0 {
|
||||
err = mc.readUntilEOF()
|
||||
}
|
||||
}
|
||||
|
||||
return stmt, err
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
|
||||
buf := mc.buf.takeCompleteBuffer()
|
||||
if buf == nil {
|
||||
// can not take the buffer. Something must be wrong with the connection
|
||||
errLog.Print(ErrBusyBuffer)
|
||||
return "", driver.ErrBadConn
|
||||
}
|
||||
buf = buf[:0]
|
||||
argPos := 0
|
||||
|
||||
for i := 0; i < len(query); i++ {
|
||||
q := strings.IndexByte(query[i:], '?')
|
||||
if q == -1 {
|
||||
buf = append(buf, query[i:]...)
|
||||
break
|
||||
}
|
||||
buf = append(buf, query[i:i+q]...)
|
||||
i += q
|
||||
|
||||
arg := args[argPos]
|
||||
argPos++
|
||||
|
||||
if arg == nil {
|
||||
buf = append(buf, "NULL"...)
|
||||
continue
|
||||
}
|
||||
|
||||
switch v := arg.(type) {
|
||||
case int64:
|
||||
buf = strconv.AppendInt(buf, v, 10)
|
||||
case float64:
|
||||
buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
|
||||
case bool:
|
||||
if v {
|
||||
buf = append(buf, '1')
|
||||
} else {
|
||||
buf = append(buf, '0')
|
||||
}
|
||||
case time.Time:
|
||||
if v.IsZero() {
|
||||
buf = append(buf, "'0000-00-00'"...)
|
||||
} else {
|
||||
v := v.In(mc.cfg.loc)
|
||||
v = v.Add(time.Nanosecond * 500) // To round under microsecond
|
||||
year := v.Year()
|
||||
year100 := year / 100
|
||||
year1 := year % 100
|
||||
month := v.Month()
|
||||
day := v.Day()
|
||||
hour := v.Hour()
|
||||
minute := v.Minute()
|
||||
second := v.Second()
|
||||
micro := v.Nanosecond() / 1000
|
||||
|
||||
buf = append(buf, []byte{
|
||||
'\'',
|
||||
digits10[year100], digits01[year100],
|
||||
digits10[year1], digits01[year1],
|
||||
'-',
|
||||
digits10[month], digits01[month],
|
||||
'-',
|
||||
digits10[day], digits01[day],
|
||||
' ',
|
||||
digits10[hour], digits01[hour],
|
||||
':',
|
||||
digits10[minute], digits01[minute],
|
||||
':',
|
||||
digits10[second], digits01[second],
|
||||
}...)
|
||||
|
||||
if micro != 0 {
|
||||
micro10000 := micro / 10000
|
||||
micro100 := micro / 100 % 100
|
||||
micro1 := micro % 100
|
||||
buf = append(buf, []byte{
|
||||
'.',
|
||||
digits10[micro10000], digits01[micro10000],
|
||||
digits10[micro100], digits01[micro100],
|
||||
digits10[micro1], digits01[micro1],
|
||||
}...)
|
||||
}
|
||||
buf = append(buf, '\'')
|
||||
}
|
||||
case []byte:
|
||||
if v == nil {
|
||||
buf = append(buf, "NULL"...)
|
||||
} else {
|
||||
buf = append(buf, "_binary'"...)
|
||||
if mc.status&statusNoBackslashEscapes == 0 {
|
||||
buf = escapeBytesBackslash(buf, v)
|
||||
} else {
|
||||
buf = escapeBytesQuotes(buf, v)
|
||||
}
|
||||
buf = append(buf, '\'')
|
||||
}
|
||||
case string:
|
||||
buf = append(buf, '\'')
|
||||
if mc.status&statusNoBackslashEscapes == 0 {
|
||||
buf = escapeStringBackslash(buf, v)
|
||||
} else {
|
||||
buf = escapeStringQuotes(buf, v)
|
||||
}
|
||||
buf = append(buf, '\'')
|
||||
default:
|
||||
return "", driver.ErrSkip
|
||||
}
|
||||
|
||||
if len(buf)+4 > mc.maxPacketAllowed {
|
||||
return "", driver.ErrSkip
|
||||
}
|
||||
}
|
||||
if argPos != len(args) {
|
||||
return "", driver.ErrSkip
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
|
||||
if mc.netConn == nil {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
if len(args) != 0 {
|
||||
if !mc.cfg.interpolateParams {
|
||||
return nil, driver.ErrSkip
|
||||
}
|
||||
// try to interpolate the parameters to save extra roundtrips for preparing and closing a statement
|
||||
prepared, err := mc.interpolateParams(query, args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
query = prepared
|
||||
args = nil
|
||||
}
|
||||
mc.affectedRows = 0
|
||||
mc.insertId = 0
|
||||
|
||||
err := mc.exec(query)
|
||||
if err == nil {
|
||||
return &mysqlResult{
|
||||
affectedRows: int64(mc.affectedRows),
|
||||
insertId: int64(mc.insertId),
|
||||
}, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Internal function to execute commands
|
||||
func (mc *mysqlConn) exec(query string) error {
|
||||
// Send command
|
||||
err := mc.writeCommandPacketStr(comQuery, query)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Read Result
|
||||
resLen, err := mc.readResultSetHeaderPacket()
|
||||
if err == nil && resLen > 0 {
|
||||
if err = mc.readUntilEOF(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = mc.readUntilEOF()
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
|
||||
if mc.netConn == nil {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
if len(args) != 0 {
|
||||
if !mc.cfg.interpolateParams {
|
||||
return nil, driver.ErrSkip
|
||||
}
|
||||
// try client-side prepare to reduce roundtrip
|
||||
prepared, err := mc.interpolateParams(query, args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
query = prepared
|
||||
args = nil
|
||||
}
|
||||
// Send command
|
||||
err := mc.writeCommandPacketStr(comQuery, query)
|
||||
if err == nil {
|
||||
// Read Result
|
||||
var resLen int
|
||||
resLen, err = mc.readResultSetHeaderPacket()
|
||||
if err == nil {
|
||||
rows := new(textRows)
|
||||
rows.mc = mc
|
||||
|
||||
if resLen == 0 {
|
||||
// no columns, no more data
|
||||
return emptyRows{}, nil
|
||||
}
|
||||
// Columns
|
||||
rows.columns, err = mc.readColumns(resLen)
|
||||
return rows, err
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Gets the value of the given MySQL System Variable
|
||||
// The returned byte slice is only valid until the next read
|
||||
func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
|
||||
// Send command
|
||||
if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read Result
|
||||
resLen, err := mc.readResultSetHeaderPacket()
|
||||
if err == nil {
|
||||
rows := new(textRows)
|
||||
rows.mc = mc
|
||||
|
||||
if resLen > 0 {
|
||||
// Columns
|
||||
if err := mc.readUntilEOF(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
dest := make([]driver.Value, resLen)
|
||||
if err = rows.readRow(dest); err == nil {
|
||||
return dest[0].([]byte), mc.readUntilEOF()
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
162
vendor/github.com/go-sql-driver/mysql/const.go
generated
vendored
@ -1,162 +0,0 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
const (
|
||||
minProtocolVersion byte = 10
|
||||
maxPacketSize = 1<<24 - 1
|
||||
timeFormat = "2006-01-02 15:04:05.999999"
|
||||
)
|
||||
|
||||
// MySQL constants documentation:
|
||||
// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
|
||||
|
||||
const (
|
||||
iOK byte = 0x00
|
||||
iLocalInFile byte = 0xfb
|
||||
iEOF byte = 0xfe
|
||||
iERR byte = 0xff
|
||||
)
|
||||
|
||||
// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags
|
||||
type clientFlag uint32
|
||||
|
||||
const (
|
||||
clientLongPassword clientFlag = 1 << iota
|
||||
clientFoundRows
|
||||
clientLongFlag
|
||||
clientConnectWithDB
|
||||
clientNoSchema
|
||||
clientCompress
|
||||
clientODBC
|
||||
clientLocalFiles
|
||||
clientIgnoreSpace
|
||||
clientProtocol41
|
||||
clientInteractive
|
||||
clientSSL
|
||||
clientIgnoreSIGPIPE
|
||||
clientTransactions
|
||||
clientReserved
|
||||
clientSecureConn
|
||||
clientMultiStatements
|
||||
clientMultiResults
|
||||
clientPSMultiResults
|
||||
clientPluginAuth
|
||||
clientConnectAttrs
|
||||
clientPluginAuthLenEncClientData
|
||||
clientCanHandleExpiredPasswords
|
||||
clientSessionTrack
|
||||
clientDeprecateEOF
|
||||
)
|
||||
|
||||
const (
|
||||
comQuit byte = iota + 1
|
||||
comInitDB
|
||||
comQuery
|
||||
comFieldList
|
||||
comCreateDB
|
||||
comDropDB
|
||||
comRefresh
|
||||
comShutdown
|
||||
comStatistics
|
||||
comProcessInfo
|
||||
comConnect
|
||||
comProcessKill
|
||||
comDebug
|
||||
comPing
|
||||
comTime
|
||||
comDelayedInsert
|
||||
comChangeUser
|
||||
comBinlogDump
|
||||
comTableDump
|
||||
comConnectOut
|
||||
comRegisterSlave
|
||||
comStmtPrepare
|
||||
comStmtExecute
|
||||
comStmtSendLongData
|
||||
comStmtClose
|
||||
comStmtReset
|
||||
comSetOption
|
||||
comStmtFetch
|
||||
)
|
||||
|
||||
// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
|
||||
const (
|
||||
fieldTypeDecimal byte = iota
|
||||
fieldTypeTiny
|
||||
fieldTypeShort
|
||||
fieldTypeLong
|
||||
fieldTypeFloat
|
||||
fieldTypeDouble
|
||||
fieldTypeNULL
|
||||
fieldTypeTimestamp
|
||||
fieldTypeLongLong
|
||||
fieldTypeInt24
|
||||
fieldTypeDate
|
||||
fieldTypeTime
|
||||
fieldTypeDateTime
|
||||
fieldTypeYear
|
||||
fieldTypeNewDate
|
||||
fieldTypeVarChar
|
||||
fieldTypeBit
|
||||
)
|
||||
const (
|
||||
fieldTypeNewDecimal byte = iota + 0xf6
|
||||
fieldTypeEnum
|
||||
fieldTypeSet
|
||||
fieldTypeTinyBLOB
|
||||
fieldTypeMediumBLOB
|
||||
fieldTypeLongBLOB
|
||||
fieldTypeBLOB
|
||||
fieldTypeVarString
|
||||
fieldTypeString
|
||||
fieldTypeGeometry
|
||||
)
|
||||
|
||||
type fieldFlag uint16
|
||||
|
||||
const (
|
||||
flagNotNULL fieldFlag = 1 << iota
|
||||
flagPriKey
|
||||
flagUniqueKey
|
||||
flagMultipleKey
|
||||
flagBLOB
|
||||
flagUnsigned
|
||||
flagZeroFill
|
||||
flagBinary
|
||||
flagEnum
|
||||
flagAutoIncrement
|
||||
flagTimestamp
|
||||
flagSet
|
||||
flagUnknown1
|
||||
flagUnknown2
|
||||
flagUnknown3
|
||||
flagUnknown4
|
||||
)
|
||||
|
||||
// http://dev.mysql.com/doc/internals/en/status-flags.html
|
||||
type statusFlag uint16
|
||||
|
||||
const (
|
||||
statusInTrans statusFlag = 1 << iota
|
||||
statusInAutocommit
|
||||
statusReserved // Not in documentation
|
||||
statusMoreResultsExists
|
||||
statusNoGoodIndexUsed
|
||||
statusNoIndexUsed
|
||||
statusCursorExists
|
||||
statusLastRowSent
|
||||
statusDbDropped
|
||||
statusNoBackslashEscapes
|
||||
statusMetadataChanged
|
||||
statusQueryWasSlow
|
||||
statusPsOutParams
|
||||
statusInTransReadonly
|
||||
statusSessionStateChanged
|
||||
)
|
149
vendor/github.com/go-sql-driver/mysql/driver.go
generated
vendored
@ -1,149 +0,0 @@
|
||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// The driver should be used via the database/sql package:
|
||||
//
|
||||
// import "database/sql"
|
||||
// import _ "github.com/go-sql-driver/mysql"
|
||||
//
|
||||
// db, err := sql.Open("mysql", "user:password@/dbname")
|
||||
//
|
||||
// See https://github.com/go-sql-driver/mysql#usage for details
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"net"
|
||||
)
|
||||
|
||||
// This struct is exported to make the driver directly accessible.
|
||||
// In general the driver is used via the database/sql package.
|
||||
type MySQLDriver struct{}
|
||||
|
||||
// DialFunc is a function which can be used to establish the network connection.
|
||||
// Custom dial functions must be registered with RegisterDial
|
||||
type DialFunc func(addr string) (net.Conn, error)
|
||||
|
||||
var dials map[string]DialFunc
|
||||
|
||||
// RegisterDial registers a custom dial function. It can then be used by the
|
||||
// network address mynet(addr), where mynet is the registered new network.
|
||||
// addr is passed as a parameter to the dial function.
|
||||
func RegisterDial(net string, dial DialFunc) {
|
||||
if dials == nil {
|
||||
dials = make(map[string]DialFunc)
|
||||
}
|
||||
dials[net] = dial
|
||||
}
|
||||
|
||||
// Open new Connection.
|
||||
// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how
|
||||
// the DSN string is formatted
|
||||
func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
|
||||
var err error
|
||||
|
||||
// New mysqlConn
|
||||
mc := &mysqlConn{
|
||||
maxPacketAllowed: maxPacketSize,
|
||||
maxWriteSize: maxPacketSize - 1,
|
||||
}
|
||||
mc.cfg, err = parseDSN(dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Connect to Server
|
||||
if dial, ok := dials[mc.cfg.net]; ok {
|
||||
mc.netConn, err = dial(mc.cfg.addr)
|
||||
} else {
|
||||
nd := net.Dialer{Timeout: mc.cfg.timeout}
|
||||
mc.netConn, err = nd.Dial(mc.cfg.net, mc.cfg.addr)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Enable TCP Keepalives on TCP connections
|
||||
if tc, ok := mc.netConn.(*net.TCPConn); ok {
|
||||
if err := tc.SetKeepAlive(true); err != nil {
|
||||
// Don't send COM_QUIT before handshake.
|
||||
mc.netConn.Close()
|
||||
mc.netConn = nil
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
mc.buf = newBuffer(mc.netConn)
|
||||
|
||||
// Reading Handshake Initialization Packet
|
||||
cipher, err := mc.readInitPacket()
|
||||
if err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Send Client Authentication Packet
|
||||
if err = mc.writeAuthPacket(cipher); err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read Result Packet
|
||||
err = mc.readResultOK()
|
||||
if err != nil {
|
||||
// Retry with old authentication method, if allowed
|
||||
if mc.cfg != nil && mc.cfg.allowOldPasswords && err == ErrOldPassword {
|
||||
if err = mc.writeOldAuthPacket(cipher); err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
if err = mc.readResultOK(); err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
} else if mc.cfg != nil && mc.cfg.allowCleartextPasswords && err == ErrCleartextPassword {
|
||||
if err = mc.writeClearAuthPacket(); err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
if err = mc.readResultOK(); err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Get max allowed packet size
|
||||
maxap, err := mc.getSystemVar("max_allowed_packet")
|
||||
if err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
mc.maxPacketAllowed = stringToInt(maxap) - 1
|
||||
if mc.maxPacketAllowed < maxPacketSize {
|
||||
mc.maxWriteSize = mc.maxPacketAllowed
|
||||
}
|
||||
|
||||
// Handle DSN Params
|
||||
err = mc.handleParams()
|
||||
if err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return mc, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
sql.Register("mysql", &MySQLDriver{})
|
||||
}
|
131
vendor/github.com/go-sql-driver/mysql/errors.go
generated
vendored
@ -1,131 +0,0 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Various errors the driver might return. Can change between driver versions.
|
||||
var (
|
||||
ErrInvalidConn = errors.New("Invalid Connection")
|
||||
ErrMalformPkt = errors.New("Malformed Packet")
|
||||
ErrNoTLS = errors.New("TLS encryption requested but server does not support TLS")
|
||||
ErrOldPassword = errors.New("This user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
|
||||
ErrCleartextPassword = errors.New("This user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN.")
|
||||
ErrUnknownPlugin = errors.New("The authentication plugin is not supported.")
|
||||
ErrOldProtocol = errors.New("MySQL-Server does not support required Protocol 41+")
|
||||
ErrPktSync = errors.New("Commands out of sync. You can't run this command now")
|
||||
ErrPktSyncMul = errors.New("Commands out of sync. Did you run multiple statements at once?")
|
||||
ErrPktTooLarge = errors.New("Packet for query is too large. You can change this value on the server by adjusting the 'max_allowed_packet' variable.")
|
||||
ErrBusyBuffer = errors.New("Busy buffer")
|
||||
)
|
||||
|
||||
var errLog Logger = log.New(os.Stderr, "[MySQL] ", log.Ldate|log.Ltime|log.Lshortfile)
|
||||
|
||||
// Logger is used to log critical error messages.
|
||||
type Logger interface {
|
||||
Print(v ...interface{})
|
||||
}
|
||||
|
||||
// SetLogger is used to set the logger for critical errors.
|
||||
// The initial logger is os.Stderr.
|
||||
func SetLogger(logger Logger) error {
|
||||
if logger == nil {
|
||||
return errors.New("logger is nil")
|
||||
}
|
||||
errLog = logger
|
||||
return nil
|
||||
}
|
||||
|
||||
// MySQLError is an error type which represents a single MySQL error
|
||||
type MySQLError struct {
|
||||
Number uint16
|
||||
Message string
|
||||
}
|
||||
|
||||
func (me *MySQLError) Error() string {
|
||||
return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
|
||||
}
|
||||
|
||||
// MySQLWarnings is an error type which represents a group of one or more MySQL
|
||||
// warnings
|
||||
type MySQLWarnings []MySQLWarning
|
||||
|
||||
func (mws MySQLWarnings) Error() string {
|
||||
var msg string
|
||||
for i, warning := range mws {
|
||||
if i > 0 {
|
||||
msg += "\r\n"
|
||||
}
|
||||
msg += fmt.Sprintf(
|
||||
"%s %s: %s",
|
||||
warning.Level,
|
||||
warning.Code,
|
||||
warning.Message,
|
||||
)
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
// MySQLWarning is an error type which represents a single MySQL warning.
|
||||
// Warnings are returned in groups only. See MySQLWarnings
|
||||
type MySQLWarning struct {
|
||||
Level string
|
||||
Code string
|
||||
Message string
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) getWarnings() (err error) {
|
||||
rows, err := mc.Query("SHOW WARNINGS", nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var warnings = MySQLWarnings{}
|
||||
var values = make([]driver.Value, 3)
|
||||
|
||||
for {
|
||||
err = rows.Next(values)
|
||||
switch err {
|
||||
case nil:
|
||||
warning := MySQLWarning{}
|
||||
|
||||
if raw, ok := values[0].([]byte); ok {
|
||||
warning.Level = string(raw)
|
||||
} else {
|
||||
warning.Level = fmt.Sprintf("%s", values[0])
|
||||
}
|
||||
if raw, ok := values[1].([]byte); ok {
|
||||
warning.Code = string(raw)
|
||||
} else {
|
||||
warning.Code = fmt.Sprintf("%s", values[1])
|
||||
}
|
||||
if raw, ok := values[2].([]byte); ok {
|
||||
warning.Message = string(raw)
|
||||
} else {
|
||||
warning.Message = fmt.Sprintf("%s", values[2])
|
||||
}
|
||||
|
||||
warnings = append(warnings, warning)
|
||||
|
||||
case io.EOF:
|
||||
return warnings
|
||||
|
||||
default:
|
||||
rows.Close()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
182
vendor/github.com/go-sql-driver/mysql/infile.go
generated
vendored
@ -1,182 +0,0 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
fileRegister map[string]bool
|
||||
fileRegisterLock sync.RWMutex
|
||||
readerRegister map[string]func() io.Reader
|
||||
readerRegisterLock sync.RWMutex
|
||||
)
|
||||
|
||||
// RegisterLocalFile adds the given file to the file whitelist,
|
||||
// so that it can be used by "LOAD DATA LOCAL INFILE <filepath>".
|
||||
// Alternatively you can allow the use of all local files with
|
||||
// the DSN parameter 'allowAllFiles=true'
|
||||
//
|
||||
// filePath := "/home/gopher/data.csv"
|
||||
// mysql.RegisterLocalFile(filePath)
|
||||
// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo")
|
||||
// if err != nil {
|
||||
// ...
|
||||
//
|
||||
func RegisterLocalFile(filePath string) {
|
||||
fileRegisterLock.Lock()
|
||||
// lazy map init
|
||||
if fileRegister == nil {
|
||||
fileRegister = make(map[string]bool)
|
||||
}
|
||||
|
||||
fileRegister[strings.Trim(filePath, `"`)] = true
|
||||
fileRegisterLock.Unlock()
|
||||
}
|
||||
|
||||
// DeregisterLocalFile removes the given filepath from the whitelist.
|
||||
func DeregisterLocalFile(filePath string) {
|
||||
fileRegisterLock.Lock()
|
||||
delete(fileRegister, strings.Trim(filePath, `"`))
|
||||
fileRegisterLock.Unlock()
|
||||
}
|
||||
|
||||
// RegisterReaderHandler registers a handler function which is used
|
||||
// to receive a io.Reader.
|
||||
// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::<name>".
|
||||
// If the handler returns a io.ReadCloser Close() is called when the
|
||||
// request is finished.
|
||||
//
|
||||
// mysql.RegisterReaderHandler("data", func() io.Reader {
|
||||
// var csvReader io.Reader // Some Reader that returns CSV data
|
||||
// ... // Open Reader here
|
||||
// return csvReader
|
||||
// })
|
||||
// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo")
|
||||
// if err != nil {
|
||||
// ...
|
||||
//
|
||||
func RegisterReaderHandler(name string, handler func() io.Reader) {
|
||||
readerRegisterLock.Lock()
|
||||
// lazy map init
|
||||
if readerRegister == nil {
|
||||
readerRegister = make(map[string]func() io.Reader)
|
||||
}
|
||||
|
||||
readerRegister[name] = handler
|
||||
readerRegisterLock.Unlock()
|
||||
}
|
||||
|
||||
// DeregisterReaderHandler removes the ReaderHandler function with
|
||||
// the given name from the registry.
|
||||
func DeregisterReaderHandler(name string) {
|
||||
readerRegisterLock.Lock()
|
||||
delete(readerRegister, name)
|
||||
readerRegisterLock.Unlock()
|
||||
}
|
||||
|
||||
func deferredClose(err *error, closer io.Closer) {
|
||||
closeErr := closer.Close()
|
||||
if *err == nil {
|
||||
*err = closeErr
|
||||
}
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
|
||||
var rdr io.Reader
|
||||
var data []byte
|
||||
|
||||
if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader
|
||||
// The server might return an absolute path. See issue #355.
|
||||
name = name[idx+8:]
|
||||
|
||||
readerRegisterLock.RLock()
|
||||
handler, inMap := readerRegister[name]
|
||||
readerRegisterLock.RUnlock()
|
||||
|
||||
if inMap {
|
||||
rdr = handler()
|
||||
if rdr != nil {
|
||||
data = make([]byte, 4+mc.maxWriteSize)
|
||||
|
||||
if cl, ok := rdr.(io.Closer); ok {
|
||||
defer deferredClose(&err, cl)
|
||||
}
|
||||
} else {
|
||||
err = fmt.Errorf("Reader '%s' is <nil>", name)
|
||||
}
|
||||
} else {
|
||||
err = fmt.Errorf("Reader '%s' is not registered", name)
|
||||
}
|
||||
} else { // File
|
||||
name = strings.Trim(name, `"`)
|
||||
fileRegisterLock.RLock()
|
||||
fr := fileRegister[name]
|
||||
fileRegisterLock.RUnlock()
|
||||
if mc.cfg.allowAllFiles || fr {
|
||||
var file *os.File
|
||||
var fi os.FileInfo
|
||||
|
||||
if file, err = os.Open(name); err == nil {
|
||||
defer deferredClose(&err, file)
|
||||
|
||||
// get file size
|
||||
if fi, err = file.Stat(); err == nil {
|
||||
rdr = file
|
||||
if fileSize := int(fi.Size()); fileSize <= mc.maxWriteSize {
|
||||
data = make([]byte, 4+fileSize)
|
||||
} else if fileSize <= mc.maxPacketAllowed {
|
||||
data = make([]byte, 4+mc.maxWriteSize)
|
||||
} else {
|
||||
err = fmt.Errorf("Local File '%s' too large: Size: %d, Max: %d", name, fileSize, mc.maxPacketAllowed)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
err = fmt.Errorf("Local File '%s' is not registered. Use the DSN parameter 'allowAllFiles=true' to allow all files", name)
|
||||
}
|
||||
}
|
||||
|
||||
// send content packets
|
||||
if err == nil {
|
||||
var n int
|
||||
for err == nil {
|
||||
n, err = rdr.Read(data[4:])
|
||||
if n > 0 {
|
||||
if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
|
||||
return ioErr
|
||||
}
|
||||
}
|
||||
}
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
|
||||
// send empty packet (termination)
|
||||
if data == nil {
|
||||
data = make([]byte, 4)
|
||||
}
|
||||
if ioErr := mc.writePacket(data[:4]); ioErr != nil {
|
||||
return ioErr
|
||||
}
|
||||
|
||||
// read OK packet
|
||||
if err == nil {
|
||||
return mc.readResultOK()
|
||||
} else {
|
||||
mc.readPacket()
|
||||
}
|
||||
return err
|
||||
}
|
1182
vendor/github.com/go-sql-driver/mysql/packets.go
generated
vendored
File diff suppressed because it is too large
22
vendor/github.com/go-sql-driver/mysql/result.go
generated
vendored
@ -1,22 +0,0 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
type mysqlResult struct {
|
||||
affectedRows int64
|
||||
insertId int64
|
||||
}
|
||||
|
||||
func (res *mysqlResult) LastInsertId() (int64, error) {
|
||||
return res.insertId, nil
|
||||
}
|
||||
|
||||
func (res *mysqlResult) RowsAffected() (int64, error) {
|
||||
return res.affectedRows, nil
|
||||
}
|
106
vendor/github.com/go-sql-driver/mysql/rows.go
generated
vendored
@ -1,106 +0,0 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"io"
|
||||
)
|
||||
|
||||
type mysqlField struct {
|
||||
tableName string
|
||||
name string
|
||||
flags fieldFlag
|
||||
fieldType byte
|
||||
decimals byte
|
||||
}
|
||||
|
||||
type mysqlRows struct {
|
||||
mc *mysqlConn
|
||||
columns []mysqlField
|
||||
}
|
||||
|
||||
type binaryRows struct {
|
||||
mysqlRows
|
||||
}
|
||||
|
||||
type textRows struct {
|
||||
mysqlRows
|
||||
}
|
||||
|
||||
type emptyRows struct{}
|
||||
|
||||
func (rows *mysqlRows) Columns() []string {
|
||||
columns := make([]string, len(rows.columns))
|
||||
if rows.mc.cfg.columnsWithAlias {
|
||||
for i := range columns {
|
||||
if tableName := rows.columns[i].tableName; len(tableName) > 0 {
|
||||
columns[i] = tableName + "." + rows.columns[i].name
|
||||
} else {
|
||||
columns[i] = rows.columns[i].name
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for i := range columns {
|
||||
columns[i] = rows.columns[i].name
|
||||
}
|
||||
}
|
||||
return columns
|
||||
}
|
||||
|
||||
func (rows *mysqlRows) Close() error {
|
||||
mc := rows.mc
|
||||
if mc == nil {
|
||||
return nil
|
||||
}
|
||||
if mc.netConn == nil {
|
||||
return ErrInvalidConn
|
||||
}
|
||||
|
||||
// Remove unread packets from stream
|
||||
err := mc.readUntilEOF()
|
||||
rows.mc = nil
|
||||
return err
|
||||
}
|
||||
|
||||
func (rows *binaryRows) Next(dest []driver.Value) error {
|
||||
if mc := rows.mc; mc != nil {
|
||||
if mc.netConn == nil {
|
||||
return ErrInvalidConn
|
||||
}
|
||||
|
||||
// Fetch next row from stream
|
||||
return rows.readRow(dest)
|
||||
}
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
func (rows *textRows) Next(dest []driver.Value) error {
|
||||
if mc := rows.mc; mc != nil {
|
||||
if mc.netConn == nil {
|
||||
return ErrInvalidConn
|
||||
}
|
||||
|
||||
// Fetch next row from stream
|
||||
return rows.readRow(dest)
|
||||
}
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
func (rows emptyRows) Columns() []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rows emptyRows) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rows emptyRows) Next(dest []driver.Value) error {
|
||||
return io.EOF
|
||||
}
|
150
vendor/github.com/go-sql-driver/mysql/statement.go
generated
vendored
@ -1,150 +0,0 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type mysqlStmt struct {
|
||||
mc *mysqlConn
|
||||
id uint32
|
||||
paramCount int
|
||||
columns []mysqlField // cached from the first query
|
||||
}
|
||||
|
||||
func (stmt *mysqlStmt) Close() error {
|
||||
if stmt.mc == nil || stmt.mc.netConn == nil {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
|
||||
err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
|
||||
stmt.mc = nil
|
||||
return err
|
||||
}
|
||||
|
||||
func (stmt *mysqlStmt) NumInput() int {
|
||||
return stmt.paramCount
|
||||
}
|
||||
|
||||
func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
|
||||
return converter{}
|
||||
}
|
||||
|
||||
func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
|
||||
if stmt.mc.netConn == nil {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
// Send command
|
||||
err := stmt.writeExecutePacket(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mc := stmt.mc
|
||||
|
||||
mc.affectedRows = 0
|
||||
mc.insertId = 0
|
||||
|
||||
// Read Result
|
||||
resLen, err := mc.readResultSetHeaderPacket()
|
||||
if err == nil {
|
||||
if resLen > 0 {
|
||||
// Columns
|
||||
err = mc.readUntilEOF()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Rows
|
||||
err = mc.readUntilEOF()
|
||||
}
|
||||
if err == nil {
|
||||
return &mysqlResult{
|
||||
affectedRows: int64(mc.affectedRows),
|
||||
insertId: int64(mc.insertId),
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
|
||||
if stmt.mc.netConn == nil {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
// Send command
|
||||
err := stmt.writeExecutePacket(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mc := stmt.mc
|
||||
|
||||
// Read Result
|
||||
resLen, err := mc.readResultSetHeaderPacket()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rows := new(binaryRows)
|
||||
rows.mc = mc
|
||||
|
||||
if resLen > 0 {
|
||||
// Columns
|
||||
// If not cached, read them and cache them
|
||||
if stmt.columns == nil {
|
||||
rows.columns, err = mc.readColumns(resLen)
|
||||
stmt.columns = rows.columns
|
||||
} else {
|
||||
rows.columns = stmt.columns
|
||||
err = mc.readUntilEOF()
|
||||
}
|
||||
}
|
||||
|
||||
return rows, err
|
||||
}
|
||||
|
||||
type converter struct{}
|
||||
|
||||
func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
|
||||
if driver.IsValue(v) {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
rv := reflect.ValueOf(v)
|
||||
switch rv.Kind() {
|
||||
case reflect.Ptr:
|
||||
// indirect pointers
|
||||
if rv.IsNil() {
|
||||
return nil, nil
|
||||
}
|
||||
return c.ConvertValue(rv.Elem().Interface())
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return rv.Int(), nil
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
|
||||
return int64(rv.Uint()), nil
|
||||
case reflect.Uint64:
|
||||
u64 := rv.Uint()
|
||||
if u64 >= 1<<63 {
|
||||
return strconv.FormatUint(u64, 10), nil
|
||||
}
|
||||
return int64(u64), nil
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return rv.Float(), nil
|
||||
}
|
||||
return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
|
||||
}
|
31
vendor/github.com/go-sql-driver/mysql/transaction.go
generated
vendored
@ -1,31 +0,0 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package mysql

type mysqlTx struct {
    mc *mysqlConn
}

func (tx *mysqlTx) Commit() (err error) {
    if tx.mc == nil || tx.mc.netConn == nil {
        return ErrInvalidConn
    }
    err = tx.mc.exec("COMMIT")
    tx.mc = nil
    return
}

func (tx *mysqlTx) Rollback() (err error) {
    if tx.mc == nil || tx.mc.netConn == nil {
        return ErrInvalidConn
    }
    err = tx.mc.exec("ROLLBACK")
    tx.mc = nil
    return
}
973
vendor/github.com/go-sql-driver/mysql/utils.go
generated
vendored
@ -1,973 +0,0 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"crypto/tls"
|
||||
"database/sql/driver"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs
|
||||
|
||||
errInvalidDSNUnescaped = errors.New("Invalid DSN: Did you forget to escape a param value?")
|
||||
errInvalidDSNAddr = errors.New("Invalid DSN: Network Address not terminated (missing closing brace)")
|
||||
errInvalidDSNNoSlash = errors.New("Invalid DSN: Missing the slash separating the database name")
|
||||
errInvalidDSNUnsafeCollation = errors.New("Invalid DSN: interpolateParams can be used with ascii, latin1, utf8 and utf8mb4 charset")
|
||||
)
|
||||
|
||||
func init() {
|
||||
tlsConfigRegister = make(map[string]*tls.Config)
|
||||
}
|
||||
|
||||
// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
|
||||
// Use the key as a value in the DSN where tls=value.
|
||||
//
|
||||
// rootCertPool := x509.NewCertPool()
|
||||
// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
|
||||
// if err != nil {
|
||||
// log.Fatal(err)
|
||||
// }
|
||||
// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
|
||||
// log.Fatal("Failed to append PEM.")
|
||||
// }
|
||||
// clientCert := make([]tls.Certificate, 0, 1)
|
||||
// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem")
|
||||
// if err != nil {
|
||||
// log.Fatal(err)
|
||||
// }
|
||||
// clientCert = append(clientCert, certs)
|
||||
// mysql.RegisterTLSConfig("custom", &tls.Config{
|
||||
// RootCAs: rootCertPool,
|
||||
// Certificates: clientCert,
|
||||
// })
|
||||
// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom")
|
||||
//
|
||||
func RegisterTLSConfig(key string, config *tls.Config) error {
|
||||
if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" {
|
||||
return fmt.Errorf("Key '%s' is reserved", key)
|
||||
}
|
||||
|
||||
tlsConfigRegister[key] = config
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeregisterTLSConfig removes the tls.Config associated with key.
|
||||
func DeregisterTLSConfig(key string) {
|
||||
delete(tlsConfigRegister, key)
|
||||
}
|
||||
|
||||
// parseDSN parses the DSN string to a config
|
||||
func parseDSN(dsn string) (cfg *config, err error) {
|
||||
// New config with some default values
|
||||
cfg = &config{
|
||||
loc: time.UTC,
|
||||
collation: defaultCollation,
|
||||
}
|
||||
|
||||
// [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN]
|
||||
// Find the last '/' (since the password or the net addr might contain a '/')
|
||||
foundSlash := false
|
||||
for i := len(dsn) - 1; i >= 0; i-- {
|
||||
if dsn[i] == '/' {
|
||||
foundSlash = true
|
||||
var j, k int
|
||||
|
||||
// left part is empty if i <= 0
|
||||
if i > 0 {
|
||||
// [username[:password]@][protocol[(address)]]
|
||||
// Find the last '@' in dsn[:i]
|
||||
for j = i; j >= 0; j-- {
|
||||
if dsn[j] == '@' {
|
||||
// username[:password]
|
||||
// Find the first ':' in dsn[:j]
|
||||
for k = 0; k < j; k++ {
|
||||
if dsn[k] == ':' {
|
||||
cfg.passwd = dsn[k+1 : j]
|
||||
break
|
||||
}
|
||||
}
|
||||
cfg.user = dsn[:k]
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// [protocol[(address)]]
|
||||
// Find the first '(' in dsn[j+1:i]
|
||||
for k = j + 1; k < i; k++ {
|
||||
if dsn[k] == '(' {
|
||||
// dsn[i-1] must be == ')' if an address is specified
|
||||
if dsn[i-1] != ')' {
|
||||
if strings.ContainsRune(dsn[k+1:i], ')') {
|
||||
return nil, errInvalidDSNUnescaped
|
||||
}
|
||||
return nil, errInvalidDSNAddr
|
||||
}
|
||||
cfg.addr = dsn[k+1 : i-1]
|
||||
break
|
||||
}
|
||||
}
|
||||
cfg.net = dsn[j+1 : k]
|
||||
}
|
||||
|
||||
// dbname[?param1=value1&...¶mN=valueN]
|
||||
// Find the first '?' in dsn[i+1:]
|
||||
for j = i + 1; j < len(dsn); j++ {
|
||||
if dsn[j] == '?' {
|
||||
if err = parseDSNParams(cfg, dsn[j+1:]); err != nil {
|
||||
return
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
cfg.dbname = dsn[i+1 : j]
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !foundSlash && len(dsn) > 0 {
|
||||
return nil, errInvalidDSNNoSlash
|
||||
}
|
||||
|
||||
if cfg.interpolateParams && unsafeCollations[cfg.collation] {
|
||||
return nil, errInvalidDSNUnsafeCollation
|
||||
}
|
||||
|
||||
// Set default network if empty
|
||||
if cfg.net == "" {
|
||||
cfg.net = "tcp"
|
||||
}
|
||||
|
||||
// Set default address if empty
|
||||
if cfg.addr == "" {
|
||||
switch cfg.net {
|
||||
case "tcp":
|
||||
cfg.addr = "127.0.0.1:3306"
|
||||
case "unix":
|
||||
cfg.addr = "/tmp/mysql.sock"
|
||||
default:
|
||||
return nil, errors.New("Default addr for network '" + cfg.net + "' unknown")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// parseDSNParams parses the DSN "query string"
|
||||
// Values must be url.QueryEscape'ed
|
||||
func parseDSNParams(cfg *config, params string) (err error) {
|
||||
for _, v := range strings.Split(params, "&") {
|
||||
param := strings.SplitN(v, "=", 2)
|
||||
if len(param) != 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
// cfg params
|
||||
switch value := param[1]; param[0] {
|
||||
|
||||
// Enable client side placeholder substitution
|
||||
case "interpolateParams":
|
||||
var isBool bool
|
||||
cfg.interpolateParams, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return fmt.Errorf("Invalid Bool value: %s", value)
|
||||
}
|
||||
|
||||
// Disable INFILE whitelist / enable all files
|
||||
case "allowAllFiles":
|
||||
var isBool bool
|
||||
cfg.allowAllFiles, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return fmt.Errorf("Invalid Bool value: %s", value)
|
||||
}
|
||||
|
||||
// Use cleartext authentication mode (MySQL 5.5.10+)
|
||||
case "allowCleartextPasswords":
|
||||
var isBool bool
|
||||
cfg.allowCleartextPasswords, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return fmt.Errorf("Invalid Bool value: %s", value)
|
||||
}
|
||||
|
||||
// Use old authentication mode (pre MySQL 4.1)
|
||||
case "allowOldPasswords":
|
||||
var isBool bool
|
||||
cfg.allowOldPasswords, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return fmt.Errorf("Invalid Bool value: %s", value)
|
||||
}
|
||||
|
||||
// Switch "rowsAffected" mode
|
||||
case "clientFoundRows":
|
||||
var isBool bool
|
||||
cfg.clientFoundRows, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return fmt.Errorf("Invalid Bool value: %s", value)
|
||||
}
|
||||
|
||||
// Collation
|
||||
case "collation":
|
||||
collation, ok := collations[value]
|
||||
if !ok {
|
||||
// Note possibility for false negatives:
|
||||
// could be triggered although the collation is valid if the
|
||||
// collations map does not contain entries the server supports.
|
||||
err = errors.New("unknown collation")
|
||||
return
|
||||
}
|
||||
cfg.collation = collation
|
||||
break
|
||||
|
||||
case "columnsWithAlias":
|
||||
var isBool bool
|
||||
cfg.columnsWithAlias, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return fmt.Errorf("Invalid Bool value: %s", value)
|
||||
}
|
||||
|
||||
// Time Location
|
||||
case "loc":
|
||||
if value, err = url.QueryUnescape(value); err != nil {
|
||||
return
|
||||
}
|
||||
cfg.loc, err = time.LoadLocation(value)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Dial Timeout
|
||||
case "timeout":
|
||||
cfg.timeout, err = time.ParseDuration(value)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// TLS-Encryption
|
||||
case "tls":
|
||||
boolValue, isBool := readBool(value)
|
||||
if isBool {
|
||||
if boolValue {
|
||||
cfg.tls = &tls.Config{}
|
||||
}
|
||||
} else {
|
||||
if strings.ToLower(value) == "skip-verify" {
|
||||
cfg.tls = &tls.Config{InsecureSkipVerify: true}
|
||||
} else if tlsConfig, ok := tlsConfigRegister[value]; ok {
|
||||
if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify {
|
||||
host, _, err := net.SplitHostPort(cfg.addr)
|
||||
if err == nil {
|
||||
tlsConfig.ServerName = host
|
||||
}
|
||||
}
|
||||
|
||||
cfg.tls = tlsConfig
|
||||
} else {
|
||||
return fmt.Errorf("Invalid value / unknown config name: %s", value)
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
// lazy init
|
||||
if cfg.params == nil {
|
||||
cfg.params = make(map[string]string)
|
||||
}
|
||||
|
||||
if cfg.params[param[0]], err = url.QueryUnescape(value); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Returns the bool value of the input.
|
||||
// The 2nd return value indicates if the input was a valid bool value
|
||||
func readBool(input string) (value bool, valid bool) {
|
||||
switch input {
|
||||
case "1", "true", "TRUE", "True":
|
||||
return true, true
|
||||
case "0", "false", "FALSE", "False":
|
||||
return false, true
|
||||
}
|
||||
|
||||
// Not a valid bool value
|
||||
return
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
* Authentication *
|
||||
******************************************************************************/
|
||||
|
||||
// Encrypt password using 4.1+ method
|
||||
func scramblePassword(scramble, password []byte) []byte {
|
||||
if len(password) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// stage1Hash = SHA1(password)
|
||||
crypt := sha1.New()
|
||||
crypt.Write(password)
|
||||
stage1 := crypt.Sum(nil)
|
||||
|
||||
// scrambleHash = SHA1(scramble + SHA1(stage1Hash))
|
||||
// inner Hash
|
||||
crypt.Reset()
|
||||
crypt.Write(stage1)
|
||||
hash := crypt.Sum(nil)
|
||||
|
||||
// outer Hash
|
||||
crypt.Reset()
|
||||
crypt.Write(scramble)
|
||||
crypt.Write(hash)
|
||||
scramble = crypt.Sum(nil)
|
||||
|
||||
// token = scrambleHash XOR stage1Hash
|
||||
for i := range scramble {
|
||||
scramble[i] ^= stage1[i]
|
||||
}
|
||||
return scramble
|
||||
}
|
||||
|
||||
// Encrypt password using pre 4.1 (old password) method
|
||||
// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
|
||||
type myRnd struct {
|
||||
seed1, seed2 uint32
|
||||
}
|
||||
|
||||
const myRndMaxVal = 0x3FFFFFFF
|
||||
|
||||
// Pseudo random number generator
|
||||
func newMyRnd(seed1, seed2 uint32) *myRnd {
|
||||
return &myRnd{
|
||||
seed1: seed1 % myRndMaxVal,
|
||||
seed2: seed2 % myRndMaxVal,
|
||||
}
|
||||
}
|
||||
|
||||
// Tested to be equivalent to MariaDB's floating point variant
|
||||
// http://play.golang.org/p/QHvhd4qved
|
||||
// http://play.golang.org/p/RG0q4ElWDx
|
||||
func (r *myRnd) NextByte() byte {
|
||||
r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
|
||||
r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
|
||||
|
||||
return byte(uint64(r.seed1) * 31 / myRndMaxVal)
|
||||
}
|
||||
|
||||
// Generate binary hash from byte string using insecure pre 4.1 method
|
||||
func pwHash(password []byte) (result [2]uint32) {
|
||||
var add uint32 = 7
|
||||
var tmp uint32
|
||||
|
||||
result[0] = 1345345333
|
||||
result[1] = 0x12345671
|
||||
|
||||
for _, c := range password {
|
||||
// skip spaces and tabs in password
|
||||
if c == ' ' || c == '\t' {
|
||||
continue
|
||||
}
|
||||
|
||||
tmp = uint32(c)
|
||||
result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
|
||||
result[1] += (result[1] << 8) ^ result[0]
|
||||
add += tmp
|
||||
}
|
||||
|
||||
// Remove sign bit ((1<<31)-1)
|
||||
result[0] &= 0x7FFFFFFF
|
||||
result[1] &= 0x7FFFFFFF
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Encrypt password using insecure pre 4.1 method
|
||||
func scrambleOldPassword(scramble, password []byte) []byte {
|
||||
if len(password) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
scramble = scramble[:8]
|
||||
|
||||
hashPw := pwHash(password)
|
||||
hashSc := pwHash(scramble)
|
||||
|
||||
r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
|
||||
|
||||
var out [8]byte
|
||||
for i := range out {
|
||||
out[i] = r.NextByte() + 64
|
||||
}
|
||||
|
||||
mask := r.NextByte()
|
||||
for i := range out {
|
||||
out[i] ^= mask
|
||||
}
|
||||
|
||||
return out[:]
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
* Time related utils *
|
||||
******************************************************************************/
|
||||
|
||||
// NullTime represents a time.Time that may be NULL.
|
||||
// NullTime implements the Scanner interface so
|
||||
// it can be used as a scan destination:
|
||||
//
|
||||
// var nt NullTime
|
||||
// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
|
||||
// ...
|
||||
// if nt.Valid {
|
||||
// // use nt.Time
|
||||
// } else {
|
||||
// // NULL value
|
||||
// }
|
||||
//
|
||||
// This NullTime implementation is not driver-specific
|
||||
type NullTime struct {
|
||||
Time time.Time
|
||||
Valid bool // Valid is true if Time is not NULL
|
||||
}
|
||||
|
||||
// Scan implements the Scanner interface.
|
||||
// The value type must be time.Time or string / []byte (formatted time-string),
|
||||
// otherwise Scan fails.
|
||||
func (nt *NullTime) Scan(value interface{}) (err error) {
|
||||
if value == nil {
|
||||
nt.Time, nt.Valid = time.Time{}, false
|
||||
return
|
||||
}
|
||||
|
||||
switch v := value.(type) {
|
||||
case time.Time:
|
||||
nt.Time, nt.Valid = v, true
|
||||
return
|
||||
case []byte:
|
||||
nt.Time, err = parseDateTime(string(v), time.UTC)
|
||||
nt.Valid = (err == nil)
|
||||
return
|
||||
case string:
|
||||
nt.Time, err = parseDateTime(v, time.UTC)
|
||||
nt.Valid = (err == nil)
|
||||
return
|
||||
}
|
||||
|
||||
nt.Valid = false
|
||||
return fmt.Errorf("Can't convert %T to time.Time", value)
|
||||
}
|
||||
|
||||
// Value implements the driver Valuer interface.
|
||||
func (nt NullTime) Value() (driver.Value, error) {
|
||||
if !nt.Valid {
|
||||
return nil, nil
|
||||
}
|
||||
return nt.Time, nil
|
||||
}
|
||||
|
||||
func parseDateTime(str string, loc *time.Location) (t time.Time, err error) {
|
||||
base := "0000-00-00 00:00:00.0000000"
|
||||
switch len(str) {
|
||||
case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM"
|
||||
if str == base[:len(str)] {
|
||||
return
|
||||
}
|
||||
t, err = time.Parse(timeFormat[:len(str)], str)
|
||||
default:
|
||||
err = fmt.Errorf("Invalid Time-String: %s", str)
|
||||
return
|
||||
}
|
||||
|
||||
// Adjust location
|
||||
if err == nil && loc != time.UTC {
|
||||
y, mo, d := t.Date()
|
||||
h, mi, s := t.Clock()
|
||||
t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) {
|
||||
switch num {
|
||||
case 0:
|
||||
return time.Time{}, nil
|
||||
case 4:
|
||||
return time.Date(
|
||||
int(binary.LittleEndian.Uint16(data[:2])), // year
|
||||
time.Month(data[2]), // month
|
||||
int(data[3]), // day
|
||||
0, 0, 0, 0,
|
||||
loc,
|
||||
), nil
|
||||
case 7:
|
||||
return time.Date(
|
||||
int(binary.LittleEndian.Uint16(data[:2])), // year
|
||||
time.Month(data[2]), // month
|
||||
int(data[3]), // day
|
||||
int(data[4]), // hour
|
||||
int(data[5]), // minutes
|
||||
int(data[6]), // seconds
|
||||
0,
|
||||
loc,
|
||||
), nil
|
||||
case 11:
|
||||
return time.Date(
|
||||
int(binary.LittleEndian.Uint16(data[:2])), // year
|
||||
time.Month(data[2]), // month
|
||||
int(data[3]), // day
|
||||
int(data[4]), // hour
|
||||
int(data[5]), // minutes
|
||||
int(data[6]), // seconds
|
||||
int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds
|
||||
loc,
|
||||
), nil
|
||||
}
|
||||
return nil, fmt.Errorf("Invalid DATETIME-packet length %d", num)
|
||||
}
|
||||
|
||||
// zeroDateTime is used in formatBinaryDateTime to avoid an allocation
|
||||
// if the DATE or DATETIME has the zero value.
|
||||
// It must never be changed.
|
||||
// The current behavior depends on database/sql copying the result.
|
||||
var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
|
||||
|
||||
const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
|
||||
const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
|
||||
|
||||
func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value, error) {
|
||||
// length expects the deterministic length of the zero value,
|
||||
// negative time and 100+ hours are automatically added if needed
|
||||
if len(src) == 0 {
|
||||
if justTime {
|
||||
return zeroDateTime[11 : 11+length], nil
|
||||
}
|
||||
return zeroDateTime[:length], nil
|
||||
}
|
||||
var dst []byte // return value
|
||||
var pt, p1, p2, p3 byte // current digit pair
|
||||
var zOffs byte // offset of value in zeroDateTime
|
||||
if justTime {
|
||||
switch length {
|
||||
case
|
||||
8, // time (can be up to 10 when negative and 100+ hours)
|
||||
10, 11, 12, 13, 14, 15: // time with fractional seconds
|
||||
default:
|
||||
return nil, fmt.Errorf("illegal TIME length %d", length)
|
||||
}
|
||||
switch len(src) {
|
||||
case 8, 12:
|
||||
default:
|
||||
return nil, fmt.Errorf("Invalid TIME-packet length %d", len(src))
|
||||
}
|
||||
// +2 to enable negative time and 100+ hours
|
||||
dst = make([]byte, 0, length+2)
|
||||
if src[0] == 1 {
|
||||
dst = append(dst, '-')
|
||||
}
|
||||
if src[1] != 0 {
|
||||
hour := uint16(src[1])*24 + uint16(src[5])
|
||||
pt = byte(hour / 100)
|
||||
p1 = byte(hour - 100*uint16(pt))
|
||||
dst = append(dst, digits01[pt])
|
||||
} else {
|
||||
p1 = src[5]
|
||||
}
|
||||
zOffs = 11
|
||||
src = src[6:]
|
||||
} else {
|
||||
switch length {
|
||||
case 10, 19, 21, 22, 23, 24, 25, 26:
|
||||
default:
|
||||
t := "DATE"
|
||||
if length > 10 {
|
||||
t += "TIME"
|
||||
}
|
||||
return nil, fmt.Errorf("illegal %s length %d", t, length)
|
||||
}
|
||||
switch len(src) {
|
||||
case 4, 7, 11:
|
||||
default:
|
||||
t := "DATE"
|
||||
if length > 10 {
|
||||
t += "TIME"
|
||||
}
|
||||
return nil, fmt.Errorf("illegal %s-packet length %d", t, len(src))
|
||||
}
|
||||
dst = make([]byte, 0, length)
|
||||
// start with the date
|
||||
year := binary.LittleEndian.Uint16(src[:2])
|
||||
pt = byte(year / 100)
|
||||
p1 = byte(year - 100*uint16(pt))
|
||||
p2, p3 = src[2], src[3]
|
||||
dst = append(dst,
|
||||
digits10[pt], digits01[pt],
|
||||
digits10[p1], digits01[p1], '-',
|
||||
digits10[p2], digits01[p2], '-',
|
||||
digits10[p3], digits01[p3],
|
||||
)
|
||||
if length == 10 {
|
||||
return dst, nil
|
||||
}
|
||||
if len(src) == 4 {
|
||||
return append(dst, zeroDateTime[10:length]...), nil
|
||||
}
|
||||
dst = append(dst, ' ')
|
||||
p1 = src[4] // hour
|
||||
src = src[5:]
|
||||
}
|
||||
// p1 is 2-digit hour, src is after hour
|
||||
p2, p3 = src[0], src[1]
|
||||
dst = append(dst,
|
||||
digits10[p1], digits01[p1], ':',
|
||||
digits10[p2], digits01[p2], ':',
|
||||
digits10[p3], digits01[p3],
|
||||
)
|
||||
if length <= byte(len(dst)) {
|
||||
return dst, nil
|
||||
}
|
||||
src = src[2:]
|
||||
if len(src) == 0 {
|
||||
return append(dst, zeroDateTime[19:zOffs+length]...), nil
|
||||
}
|
||||
microsecs := binary.LittleEndian.Uint32(src[:4])
|
||||
p1 = byte(microsecs / 10000)
|
||||
microsecs -= 10000 * uint32(p1)
|
||||
p2 = byte(microsecs / 100)
|
||||
microsecs -= 100 * uint32(p2)
|
||||
p3 = byte(microsecs)
|
||||
switch decimals := zOffs + length - 20; decimals {
|
||||
default:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
digits10[p2], digits01[p2],
|
||||
digits10[p3], digits01[p3],
|
||||
), nil
|
||||
case 1:
|
||||
return append(dst, '.',
|
||||
digits10[p1],
|
||||
), nil
|
||||
case 2:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
), nil
|
||||
case 3:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
digits10[p2],
|
||||
), nil
|
||||
case 4:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
digits10[p2], digits01[p2],
|
||||
), nil
|
||||
case 5:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
digits10[p2], digits01[p2],
|
||||
digits10[p3],
|
||||
), nil
|
||||
}
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
* Convert from and to bytes *
|
||||
******************************************************************************/
|
||||
|
||||
func uint64ToBytes(n uint64) []byte {
|
||||
return []byte{
|
||||
byte(n),
|
||||
byte(n >> 8),
|
||||
byte(n >> 16),
|
||||
byte(n >> 24),
|
||||
byte(n >> 32),
|
||||
byte(n >> 40),
|
||||
byte(n >> 48),
|
||||
byte(n >> 56),
|
||||
}
|
||||
}
|
||||
|
||||
func uint64ToString(n uint64) []byte {
|
||||
var a [20]byte
|
||||
i := 20
|
||||
|
||||
// U+0030 = 0
|
||||
// ...
|
||||
// U+0039 = 9
|
||||
|
||||
var q uint64
|
||||
for n >= 10 {
|
||||
i--
|
||||
q = n / 10
|
||||
a[i] = uint8(n-q*10) + 0x30
|
||||
n = q
|
||||
}
|
||||
|
||||
i--
|
||||
a[i] = uint8(n) + 0x30
|
||||
|
||||
return a[i:]
|
||||
}
|
||||
|
||||
// treats string value as unsigned integer representation
|
||||
func stringToInt(b []byte) int {
|
||||
val := 0
|
||||
for i := range b {
|
||||
val *= 10
|
||||
val += int(b[i] - 0x30)
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// returns the string read as a bytes slice, whether the value is NULL,
|
||||
// the number of bytes read and an error, in case the string is longer than
|
||||
// the input slice
|
||||
func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
|
||||
// Get length
|
||||
num, isNull, n := readLengthEncodedInteger(b)
|
||||
if num < 1 {
|
||||
return b[n:n], isNull, n, nil
|
||||
}
|
||||
|
||||
n += int(num)
|
||||
|
||||
// Check data length
|
||||
if len(b) >= n {
|
||||
return b[n-int(num) : n], false, n, nil
|
||||
}
|
||||
return nil, false, n, io.EOF
|
||||
}
|
||||
|
||||
// returns the number of bytes skipped and an error, in case the string is
|
||||
// longer than the input slice
|
||||
func skipLengthEncodedString(b []byte) (int, error) {
|
||||
// Get length
|
||||
num, _, n := readLengthEncodedInteger(b)
|
||||
if num < 1 {
|
||||
return n, nil
|
||||
}
|
||||
|
||||
n += int(num)
|
||||
|
||||
// Check data length
|
||||
if len(b) >= n {
|
||||
return n, nil
|
||||
}
|
||||
return n, io.EOF
|
||||
}
|
||||
|
||||
// returns the number read, whether the value is NULL and the number of bytes read
|
||||
func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
|
||||
// See issue #349
|
||||
if len(b) == 0 {
|
||||
return 0, true, 1
|
||||
}
|
||||
switch b[0] {
|
||||
|
||||
// 251: NULL
|
||||
case 0xfb:
|
||||
return 0, true, 1
|
||||
|
||||
// 252: value of following 2
|
||||
case 0xfc:
|
||||
return uint64(b[1]) | uint64(b[2])<<8, false, 3
|
||||
|
||||
// 253: value of following 3
|
||||
case 0xfd:
|
||||
return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
|
||||
|
||||
// 254: value of following 8
|
||||
case 0xfe:
|
||||
return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
|
||||
uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
|
||||
uint64(b[7])<<48 | uint64(b[8])<<56,
|
||||
false, 9
|
||||
}
|
||||
|
||||
// 0-250: value of first byte
|
||||
return uint64(b[0]), false, 1
|
||||
}
|
||||
|
||||
// encodes a uint64 value and appends it to the given bytes slice
|
||||
func appendLengthEncodedInteger(b []byte, n uint64) []byte {
|
||||
switch {
|
||||
case n <= 250:
|
||||
return append(b, byte(n))
|
||||
|
||||
case n <= 0xffff:
|
||||
return append(b, 0xfc, byte(n), byte(n>>8))
|
||||
|
||||
case n <= 0xffffff:
|
||||
return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
|
||||
}
|
||||
return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
|
||||
byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
|
||||
}
|
||||
|
||||
// reserveBuffer checks cap(buf) and expands the buffer to len(buf) + appendSize.
|
||||
// If cap(buf) is not enough, reallocate new buffer.
|
||||
func reserveBuffer(buf []byte, appendSize int) []byte {
|
||||
newSize := len(buf) + appendSize
|
||||
if cap(buf) < newSize {
|
||||
// Grow buffer exponentially
|
||||
newBuf := make([]byte, len(buf)*2+appendSize)
|
||||
copy(newBuf, buf)
|
||||
buf = newBuf
|
||||
}
|
||||
return buf[:newSize]
|
||||
}
|
||||
|
||||
// escapeBytesBackslash escapes []byte with backslashes (\)
|
||||
// This escapes the contents of a string (provided as []byte) by adding backslashes before special
|
||||
// characters, and turning others into specific escape sequences, such as
|
||||
// turning newlines into \n and null bytes into \0.
|
||||
// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932
|
||||
func escapeBytesBackslash(buf, v []byte) []byte {
|
||||
pos := len(buf)
|
||||
buf = reserveBuffer(buf, len(v)*2)
|
||||
|
||||
for _, c := range v {
|
||||
switch c {
|
||||
case '\x00':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = '0'
|
||||
pos += 2
|
||||
case '\n':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = 'n'
|
||||
pos += 2
|
||||
case '\r':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = 'r'
|
||||
pos += 2
|
||||
case '\x1a':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = 'Z'
|
||||
pos += 2
|
||||
case '\'':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = '\''
|
||||
pos += 2
|
||||
case '"':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = '"'
|
||||
pos += 2
|
||||
case '\\':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = '\\'
|
||||
pos += 2
|
||||
default:
|
||||
buf[pos] = c
|
||||
pos += 1
|
||||
}
|
||||
}
|
||||
|
||||
return buf[:pos]
|
||||
}
|
||||
|
||||
// escapeStringBackslash is similar to escapeBytesBackslash but for string.
|
||||
func escapeStringBackslash(buf []byte, v string) []byte {
|
||||
pos := len(buf)
|
||||
buf = reserveBuffer(buf, len(v)*2)
|
||||
|
||||
for i := 0; i < len(v); i++ {
|
||||
c := v[i]
|
||||
switch c {
|
||||
case '\x00':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = '0'
|
||||
pos += 2
|
||||
case '\n':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = 'n'
|
||||
pos += 2
|
||||
case '\r':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = 'r'
|
||||
pos += 2
|
||||
case '\x1a':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = 'Z'
|
||||
pos += 2
|
||||
case '\'':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = '\''
|
||||
pos += 2
|
||||
case '"':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = '"'
|
||||
pos += 2
|
||||
case '\\':
|
||||
buf[pos] = '\\'
|
||||
buf[pos+1] = '\\'
|
||||
pos += 2
|
||||
default:
|
||||
buf[pos] = c
|
||||
pos += 1
|
||||
}
|
||||
}
|
||||
|
||||
return buf[:pos]
|
||||
}
|
||||
|
||||
// escapeBytesQuotes escapes apostrophes in []byte by doubling them up.
|
||||
// This escapes the contents of a string by doubling up any apostrophes that
|
||||
// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in
|
||||
// effect on the server.
|
||||
// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038
|
||||
func escapeBytesQuotes(buf, v []byte) []byte {
|
||||
pos := len(buf)
|
||||
buf = reserveBuffer(buf, len(v)*2)
|
||||
|
||||
for _, c := range v {
|
||||
if c == '\'' {
|
||||
buf[pos] = '\''
|
||||
buf[pos+1] = '\''
|
||||
pos += 2
|
||||
} else {
|
||||
buf[pos] = c
|
||||
pos++
|
||||
}
|
||||
}
|
||||
|
||||
return buf[:pos]
|
||||
}
|
||||
|
||||
// escapeStringQuotes is similar to escapeBytesQuotes but for string.
|
||||
func escapeStringQuotes(buf []byte, v string) []byte {
|
||||
pos := len(buf)
|
||||
buf = reserveBuffer(buf, len(v)*2)
|
||||
|
||||
for i := 0; i < len(v); i++ {
|
||||
c := v[i]
|
||||
if c == '\'' {
|
||||
buf[pos] = '\''
|
||||
buf[pos+1] = '\''
|
||||
pos += 2
|
||||
} else {
|
||||
buf[pos] = c
|
||||
pos++
|
||||
}
|
||||
}
|
||||
|
||||
return buf[:pos]
|
||||
}
|
24
vendor/github.com/jmoiron/sqlx/.gitignore
generated
vendored
@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
tags
environ
23
vendor/github.com/jmoiron/sqlx/LICENSE
generated
vendored
@ -1,23 +0,0 @@
Copyright (c) 2013, Jason Moiron

Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
258
vendor/github.com/jmoiron/sqlx/README.md
generated
vendored
@ -1,258 +0,0 @@
|
||||
# sqlx
|
||||
|
||||
[![Build Status](https://drone.io/github.com/jmoiron/sqlx/status.png)](https://drone.io/github.com/jmoiron/sqlx/latest) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE)
|
||||
|
||||
sqlx is a library which provides a set of extensions on go's standard
|
||||
`database/sql` library. The sqlx versions of `sql.DB`, `sql.TX`, `sql.Stmt`,
|
||||
et al. all leave the underlying interfaces untouched, so that their interfaces
|
||||
are a superset on the standard ones. This makes it relatively painless to
|
||||
integrate existing codebases using database/sql with sqlx.
|
||||
|
||||
Major additional concepts are:
|
||||
|
||||
* Marshal rows into structs (with embedded struct support), maps, and slices
|
||||
* Named parameter support including prepared statements
|
||||
* `Get` and `Select` to go quickly from query to struct/slice
|
||||
* `LoadFile` for executing statements from a file
|
||||
|
||||
There is now some [fairly comprehensive documentation](http://jmoiron.github.io/sqlx/) for sqlx.
|
||||
You can also read the usage below for a quick sample on how sqlx works, or check out the [API
|
||||
documentation on godoc](http://godoc.org/github.com/jmoiron/sqlx).
|
||||
|
||||
## Recent Changes
|
||||
|
||||
The ability to use basic types as Select and Get destinations was added. This
|
||||
is only valid when there is one column in the result set, and both functions
|
||||
return an error if this isn't the case. This allows for much simpler patterns
|
||||
of access for single column results:
|
||||
|
||||
```go
|
||||
var count int
|
||||
err := db.Get(&count, "SELECT count(*) FROM person;")
|
||||
|
||||
var names []string
|
||||
err := db.Select(&names, "SELECT name FROM person;")
|
||||
```
|
||||
|
||||
See the note on Scannability at the bottom of this README for some more info.
|
||||
|
||||
### Backwards Compatibility
|
||||
|
||||
There is no Go1-like promise of absolute stability, but I take the issue
|
||||
seriously and will maintain the library in a compatible state unless vital
|
||||
bugs prevent me from doing so. Since [#59](https://github.com/jmoiron/sqlx/issues/59) and [#60](https://github.com/jmoiron/sqlx/issues/60) necessitated
|
||||
breaking behavior, a wider API cleanup was done at the time of fixing.
|
||||
|
||||
## install
|
||||
|
||||
go get github.com/jmoiron/sqlx
|
||||
|
||||
## issues
|
||||
|
||||
Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of
|
||||
`Columns()` can have duplicate names on queries like:
|
||||
|
||||
```sql
|
||||
SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id;
|
||||
```
|
||||
|
||||
making a struct or map destination ambiguous. Use `AS` in your queries
|
||||
to give rows distinct names, `rows.Scan` to scan them manually, or
|
||||
`SliceScan` to get a slice of results.
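For instance, a minimal sketch of the aliasing approach described above (the `FooPair` struct and its column aliases are hypothetical, not part of sqlx):

```go
// Hypothetical struct: alias each column so the struct mapping is unambiguous.
type FooPair struct {
    AID   int    `db:"a_id"`
    AName string `db:"a_name"`
    BID   int    `db:"b_id"`
    BName string `db:"b_name"`
}

pairs := []FooPair{}
err := db.Select(&pairs, `SELECT a.id AS a_id, a.name AS a_name,
                                 b.id AS b_id, b.name AS b_name
                          FROM foos AS a JOIN foos AS b ON a.parent = b.id`)
```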
|
||||
|
||||
## usage
|
||||
|
||||
Below is an example which shows some common use cases for sqlx. Check
|
||||
[sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more
|
||||
usage.
|
||||
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
_ "github.com/lib/pq"
|
||||
"database/sql"
|
||||
"github.com/jmoiron/sqlx"
|
||||
"log"
|
||||
)
|
||||
|
||||
var schema = `
|
||||
CREATE TABLE person (
|
||||
first_name text,
|
||||
last_name text,
|
||||
email text
|
||||
);
|
||||
|
||||
CREATE TABLE place (
|
||||
country text,
|
||||
city text NULL,
|
||||
telcode integer
|
||||
)`
|
||||
|
||||
type Person struct {
|
||||
FirstName string `db:"first_name"`
|
||||
LastName string `db:"last_name"`
|
||||
Email string
|
||||
}
|
||||
|
||||
type Place struct {
|
||||
Country string
|
||||
City sql.NullString
|
||||
TelCode int
|
||||
}
|
||||
|
||||
func main() {
|
||||
// this connects & tries a simple 'SELECT 1', panics on error
|
||||
// use sqlx.Open() for sql.Open() semantics
|
||||
db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
// exec the schema or fail; multi-statement Exec behavior varies between
|
||||
// database drivers; pq will exec them all, sqlite3 won't, ymmv
|
||||
db.MustExec(schema)
|
||||
|
||||
tx := db.MustBegin()
|
||||
tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net")
|
||||
tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net")
|
||||
tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1")
|
||||
tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852")
|
||||
tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65")
|
||||
// Named queries can use structs, so if you have an existing struct (i.e. person := &Person{}) that you have populated, you can pass it in as &person
|
||||
tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"})
|
||||
tx.Commit()
|
||||
|
||||
// Query the database, storing results in a []Person (wrapped in []interface{})
|
||||
people := []Person{}
|
||||
db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC")
|
||||
jason, john := people[0], people[1]
|
||||
|
||||
fmt.Printf("%#v\n%#v", jason, john)
|
||||
// Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
|
||||
// Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"}
|
||||
|
||||
// You can also get a single result, a la QueryRow
|
||||
jason = Person{}
|
||||
err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason")
|
||||
fmt.Printf("%#v\n", jason)
|
||||
// Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
|
||||
|
||||
// if you have null fields and use SELECT *, you must use sql.Null* in your struct
|
||||
places := []Place{}
|
||||
err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC")
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
usa, singsing, honkers := places[0], places[1], places[2]
|
||||
|
||||
fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers)
|
||||
// Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
|
||||
// Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
|
||||
// Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
|
||||
|
||||
// Loop through rows using only one struct
|
||||
place := Place{}
|
||||
rows, err := db.Queryx("SELECT * FROM place")
|
||||
for rows.Next() {
|
||||
err := rows.StructScan(&place)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
fmt.Printf("%#v\n", place)
|
||||
}
|
||||
// Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
|
||||
// Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
|
||||
// Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
|
||||
|
||||
// Named queries, using `:name` as the bindvar. Automatic bindvar support
|
||||
// which takes into account the dbtype based on the driverName on sqlx.Open/Connect
|
||||
_, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`,
|
||||
map[string]interface{}{
|
||||
"first": "Bin",
|
||||
"last": "Smuth",
|
||||
"email": "bensmith@allblacks.nz",
|
||||
})
|
||||
|
||||
// Selects Mr. Smith from the database
|
||||
rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"})
|
||||
|
||||
// Named queries can also use structs. Their bind names follow the same rules
|
||||
// as the name -> db mapping, so struct fields are lowercased and the `db` tag
|
||||
// is taken into consideration.
|
||||
rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason)
|
||||
}
|
||||
```
|
||||
|
||||
## Scannability
|
||||
|
||||
Get and Select are able to take base types, so the following is now possible:
|
||||
|
||||
```go
|
||||
var name string
|
||||
db.Get(&name, "SELECT first_name FROM person WHERE id=$1", 10)
|
||||
|
||||
var ids []int64
|
||||
db.Select(&ids, "SELECT id FROM person LIMIT 20;")
|
||||
```
|
||||
|
||||
This can get complicated with destination types which are structs, like `sql.NullString`. Because of this, straightforward rules for *scannability* had to be developed. Iff something is "Scannable", then it is used directly in `rows.Scan`; if it's not, then the standard sqlx struct rules apply.
|
||||
|
||||
Something is scannable if any of the following are true:
|
||||
|
||||
* It is not a struct, ie. `reflect.ValueOf(v).Kind() != reflect.Struct`
|
||||
* It implements the `sql.Scanner` interface
|
||||
* It has no exported fields (eg. `time.Time`)
|
||||
|
||||
## embedded structs
|
||||
|
||||
Scan targets obey Go attribute rules directly, including nested embedded structs. Older versions of sqlx would attempt to also descend into non-embedded structs, but this is no longer supported.
|
||||
|
||||
Go makes *accessing* '[ambiguous selectors](http://play.golang.org/p/MGRxdjLaUc)' a compile time error, but defining structs with ambiguous selectors is legal. Sqlx will decide which field to use on a struct based on a breadth first search of the struct and any structs it embeds, as specified by the order of the fields as accessible by `reflect`, which generally means in source-order. This means that sqlx chooses the outer-most, top-most matching name for targets, even when the selector might technically be ambiguous.
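A minimal sketch of an embedded-struct scan target following these rules (the `AuditFields` and `User` types and the query are hypothetical):

```go
type AuditFields struct {
    CreatedAt time.Time `db:"created_at"`
}

type User struct {
    AuditFields        // embedded: created_at maps as if it were declared on User
    ID   int    `db:"id"`
    Name string `db:"name"`
}

u := User{}
err := db.Get(&u, "SELECT id, name, created_at FROM users WHERE id=$1", 1)
```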
|
||||
|
||||
## scan safety
|
||||
|
||||
By default, scanning into structs requires the structs to have fields for all of the
|
||||
columns in the query. This was done for a few reasons:
|
||||
|
||||
* A mistake in naming during development could lead you to believe that data is
|
||||
being written to a field when actually it can't be found and it is being dropped
|
||||
* This behavior mirrors the behavior of the Go compiler with respect to unused
|
||||
variables
|
||||
* Selecting more data than you need is wasteful (more data on the wire, more time
|
||||
marshalling, etc)
|
||||
|
||||
Unlike Marshallers in the stdlib, the programmer scanning an sql result into a struct
|
||||
will generally have a full understanding of what the underlying data model is *and*
|
||||
full control over the SQL statement.
|
||||
|
||||
Despite this, there are use cases where it's convenient to be able to ignore unknown
|
||||
columns. In most of these cases, you might be better off with `ScanSlice`, but where
|
||||
you want to still use structs, there is now the `Unsafe` method. Its usage is most
|
||||
simply shown in an example:
|
||||
|
||||
```go
|
||||
db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
type Person struct {
|
||||
Name string
|
||||
}
|
||||
var p Person
|
||||
|
||||
// This fails, because there is no destination for location in Person
|
||||
err = db.Get(&p, "SELECT name, location FROM person LIMIT 1")
|
||||
|
||||
udb := db.Unsafe()
|
||||
|
||||
// This succeeds and just sets `Name` in the p struct
|
||||
err = udb.Get(&p, "SELECT name, location FROM person LIMIT 1")
|
||||
```
|
||||
|
||||
The `Unsafe` method is implemented on `Tx`, `DB`, and `Stmt`. When you use an unsafe
|
||||
`Tx` or `DB` to create a new `Tx` or `Stmt`, those inherit its lack of safety.
|
||||
|
84
vendor/github.com/jmoiron/sqlx/bind.go
generated
vendored
@ -1,84 +0,0 @@
|
||||
package sqlx
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Bindvar types supported by Rebind, BindMap and BindStruct.
|
||||
const (
|
||||
UNKNOWN = iota
|
||||
QUESTION
|
||||
DOLLAR
|
||||
NAMED
|
||||
)
|
||||
|
||||
// BindType returns the bindtype for a given database given a drivername.
|
||||
func BindType(driverName string) int {
|
||||
switch driverName {
|
||||
case "postgres", "pgx":
|
||||
return DOLLAR
|
||||
case "mysql":
|
||||
return QUESTION
|
||||
case "sqlite3":
|
||||
return QUESTION
|
||||
case "oci8":
|
||||
return NAMED
|
||||
}
|
||||
return UNKNOWN
|
||||
}
|
||||
|
||||
// FIXME: this should be able to be tolerant of escaped ?'s in queries without
|
||||
// losing much speed, and should be to avoid confusion.
|
||||
|
||||
// FIXME: this now produces the wrong results for oracle's NAMED bindtype
|
||||
|
||||
// Rebind a query from the default bindtype (QUESTION) to the target bindtype.
|
||||
func Rebind(bindType int, query string) string {
|
||||
if bindType != DOLLAR {
|
||||
return query
|
||||
}
|
||||
|
||||
qb := []byte(query)
|
||||
// Add space enough for 10 params before we have to allocate
|
||||
rqb := make([]byte, 0, len(qb)+10)
|
||||
j := 1
|
||||
for _, b := range qb {
|
||||
if b == '?' {
|
||||
rqb = append(rqb, '$')
|
||||
for _, b := range strconv.Itoa(j) {
|
||||
rqb = append(rqb, byte(b))
|
||||
}
|
||||
j++
|
||||
} else {
|
||||
rqb = append(rqb, b)
|
||||
}
|
||||
}
|
||||
return string(rqb)
|
||||
}
|
||||
|
||||
// Experimental implementation of Rebind which uses a bytes.Buffer. The code is
|
||||
// much simpler and should be more resistant to odd unicode, but it is twice as
|
||||
// slow. Kept here for benchmarking purposes and to possibly replace Rebind if
|
||||
// problems arise with its somewhat naive handling of unicode.
|
||||
|
||||
func rebindBuff(bindType int, query string) string {
|
||||
if bindType != DOLLAR {
|
||||
return query
|
||||
}
|
||||
|
||||
b := make([]byte, 0, len(query))
|
||||
rqb := bytes.NewBuffer(b)
|
||||
j := 1
|
||||
for _, r := range query {
|
||||
if r == '?' {
|
||||
rqb.WriteRune('$')
|
||||
rqb.WriteString(strconv.Itoa(j))
|
||||
j++
|
||||
} else {
|
||||
rqb.WriteRune(r)
|
||||
}
|
||||
}
|
||||
|
||||
return rqb.String()
|
||||
}
|
12
vendor/github.com/jmoiron/sqlx/doc.go
generated
vendored
@ -1,12 +0,0 @@
// Package sqlx provides general purpose extensions to database/sql.
//
// It is intended to seamlessly wrap database/sql and provide convenience
// methods which are useful in the development of database driven applications.
// None of the underlying database/sql methods are changed. Instead all extended
// behavior is implemented through new methods defined on wrapper types.
//
// Additions include scanning into structs, named query support, rebinding
// queries for different drivers, convenient shorthands for common error handling
// and more.
//
package sqlx
321
vendor/github.com/jmoiron/sqlx/named.go
generated
vendored
@ -1,321 +0,0 @@
|
||||
package sqlx
|
||||
|
||||
// Named Query Support
|
||||
//
|
||||
// * BindMap - bind query bindvars to map/struct args
|
||||
// * NamedExec, NamedQuery - named query w/ struct or map
|
||||
// * NamedStmt - a pre-compiled named query which is a prepared statement
|
||||
//
|
||||
// Internal Interfaces:
|
||||
//
|
||||
// * compileNamedQuery - rebind a named query, returning a query and list of names
|
||||
// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist
|
||||
//
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"unicode"
|
||||
|
||||
"github.com/jmoiron/sqlx/reflectx"
|
||||
)
|
||||
|
||||
// NamedStmt is a prepared statement that executes named queries. Prepare it
|
||||
// how you would execute a NamedQuery, but pass in a struct or map when executing.
|
||||
type NamedStmt struct {
|
||||
Params []string
|
||||
QueryString string
|
||||
Stmt *Stmt
|
||||
}
|
||||
|
||||
// Close closes the named statement.
|
||||
func (n *NamedStmt) Close() error {
|
||||
return n.Stmt.Close()
|
||||
}
|
||||
|
||||
// Exec executes a named statement using the struct passed.
|
||||
func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) {
|
||||
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
|
||||
if err != nil {
|
||||
return *new(sql.Result), err
|
||||
}
|
||||
return n.Stmt.Exec(args...)
|
||||
}
|
||||
|
||||
// Query executes a named statement using the struct argument, returning rows.
|
||||
func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) {
|
||||
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return n.Stmt.Query(args...)
|
||||
}
|
||||
|
||||
// QueryRow executes a named statement against the database. Because sqlx cannot
|
||||
// create a *sql.Row with an error condition pre-set for binding errors, sqlx
|
||||
// returns a *sqlx.Row instead.
|
||||
func (n *NamedStmt) QueryRow(arg interface{}) *Row {
|
||||
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
|
||||
if err != nil {
|
||||
return &Row{err: err}
|
||||
}
|
||||
return n.Stmt.QueryRowx(args...)
|
||||
}
|
||||
|
||||
// MustExec execs a NamedStmt, panicking on error
|
||||
func (n *NamedStmt) MustExec(arg interface{}) sql.Result {
|
||||
res, err := n.Exec(arg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// Queryx using this NamedStmt
|
||||
func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) {
|
||||
r, err := n.Query(arg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Rows{Rows: r, Mapper: n.Stmt.Mapper}, err
|
||||
}
|
||||
|
||||
// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is
|
||||
// an alias for QueryRow.
|
||||
func (n *NamedStmt) QueryRowx(arg interface{}) *Row {
|
||||
return n.QueryRow(arg)
|
||||
}
|
||||
|
||||
// Select using this NamedStmt
|
||||
func (n *NamedStmt) Select(dest interface{}, arg interface{}) error {
|
||||
rows, err := n.Query(arg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// if something happens here, we want to make sure the rows are Closed
|
||||
defer rows.Close()
|
||||
return scanAll(rows, dest, false)
|
||||
}
|
||||
|
||||
// Get using this NamedStmt
|
||||
func (n *NamedStmt) Get(dest interface{}, arg interface{}) error {
|
||||
r := n.QueryRowx(arg)
|
||||
return r.scanAny(dest, false)
|
||||
}
|
||||
|
||||
// A union interface of preparer and binder, required to be able to prepare
|
||||
// named statements (as the bindtype must be determined).
|
||||
type namedPreparer interface {
|
||||
Preparer
|
||||
binder
|
||||
}
|
||||
|
||||
func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) {
|
||||
bindType := BindType(p.DriverName())
|
||||
q, args, err := compileNamedQuery([]byte(query), bindType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stmt, err := Preparex(p, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &NamedStmt{
|
||||
QueryString: q,
|
||||
Params: args,
|
||||
Stmt: stmt,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
|
||||
if maparg, ok := arg.(map[string]interface{}); ok {
|
||||
return bindMapArgs(names, maparg)
|
||||
}
|
||||
return bindArgs(names, arg, m)
|
||||
}
|
||||
|
||||
// private interface to generate a list of interfaces from a given struct
|
||||
// type, given a list of names to pull out of the struct. Used by public
|
||||
// BindNamed interface.
|
||||
func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
|
||||
arglist := make([]interface{}, 0, len(names))
|
||||
|
||||
// grab the indirected value of arg
|
||||
v := reflect.ValueOf(arg)
|
||||
for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
fields := m.TraversalsByName(v.Type(), names)
|
||||
for i, t := range fields {
|
||||
if len(t) == 0 {
|
||||
return arglist, fmt.Errorf("could not find name %s in %#v", names[i], arg)
|
||||
}
|
||||
val := reflectx.FieldByIndexesReadOnly(v, t)
|
||||
arglist = append(arglist, val.Interface())
|
||||
}
|
||||
|
||||
return arglist, nil
|
||||
}
|
||||
|
||||
// like bindArgs, but for maps.
|
||||
func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) {
|
||||
arglist := make([]interface{}, 0, len(names))
|
||||
|
||||
for _, name := range names {
|
||||
val, ok := arg[name]
|
||||
if !ok {
|
||||
return arglist, fmt.Errorf("could not find name %s in %#v", name, arg)
|
||||
}
|
||||
arglist = append(arglist, val)
|
||||
}
|
||||
return arglist, nil
|
||||
}
|
||||
|
||||
// bindStruct binds a named parameter query with fields from a struct argument.
|
||||
// The rules for binding field names to parameter names follow the same
|
||||
// conventions as for StructScan, including obeying the `db` struct tags.
|
||||
func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
|
||||
bound, names, err := compileNamedQuery([]byte(query), bindType)
|
||||
if err != nil {
|
||||
return "", []interface{}{}, err
|
||||
}
|
||||
|
||||
arglist, err := bindArgs(names, arg, m)
|
||||
if err != nil {
|
||||
return "", []interface{}{}, err
|
||||
}
|
||||
|
||||
return bound, arglist, nil
|
||||
}
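
// Illustrative sketch of what bindStruct produces with the QUESTION bindtype;
// the query, struct, and values are assumptions for illustration only:
//
//	type arg struct{ Name string `db:"name"` }
//	q, args, _ := bindStruct(QUESTION, `INSERT INTO t (a) VALUES (:name)`, arg{"x"}, mapper())
//	// q    == `INSERT INTO t (a) VALUES (?)`
//	// args == []interface{}{"x"}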
|
||||
|
||||
// bindMap binds a named parameter query with a map of arguments.
|
||||
func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) {
|
||||
bound, names, err := compileNamedQuery([]byte(query), bindType)
|
||||
if err != nil {
|
||||
return "", []interface{}{}, err
|
||||
}
|
||||
|
||||
arglist, err := bindMapArgs(names, args)
|
||||
return bound, arglist, err
|
||||
}
|
||||
|
||||
// -- Compilation of Named Queries
|
||||
|
||||
// Allow digits and letters in bind params; additionally runes are
|
||||
// checked against underscores, meaning that bind params can be
|
||||
// alphanumeric with underscores. Mind the difference between unicode
|
||||
// digits and numbers, where '5' is a digit but '五' is not.
|
||||
var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit}
|
||||
|
||||
// FIXME: this function isn't safe for unicode named params, as a failing test
|
||||
// can testify. This is not a regression but a failure of the original code
|
||||
// as well. It should be modified to range over runes in a string rather than
|
||||
// bytes, even though this is less convenient and slower. Hopefully the
|
||||
// addition of the prepared NamedStmt (which will only do this once) will make
|
||||
// up for the slightly slower ad-hoc NamedExec/NamedQuery.
|
||||
|
||||
// compile a NamedQuery into an unbound query (using the '?' bindvar) and
|
||||
// a list of names.
|
||||
func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) {
|
||||
names = make([]string, 0, 10)
|
||||
rebound := make([]byte, 0, len(qs))
|
||||
|
||||
inName := false
|
||||
last := len(qs) - 1
|
||||
currentVar := 1
|
||||
name := make([]byte, 0, 10)
|
||||
|
||||
for i, b := range qs {
|
||||
// a ':' while we're in a name is an error
|
||||
if b == ':' {
|
||||
// if this is the second ':' in a '::' escape sequence, append a ':'
|
||||
if inName && i > 0 && qs[i-1] == ':' {
|
||||
rebound = append(rebound, ':')
|
||||
inName = false
|
||||
continue
|
||||
} else if inName {
|
||||
err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i))
|
||||
return query, names, err
|
||||
}
|
||||
inName = true
|
||||
name = []byte{}
|
||||
// if we're in a name, and this is an allowed character, continue
|
||||
} else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_') && i != last {
|
||||
// append the byte to the name if we are in a name and not on the last byte
|
||||
name = append(name, b)
|
||||
// if we're in a name and it's not an allowed character, the name is done
|
||||
} else if inName {
|
||||
inName = false
|
||||
// if this is the final byte of the string and it is part of the name, then
|
||||
// make sure to add it to the name
|
||||
if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) {
|
||||
name = append(name, b)
|
||||
}
|
||||
// add the string representation to the names list
|
||||
names = append(names, string(name))
|
||||
// add a proper bindvar for the bindType
|
||||
switch bindType {
|
||||
// oracle only supports named type bind vars even for positional
|
||||
case NAMED:
|
||||
rebound = append(rebound, ':')
|
||||
rebound = append(rebound, name...)
|
||||
case QUESTION, UNKNOWN:
|
||||
rebound = append(rebound, '?')
|
||||
case DOLLAR:
|
||||
rebound = append(rebound, '$')
|
||||
for _, b := range strconv.Itoa(currentVar) {
|
||||
rebound = append(rebound, byte(b))
|
||||
}
|
||||
currentVar++
|
||||
}
|
||||
// add this byte to the rebound query unless it was part of the name
|
||||
if i != last {
|
||||
rebound = append(rebound, b)
|
||||
} else if !unicode.IsOneOf(allowedBindRunes, rune(b)) {
|
||||
rebound = append(rebound, b)
|
||||
}
|
||||
} else {
|
||||
// this is a normal byte and should just go onto the rebound query
|
||||
rebound = append(rebound, b)
|
||||
}
|
||||
}
|
||||
|
||||
return string(rebound), names, err
|
||||
}
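
// Illustrative sketch of the rewrite described above; the expected outputs
// mirror cases exercised in the package tests:
//
//	q, names, _ := compileNamedQuery([]byte(`SELECT * FROM a WHERE name=:name`), DOLLAR)
//	// q     == `SELECT * FROM a WHERE name=$1`
//	// names == []string{"name"}
//
//	q, _, _ = compileNamedQuery([]byte(`SELECT "::foo" FROM a`), QUESTION)
//	// q == `SELECT ":foo" FROM a` -- '::' escapes to a literal ':'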
|
||||
|
||||
// BindNamed binds a struct or a map to a query with named parameters.
|
||||
func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) {
|
||||
return bindNamedMapper(bindType, query, arg, mapper())
|
||||
}
|
||||
|
||||
func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
|
||||
if maparg, ok := arg.(map[string]interface{}); ok {
|
||||
return bindMap(bindType, query, maparg)
|
||||
}
|
||||
return bindStruct(bindType, query, arg, m)
|
||||
}
|
||||
|
||||
// NamedQuery binds a named query and then runs Query on the result using the
|
||||
// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
|
||||
// map[string]interface{} types.
|
||||
func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) {
|
||||
q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return e.Queryx(q, args...)
|
||||
}
|
||||
|
||||
// NamedExec uses BindStruct to get a query executable by the driver and
|
||||
// then runs Exec on the result. Returns an error from the binding
|
||||
// or the query execution itself.
|
||||
func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) {
|
||||
q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return e.Exec(q, args...)
|
||||
}
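
// Hedged usage sketch for the ad-hoc named helpers above; the Person struct and
// person table are assumptions borrowed from the package tests:
//
//	_, err := db.NamedExec(
//		`INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)`,
//		Person{FirstName: "Jane", LastName: "Doe", Email: "jane@example.com"},
//	)
//
//	rows, err := db.NamedQuery(`SELECT * FROM person WHERE first_name = :first_name`,
//		map[string]interface{}{"first_name": "Jane"})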
|
227
vendor/github.com/jmoiron/sqlx/named_test.go
generated
vendored
227
vendor/github.com/jmoiron/sqlx/named_test.go
generated
vendored
@ -1,227 +0,0 @@
|
||||
package sqlx
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCompileQuery(t *testing.T) {
|
||||
table := []struct {
|
||||
Q, R, D, N string
|
||||
V []string
|
||||
}{
|
||||
// basic test for named parameters, invalid char ',' terminating
|
||||
{
|
||||
Q: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`,
|
||||
R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`,
|
||||
D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`,
|
||||
N: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`,
|
||||
V: []string{"name", "age", "first", "last"},
|
||||
},
|
||||
// This query tests a named parameter ending the string as well as numbers
|
||||
{
|
||||
Q: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`,
|
||||
R: `SELECT * FROM a WHERE first_name=? AND last_name=?`,
|
||||
D: `SELECT * FROM a WHERE first_name=$1 AND last_name=$2`,
|
||||
N: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`,
|
||||
V: []string{"name1", "name2"},
|
||||
},
|
||||
{
|
||||
Q: `SELECT "::foo" FROM a WHERE first_name=:name1 AND last_name=:name2`,
|
||||
R: `SELECT ":foo" FROM a WHERE first_name=? AND last_name=?`,
|
||||
D: `SELECT ":foo" FROM a WHERE first_name=$1 AND last_name=$2`,
|
||||
N: `SELECT ":foo" FROM a WHERE first_name=:name1 AND last_name=:name2`,
|
||||
V: []string{"name1", "name2"},
|
||||
},
|
||||
{
|
||||
Q: `SELECT 'a::b::c' || first_name, '::::ABC::_::' FROM person WHERE first_name=:first_name AND last_name=:last_name`,
|
||||
R: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=? AND last_name=?`,
|
||||
D: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=$1 AND last_name=$2`,
|
||||
N: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=:first_name AND last_name=:last_name`,
|
||||
V: []string{"first_name", "last_name"},
|
||||
},
|
||||
/* This unicode awareness test sadly fails, because of our byte-wise worldview.
|
||||
* We could certainly iterate by Rune instead, though it's a great deal slower,
|
||||
* it's probably the RightWay(tm)
|
||||
{
|
||||
Q: `INSERT INTO foo (a,b,c,d) VALUES (:あ, :b, :キコ, :名前)`,
|
||||
R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`,
|
||||
D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`,
|
||||
N: []string{"name", "age", "first", "last"},
|
||||
},
|
||||
*/
|
||||
}
|
||||
|
||||
for _, test := range table {
|
||||
qr, names, err := compileNamedQuery([]byte(test.Q), QUESTION)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if qr != test.R {
|
||||
t.Errorf("expected %s, got %s", test.R, qr)
|
||||
}
|
||||
if len(names) != len(test.V) {
|
||||
t.Errorf("expected %#v, got %#v", test.V, names)
|
||||
} else {
|
||||
for i, name := range names {
|
||||
if name != test.V[i] {
|
||||
t.Errorf("expected %dth name to be %s, got %s", i+1, test.V[i], name)
|
||||
}
|
||||
}
|
||||
}
|
||||
qd, _, _ := compileNamedQuery([]byte(test.Q), DOLLAR)
|
||||
if qd != test.D {
|
||||
t.Errorf("\nexpected: `%s`\ngot: `%s`", test.D, qd)
|
||||
}
|
||||
|
||||
qq, _, _ := compileNamedQuery([]byte(test.Q), NAMED)
|
||||
if qq != test.N {
|
||||
t.Errorf("\nexpected: `%s`\ngot: `%s`\n(len: %d vs %d)", test.N, qq, len(test.N), len(qq))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type Test struct {
|
||||
t *testing.T
|
||||
}
|
||||
|
||||
func (t Test) Error(err error, msg ...interface{}) {
|
||||
if err != nil {
|
||||
if len(msg) == 0 {
|
||||
t.t.Error(err)
|
||||
} else {
|
||||
t.t.Error(msg...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t Test) Errorf(err error, format string, args ...interface{}) {
|
||||
if err != nil {
|
||||
t.t.Errorf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNamedQueries(t *testing.T) {
|
||||
RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
|
||||
loadDefaultFixture(db, t)
|
||||
test := Test{t}
|
||||
var ns *NamedStmt
|
||||
var err error
|
||||
|
||||
// Check that invalid preparations fail
|
||||
ns, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first:name")
|
||||
if err == nil {
|
||||
t.Error("Expected an error with invalid prepared statement.")
|
||||
}
|
||||
|
||||
ns, err = db.PrepareNamed("invalid sql")
|
||||
if err == nil {
|
||||
t.Error("Expected an error with invalid prepared statement.")
|
||||
}
|
||||
|
||||
// Check closing works as anticipated
|
||||
ns, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first_name")
|
||||
test.Error(err)
|
||||
err = ns.Close()
|
||||
test.Error(err)
|
||||
|
||||
ns, err = db.PrepareNamed(`
|
||||
SELECT first_name, last_name, email
|
||||
FROM person WHERE first_name=:first_name AND email=:email`)
|
||||
test.Error(err)
|
||||
|
||||
// test Queryx, which uses Query under the hood
|
||||
p := Person{FirstName: "Jason", LastName: "Moiron", Email: "jmoiron@jmoiron.net"}
|
||||
|
||||
rows, err := ns.Queryx(p)
|
||||
test.Error(err)
|
||||
for rows.Next() {
|
||||
var p2 Person
|
||||
rows.StructScan(&p2)
|
||||
if p.FirstName != p2.FirstName {
|
||||
t.Errorf("got %s, expected %s", p.FirstName, p2.FirstName)
|
||||
}
|
||||
if p.LastName != p2.LastName {
|
||||
t.Errorf("got %s, expected %s", p.LastName, p2.LastName)
|
||||
}
|
||||
if p.Email != p2.Email {
|
||||
t.Errorf("got %s, expected %s", p.Email, p2.Email)
|
||||
}
|
||||
}
|
||||
|
||||
// test Select
|
||||
people := make([]Person, 0, 5)
|
||||
err = ns.Select(&people, p)
|
||||
test.Error(err)
|
||||
|
||||
if len(people) != 1 {
|
||||
t.Errorf("got %d results, expected %d", len(people), 1)
|
||||
}
|
||||
if p.FirstName != people[0].FirstName {
|
||||
t.Errorf("got %s, expected %s", p.FirstName, people[0].FirstName)
|
||||
}
|
||||
if p.LastName != people[0].LastName {
|
||||
t.Errorf("got %s, expected %s", p.LastName, people[0].LastName)
|
||||
}
|
||||
if p.Email != people[0].Email {
|
||||
t.Errorf("got %s, expected %s", p.Email, people[0].Email)
|
||||
}
|
||||
|
||||
// test Exec
|
||||
ns, err = db.PrepareNamed(`
|
||||
INSERT INTO person (first_name, last_name, email)
|
||||
VALUES (:first_name, :last_name, :email)`)
|
||||
test.Error(err)
|
||||
|
||||
js := Person{
|
||||
FirstName: "Julien",
|
||||
LastName: "Savea",
|
||||
Email: "jsavea@ab.co.nz",
|
||||
}
|
||||
_, err = ns.Exec(js)
|
||||
test.Error(err)
|
||||
|
||||
// Make sure we can pull him out again
|
||||
p2 := Person{}
|
||||
db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), js.Email)
|
||||
if p2.Email != js.Email {
|
||||
t.Errorf("expected %s, got %s", js.Email, p2.Email)
|
||||
}
|
||||
|
||||
// test Txn NamedStmts
|
||||
tx := db.MustBegin()
|
||||
txns := tx.NamedStmt(ns)
|
||||
|
||||
// We're going to add Steven in this txn
|
||||
sl := Person{
|
||||
FirstName: "Steven",
|
||||
LastName: "Luatua",
|
||||
Email: "sluatua@ab.co.nz",
|
||||
}
|
||||
|
||||
_, err = txns.Exec(sl)
|
||||
test.Error(err)
|
||||
// then rollback...
|
||||
tx.Rollback()
|
||||
// looking for Steven after a rollback should fail
|
||||
err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email)
|
||||
if err != sql.ErrNoRows {
|
||||
t.Errorf("expected no rows error, got %v", err)
|
||||
}
|
||||
|
||||
// now do the same, but commit
|
||||
tx = db.MustBegin()
|
||||
txns = tx.NamedStmt(ns)
|
||||
_, err = txns.Exec(sl)
|
||||
test.Error(err)
|
||||
tx.Commit()
|
||||
|
||||
// looking for Steven after a Commit should succeed
|
||||
err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email)
|
||||
test.Error(err)
|
||||
if p2.Email != sl.Email {
|
||||
t.Errorf("expected %s, got %s", sl.Email, p2.Email)
|
||||
}
|
||||
|
||||
})
|
||||
}
|
17
vendor/github.com/jmoiron/sqlx/reflectx/README.md
generated
vendored
17
vendor/github.com/jmoiron/sqlx/reflectx/README.md
generated
vendored
@ -1,17 +0,0 @@
|
||||
# reflectx
|
||||
|
||||
The sqlx package has special reflect needs. In particular, it needs to:
|
||||
|
||||
* be able to map a name to a field
|
||||
* understand embedded structs
|
||||
* understand mapping names to fields by a particular tag
|
||||
* user specified name -> field mapping functions
|
||||
|
||||
These behaviors mimic the behaviors of the standard library marshallers and also the
|
||||
behavior of standard Go accessors.
|
||||
|
||||
The first two are amply taken care of by `reflect.Value.FieldByName`, and the third is
|
||||
addressed by `reflect.Value.FieldByNameFunc`, but these don't quite understand struct
|
||||
tags in the ways that are vital to most marshalers, and they are slow.
|
||||
|
||||
This reflectx package extends reflect to achieve these goals.
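
A minimal sketch of the intended use, assuming a `db` struct tag as sqlx itself uses (the type and names here are illustrative only):

    m := reflectx.NewMapperFunc("db", strings.ToLower)
    type Account struct {
        ID   int `db:"id"`
        Name string
    }
    v := m.FieldByName(reflect.ValueOf(Account{ID: 7, Name: "x"}), "id")
    // v.Interface().(int) == 7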
|
250
vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
generated
vendored
250
vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
generated
vendored
@ -1,250 +0,0 @@
|
||||
// Package reflectx implements extensions to the standard reflect lib suitable
|
||||
// for implementing marshaling and unmarshaling packages. The main Mapper type
|
||||
// allows for Go-compatible named attribute access, including accessing embedded
|
||||
// struct attributes and the ability to use functions and struct tags to
|
||||
// customize field names.
|
||||
//
|
||||
package reflectx
|
||||
|
||||
import "sync"
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
type fieldMap map[string][]int
|
||||
|
||||
// Mapper is a general purpose mapper of names to struct fields. A Mapper
|
||||
// behaves like most marshallers, optionally obeying a field tag for name
|
||||
// mapping and a function to provide a basic mapping of fields to names.
|
||||
type Mapper struct {
|
||||
cache map[reflect.Type]fieldMap
|
||||
tagName string
|
||||
mapFunc func(string) string
|
||||
mutex sync.Mutex
|
||||
}
|
||||
|
||||
// NewMapper returns a new mapper which optionally obeys the field tag given
|
||||
// by tagName. If tagName is the empty string, it is ignored.
|
||||
func NewMapper(tagName string) *Mapper {
|
||||
return &Mapper{
|
||||
cache: make(map[reflect.Type]fieldMap),
|
||||
tagName: tagName,
|
||||
}
|
||||
}
|
||||
|
||||
// NewMapperFunc returns a new mapper which optionally obeys a field tag and
|
||||
// a struct field name mapper func given by f. Tags will take precedence, but
|
||||
// for any other field, the mapped name will be f(field.Name)
|
||||
func NewMapperFunc(tagName string, f func(string) string) *Mapper {
|
||||
return &Mapper{
|
||||
cache: make(map[reflect.Type]fieldMap),
|
||||
tagName: tagName,
|
||||
mapFunc: f,
|
||||
}
|
||||
}
|
||||
|
||||
// TypeMap returns a mapping of field strings to int slices representing
|
||||
// the traversal down the struct to reach the field.
|
||||
func (m *Mapper) TypeMap(t reflect.Type) fieldMap {
|
||||
m.mutex.Lock()
|
||||
mapping, ok := m.cache[t]
|
||||
if !ok {
|
||||
mapping = getMapping(t, m.tagName, m.mapFunc)
|
||||
m.cache[t] = mapping
|
||||
}
|
||||
m.mutex.Unlock()
|
||||
return mapping
|
||||
}
|
||||
|
||||
// FieldMap returns the mapper's mapping of field names to reflect values. Panics
|
||||
// if v's Kind is not Struct, or v is not Indirectable to a struct kind.
|
||||
func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value {
|
||||
v = reflect.Indirect(v)
|
||||
mustBe(v, reflect.Struct)
|
||||
|
||||
r := map[string]reflect.Value{}
|
||||
nm := m.TypeMap(v.Type())
|
||||
for tagName, indexes := range nm {
|
||||
r[tagName] = FieldByIndexes(v, indexes)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// FieldByName returns a field by its mapped name as a reflect.Value.
|
||||
// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind.
|
||||
// Returns zero Value if the name is not found.
|
||||
func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value {
|
||||
v = reflect.Indirect(v)
|
||||
mustBe(v, reflect.Struct)
|
||||
|
||||
nm := m.TypeMap(v.Type())
|
||||
traversal, ok := nm[name]
|
||||
if !ok {
|
||||
return *new(reflect.Value)
|
||||
}
|
||||
return FieldByIndexes(v, traversal)
|
||||
}
|
||||
|
||||
// FieldsByName returns a slice of values corresponding to the slice of names
|
||||
// for the value. Panics if v's Kind is not Struct or v is not Indirectable
|
||||
// to a struct Kind. Returns zero Value for each name not found.
|
||||
func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value {
|
||||
v = reflect.Indirect(v)
|
||||
mustBe(v, reflect.Struct)
|
||||
|
||||
nm := m.TypeMap(v.Type())
|
||||
|
||||
vals := make([]reflect.Value, 0, len(names))
|
||||
for _, name := range names {
|
||||
traversal, ok := nm[name]
|
||||
if !ok {
|
||||
vals = append(vals, *new(reflect.Value))
|
||||
} else {
|
||||
vals = append(vals, FieldByIndexes(v, traversal))
|
||||
}
|
||||
}
|
||||
return vals
|
||||
}
|
||||
|
||||
// TraversalsByName returns a slice of int slices which represent the struct
|
||||
// traversals for each mapped name. Panics if t is not a struct or Indirectable
|
||||
// to a struct. Returns empty int slice for each name not found.
|
||||
func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int {
|
||||
t = Deref(t)
|
||||
mustBe(t, reflect.Struct)
|
||||
nm := m.TypeMap(t)
|
||||
|
||||
r := make([][]int, 0, len(names))
|
||||
for _, name := range names {
|
||||
traversal, ok := nm[name]
|
||||
if !ok {
|
||||
r = append(r, []int{})
|
||||
} else {
|
||||
r = append(r, traversal)
|
||||
}
|
||||
}
|
||||
return r
|
||||
}
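
// Illustrative sketch of the traversal shape for an embedded struct
// (types and names below are assumptions for illustration):
//
//	type Inner struct{ A int }
//	type Outer struct {
//		Inner
//		B int
//	}
//	m := NewMapper("")
//	m.TraversalsByName(reflect.TypeOf(Outer{}), []string{"A", "B"})
//	// => [][]int{{0, 0}, {1}}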
|
||||
|
||||
// FieldByIndexes returns a value for a particular struct traversal.
|
||||
func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value {
|
||||
for _, i := range indexes {
|
||||
v = reflect.Indirect(v).Field(i)
|
||||
// if this is a pointer, it's possible it is nil
|
||||
if v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
alloc := reflect.New(Deref(v.Type()))
|
||||
v.Set(alloc)
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// FieldByIndexesReadOnly returns a value for a particular struct traversal,
|
||||
// but is not concerned with allocating nil pointers because the value is
|
||||
// going to be used for reading and not setting.
|
||||
func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value {
|
||||
for _, i := range indexes {
|
||||
v = reflect.Indirect(v).Field(i)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Deref is Indirect for reflect.Types
|
||||
func Deref(t reflect.Type) reflect.Type {
|
||||
if t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// -- helpers & utilities --
|
||||
|
||||
type Kinder interface {
|
||||
Kind() reflect.Kind
|
||||
}
|
||||
|
||||
// mustBe checks a value against a kind, panicking with a reflect.ValueError
|
||||
// if the kind isn't that which is required.
|
||||
func mustBe(v Kinder, expected reflect.Kind) {
|
||||
k := v.Kind()
|
||||
if k != expected {
|
||||
panic(&reflect.ValueError{Method: methodName(), Kind: k})
|
||||
}
|
||||
}
|
||||
|
||||
// methodName returns the name of the caller of the function calling methodName
|
||||
func methodName() string {
|
||||
pc, _, _, _ := runtime.Caller(2)
|
||||
f := runtime.FuncForPC(pc)
|
||||
if f == nil {
|
||||
return "unknown method"
|
||||
}
|
||||
return f.Name()
|
||||
}
|
||||
|
||||
type typeQueue struct {
|
||||
t reflect.Type
|
||||
p []int
|
||||
}
|
||||
|
||||
// A copying append that creates a new slice each time.
|
||||
func apnd(is []int, i int) []int {
|
||||
x := make([]int, len(is)+1)
|
||||
for p, n := range is {
|
||||
x[p] = n
|
||||
}
|
||||
x[len(x)-1] = i
|
||||
return x
|
||||
}
|
||||
|
||||
// getMapping returns a mapping for the t type, using the tagName and the mapFunc
|
||||
// to determine the canonical names of fields.
|
||||
func getMapping(t reflect.Type, tagName string, mapFunc func(string) string) fieldMap {
|
||||
queue := []typeQueue{}
|
||||
queue = append(queue, typeQueue{Deref(t), []int{}})
|
||||
m := fieldMap{}
|
||||
for len(queue) != 0 {
|
||||
// pop the first item off of the queue
|
||||
tq := queue[0]
|
||||
queue = queue[1:]
|
||||
// iterate through all of its fields
|
||||
for fieldPos := 0; fieldPos < tq.t.NumField(); fieldPos++ {
|
||||
f := tq.t.Field(fieldPos)
|
||||
|
||||
name := f.Tag.Get(tagName)
|
||||
if len(name) == 0 {
|
||||
if mapFunc != nil {
|
||||
name = mapFunc(f.Name)
|
||||
} else {
|
||||
name = f.Name
|
||||
}
|
||||
}
|
||||
|
||||
// if the name is "-", disabled via a tag, skip it
|
||||
if name == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
// skip unexported fields
|
||||
if len(f.PkgPath) != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// bfs search of anonymous embedded structs
|
||||
if f.Anonymous {
|
||||
queue = append(queue, typeQueue{Deref(f.Type), apnd(tq.p, fieldPos)})
|
||||
continue
|
||||
}
|
||||
|
||||
// if the name is shadowed by an earlier identical name in the search, skip it
|
||||
if _, ok := m[name]; ok {
|
||||
continue
|
||||
}
|
||||
// add it to the map at the current position
|
||||
m[name] = apnd(tq.p, fieldPos)
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
216
vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go
generated
vendored
216
vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go
generated
vendored
@ -1,216 +0,0 @@
|
||||
package reflectx
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func ival(v reflect.Value) int {
|
||||
return v.Interface().(int)
|
||||
}
|
||||
|
||||
func TestBasic(t *testing.T) {
|
||||
type Foo struct {
|
||||
A int
|
||||
B int
|
||||
C int
|
||||
}
|
||||
|
||||
f := Foo{1, 2, 3}
|
||||
fv := reflect.ValueOf(f)
|
||||
m := NewMapper("")
|
||||
|
||||
v := m.FieldByName(fv, "A")
|
||||
if ival(v) != f.A {
|
||||
t.Errorf("Expecting %d, got %d", ival(v), f.A)
|
||||
}
|
||||
v = m.FieldByName(fv, "B")
|
||||
if ival(v) != f.B {
|
||||
t.Errorf("Expecting %d, got %d", f.B, ival(v))
|
||||
}
|
||||
v = m.FieldByName(fv, "C")
|
||||
if ival(v) != f.C {
|
||||
t.Errorf("Expecting %d, got %d", f.C, ival(v))
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmbedded(t *testing.T) {
|
||||
type Foo struct {
|
||||
A int
|
||||
}
|
||||
|
||||
type Bar struct {
|
||||
Foo
|
||||
B int
|
||||
}
|
||||
|
||||
type Baz struct {
|
||||
A int
|
||||
Bar
|
||||
}
|
||||
|
||||
m := NewMapper("")
|
||||
|
||||
z := Baz{}
|
||||
z.A = 1
|
||||
z.B = 2
|
||||
z.Bar.Foo.A = 3
|
||||
zv := reflect.ValueOf(z)
|
||||
|
||||
v := m.FieldByName(zv, "A")
|
||||
if ival(v) != z.A {
|
||||
t.Errorf("Expecting %d, got %d", ival(v), z.A)
|
||||
}
|
||||
v = m.FieldByName(zv, "B")
|
||||
if ival(v) != z.B {
|
||||
t.Errorf("Expecting %d, got %d", ival(v), z.B)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMapping(t *testing.T) {
|
||||
type Person struct {
|
||||
ID int
|
||||
Name string
|
||||
WearsGlasses bool `db:"wears_glasses"`
|
||||
}
|
||||
|
||||
m := NewMapperFunc("db", strings.ToLower)
|
||||
p := Person{1, "Jason", true}
|
||||
mapping := m.TypeMap(reflect.TypeOf(p))
|
||||
|
||||
for _, key := range []string{"id", "name", "wears_glasses"} {
|
||||
if _, ok := mapping[key]; !ok {
|
||||
t.Errorf("Expecting to find key %s in mapping but did not.", key)
|
||||
}
|
||||
}
|
||||
|
||||
type SportsPerson struct {
|
||||
Weight int
|
||||
Age int
|
||||
Person
|
||||
}
|
||||
s := SportsPerson{Weight: 100, Age: 30, Person: p}
|
||||
mapping = m.TypeMap(reflect.TypeOf(s))
|
||||
for _, key := range []string{"id", "name", "wears_glasses", "weight", "age"} {
|
||||
if _, ok := mapping[key]; !ok {
|
||||
t.Errorf("Expecting to find key %s in mapping but did not.", key)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
type RugbyPlayer struct {
|
||||
Position int
|
||||
IsIntense bool `db:"is_intense"`
|
||||
IsAllBlack bool `db:"-"`
|
||||
SportsPerson
|
||||
}
|
||||
r := RugbyPlayer{12, true, false, s}
|
||||
mapping = m.TypeMap(reflect.TypeOf(r))
|
||||
for _, key := range []string{"id", "name", "wears_glasses", "weight", "age", "position", "is_intense"} {
|
||||
if _, ok := mapping[key]; !ok {
|
||||
t.Errorf("Expecting to find key %s in mapping but did not.", key)
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := mapping["isallblack"]; ok {
|
||||
t.Errorf("Expecting to ignore `IsAllBlack` field")
|
||||
}
|
||||
|
||||
type EmbeddedLiteral struct {
|
||||
Embedded struct {
|
||||
Person string
|
||||
Position int
|
||||
}
|
||||
IsIntense bool
|
||||
}
|
||||
|
||||
e := EmbeddedLiteral{}
|
||||
mapping = m.TypeMap(reflect.TypeOf(e))
|
||||
//fmt.Printf("Mapping: %#v\n", mapping)
|
||||
|
||||
//f := FieldByIndexes(reflect.ValueOf(e), mapping["isintense"])
|
||||
//fmt.Println(f, f.Interface())
|
||||
|
||||
//tbn := m.TraversalsByName(reflect.TypeOf(e), []string{"isintense"})
|
||||
//fmt.Printf("%#v\n", tbn)
|
||||
|
||||
}
|
||||
|
||||
type E1 struct {
|
||||
A int
|
||||
}
|
||||
type E2 struct {
|
||||
E1
|
||||
B int
|
||||
}
|
||||
type E3 struct {
|
||||
E2
|
||||
C int
|
||||
}
|
||||
type E4 struct {
|
||||
E3
|
||||
D int
|
||||
}
|
||||
|
||||
func BenchmarkFieldNameL1(b *testing.B) {
|
||||
e4 := E4{D: 1}
|
||||
for i := 0; i < b.N; i++ {
|
||||
v := reflect.ValueOf(e4)
|
||||
f := v.FieldByName("D")
|
||||
if f.Interface().(int) != 1 {
|
||||
b.Fatal("Wrong value.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkFieldNameL4(b *testing.B) {
|
||||
e4 := E4{}
|
||||
e4.A = 1
|
||||
for i := 0; i < b.N; i++ {
|
||||
v := reflect.ValueOf(e4)
|
||||
f := v.FieldByName("A")
|
||||
if f.Interface().(int) != 1 {
|
||||
b.Fatal("Wrong value.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkFieldPosL1(b *testing.B) {
|
||||
e4 := E4{D: 1}
|
||||
for i := 0; i < b.N; i++ {
|
||||
v := reflect.ValueOf(e4)
|
||||
f := v.Field(1)
|
||||
if f.Interface().(int) != 1 {
|
||||
b.Fatal("Wrong value.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkFieldPosL4(b *testing.B) {
|
||||
e4 := E4{}
|
||||
e4.A = 1
|
||||
for i := 0; i < b.N; i++ {
|
||||
v := reflect.ValueOf(e4)
|
||||
f := v.Field(0)
|
||||
f = f.Field(0)
|
||||
f = f.Field(0)
|
||||
f = f.Field(0)
|
||||
if f.Interface().(int) != 1 {
|
||||
b.Fatal("Wrong value.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkFieldByIndexL4(b *testing.B) {
|
||||
e4 := E4{}
|
||||
e4.A = 1
|
||||
idx := []int{0, 0, 0, 0}
|
||||
for i := 0; i < b.N; i++ {
|
||||
v := reflect.ValueOf(e4)
|
||||
f := FieldByIndexes(v, idx)
|
||||
if f.Interface().(int) != 1 {
|
||||
b.Fatal("Wrong value.")
|
||||
}
|
||||
}
|
||||
}
|
986
vendor/github.com/jmoiron/sqlx/sqlx.go
generated
vendored
986
vendor/github.com/jmoiron/sqlx/sqlx.go
generated
vendored
@ -1,986 +0,0 @@
|
||||
package sqlx
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/jmoiron/sqlx/reflectx"
|
||||
)
|
||||
|
||||
// Although the NameMapper is convenient, in practice it should not
|
||||
// be relied on except for application code. If you are writing a library
|
||||
// that uses sqlx, you should be aware that the name mappings you expect
|
||||
// can be overridden by your user's application.
|
||||
|
||||
// NameMapper is used to map column names to struct field names. By default,
|
||||
// it uses strings.ToLower to lowercase struct field names. It can be set
|
||||
// to whatever you want, but it is encouraged to be set before sqlx is used
|
||||
// as name-to-field mappings are cached after first use on a type.
|
||||
var NameMapper = strings.ToLower
|
||||
var origMapper = reflect.ValueOf(NameMapper)
|
||||
|
||||
// Rather than creating on init, this is created when necessary so that
|
||||
// importers have time to customize the NameMapper.
|
||||
var mpr *reflectx.Mapper
|
||||
|
||||
// mapper returns a valid mapper using the configured NameMapper func.
|
||||
func mapper() *reflectx.Mapper {
|
||||
if mpr == nil {
|
||||
mpr = reflectx.NewMapperFunc("db", NameMapper)
|
||||
} else if origMapper != reflect.ValueOf(NameMapper) {
|
||||
// if NameMapper has changed, create a new mapper
|
||||
mpr = reflectx.NewMapperFunc("db", NameMapper)
|
||||
origMapper = reflect.ValueOf(NameMapper)
|
||||
}
|
||||
return mpr
|
||||
}
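
// Hedged sketch: customize NameMapper before sqlx is first used, since
// name-to-field mappings are cached per type after first use:
//
//	sqlx.NameMapper = strings.ToUpper
//	// from here on, un-tagged struct fields are matched against upper-cased column names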
|
||||
|
||||
// isScannable takes the reflect.Type and the actual dest value and returns
|
||||
// whether or not it's Scannable. Something is scannable if:
|
||||
// * it is not a struct
|
||||
// * it implements sql.Scanner
|
||||
// * it has no exported fields
|
||||
func isScannable(t reflect.Type) bool {
|
||||
if reflect.PtrTo(t).Implements(_scannerInterface) {
|
||||
return true
|
||||
}
|
||||
if t.Kind() != reflect.Struct {
|
||||
return true
|
||||
}
|
||||
|
||||
// it's not important that we use the right mapper for this particular object,
|
||||
// we're only concerned with how many exported fields this struct has
|
||||
m := mapper()
|
||||
if len(m.TypeMap(t)) == 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ColScanner is an interface used by MapScan and SliceScan
|
||||
type ColScanner interface {
|
||||
Columns() ([]string, error)
|
||||
Scan(dest ...interface{}) error
|
||||
Err() error
|
||||
}
|
||||
|
||||
// Queryer is an interface used by Get and Select
|
||||
type Queryer interface {
|
||||
Query(query string, args ...interface{}) (*sql.Rows, error)
|
||||
Queryx(query string, args ...interface{}) (*Rows, error)
|
||||
QueryRowx(query string, args ...interface{}) *Row
|
||||
}
|
||||
|
||||
// Execer is an interface used by MustExec and LoadFile
|
||||
type Execer interface {
|
||||
Exec(query string, args ...interface{}) (sql.Result, error)
|
||||
}
|
||||
|
||||
// binder is an interface for something which can bind queries (Tx, DB).
|
||||
type binder interface {
|
||||
DriverName() string
|
||||
Rebind(string) string
|
||||
BindNamed(string, interface{}) (string, []interface{}, error)
|
||||
}
|
||||
|
||||
// Ext is a union interface which can bind, query, and exec, used by
|
||||
// NamedQuery and NamedExec.
|
||||
type Ext interface {
|
||||
binder
|
||||
Queryer
|
||||
Execer
|
||||
}
|
||||
|
||||
// Preparer is an interface used by Preparex.
|
||||
type Preparer interface {
|
||||
Prepare(query string) (*sql.Stmt, error)
|
||||
}
|
||||
|
||||
// determine if any of our extensions are unsafe
|
||||
func isUnsafe(i interface{}) bool {
|
||||
switch i.(type) {
|
||||
case Row:
|
||||
return i.(Row).unsafe
|
||||
case *Row:
|
||||
return i.(*Row).unsafe
|
||||
case Rows:
|
||||
return i.(Rows).unsafe
|
||||
case *Rows:
|
||||
return i.(*Rows).unsafe
|
||||
case Stmt:
|
||||
return i.(Stmt).unsafe
|
||||
case qStmt:
|
||||
return i.(qStmt).Stmt.unsafe
|
||||
case *qStmt:
|
||||
return i.(*qStmt).Stmt.unsafe
|
||||
case DB:
|
||||
return i.(DB).unsafe
|
||||
case *DB:
|
||||
return i.(*DB).unsafe
|
||||
case Tx:
|
||||
return i.(Tx).unsafe
|
||||
case *Tx:
|
||||
return i.(*Tx).unsafe
|
||||
case sql.Rows, *sql.Rows:
|
||||
return false
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func mapperFor(i interface{}) *reflectx.Mapper {
|
||||
switch i.(type) {
|
||||
case DB:
|
||||
return i.(DB).Mapper
|
||||
case *DB:
|
||||
return i.(*DB).Mapper
|
||||
case Tx:
|
||||
return i.(Tx).Mapper
|
||||
case *Tx:
|
||||
return i.(*Tx).Mapper
|
||||
default:
|
||||
return mapper()
|
||||
}
|
||||
}
|
||||
|
||||
var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
|
||||
var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
|
||||
|
||||
// Row is a reimplementation of sql.Row in order to gain access to the underlying
|
||||
// sql.Rows.Columns() data, necessary for StructScan.
|
||||
type Row struct {
|
||||
err error
|
||||
unsafe bool
|
||||
rows *sql.Rows
|
||||
Mapper *reflectx.Mapper
|
||||
}
|
||||
|
||||
// Scan is a fixed implementation of sql.Row.Scan, which does not discard the
|
||||
// underlying error from the internal rows object if it exists.
|
||||
func (r *Row) Scan(dest ...interface{}) error {
|
||||
if r.err != nil {
|
||||
return r.err
|
||||
}
|
||||
|
||||
// TODO(bradfitz): for now we need to defensively clone all
|
||||
// []byte that the driver returned (not permitting
|
||||
// *RawBytes in Rows.Scan), since we're about to close
|
||||
// the Rows in our defer, when we return from this function.
|
||||
// the contract with the driver.Next(...) interface is that it
|
||||
// can return slices into read-only temporary memory that's
|
||||
// only valid until the next Scan/Close. But the TODO is that
|
||||
// for a lot of drivers, this copy will be unnecessary. We
|
||||
// should provide an optional interface for drivers to
|
||||
// implement to say, "don't worry, the []bytes that I return
|
||||
// from Next will not be modified again." (for instance, if
|
||||
// they were obtained from the network anyway) But for now we
|
||||
// don't care.
|
||||
defer r.rows.Close()
|
||||
for _, dp := range dest {
|
||||
if _, ok := dp.(*sql.RawBytes); ok {
|
||||
return errors.New("sql: RawBytes isn't allowed on Row.Scan")
|
||||
}
|
||||
}
|
||||
|
||||
if !r.rows.Next() {
|
||||
if err := r.rows.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
return sql.ErrNoRows
|
||||
}
|
||||
err := r.rows.Scan(dest...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Make sure the query can be processed to completion with no errors.
|
||||
if err := r.rows.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Columns returns the underlying sql.Rows.Columns(), or the deferred error usually
|
||||
// returned by Row.Scan()
|
||||
func (r *Row) Columns() ([]string, error) {
|
||||
if r.err != nil {
|
||||
return []string{}, r.err
|
||||
}
|
||||
return r.rows.Columns()
|
||||
}
|
||||
|
||||
// Err returns the error encountered while scanning.
|
||||
func (r *Row) Err() error {
|
||||
return r.err
|
||||
}
|
||||
|
||||
// DB is a wrapper around sql.DB which keeps track of the driverName upon Open,
|
||||
// used mostly to automatically bind named queries using the right bindvars.
|
||||
type DB struct {
|
||||
*sql.DB
|
||||
driverName string
|
||||
unsafe bool
|
||||
Mapper *reflectx.Mapper
|
||||
}
|
||||
|
||||
// NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The
|
||||
// driverName of the original database is required for named query support.
|
||||
func NewDb(db *sql.DB, driverName string) *DB {
|
||||
return &DB{DB: db, driverName: driverName, Mapper: mapper()}
|
||||
}
|
||||
|
||||
// DriverName returns the driverName passed to the Open function for this DB.
|
||||
func (db *DB) DriverName() string {
|
||||
return db.driverName
|
||||
}
|
||||
|
||||
// Open is the same as sql.Open, but returns an *sqlx.DB instead.
|
||||
func Open(driverName, dataSourceName string) (*DB, error) {
|
||||
db, err := sql.Open(driverName, dataSourceName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err
|
||||
}
|
||||
|
||||
// MustOpen is the same as sql.Open, but returns an *sqlx.DB instead and panics on error.
|
||||
func MustOpen(driverName, dataSourceName string) *DB {
|
||||
db, err := Open(driverName, dataSourceName)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return db
|
||||
}
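
// Hedged usage sketch (driver name and DSN are placeholders only):
//
//	db, err := sqlx.Open("postgres", "user=foo dbname=bar sslmode=disable")
//	if err != nil {
//		// handle error
//	}
//	defer db.Close()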
|
||||
|
||||
// MapperFunc sets a new mapper for this db using the default sqlx struct tag
|
||||
// and the provided mapper function.
|
||||
func (db *DB) MapperFunc(mf func(string) string) {
|
||||
db.Mapper = reflectx.NewMapperFunc("db", mf)
|
||||
}
|
||||
|
||||
// Rebind transforms a query from QUESTION to the DB driver's bindvar type.
|
||||
func (db *DB) Rebind(query string) string {
|
||||
return Rebind(BindType(db.driverName), query)
|
||||
}
|
||||
|
||||
// Unsafe returns a version of DB which will silently succeed to scan when
|
||||
// columns in the SQL result have no fields in the destination struct.
|
||||
// sqlx.Stmt and sqlx.Tx which are created from this DB will inherit its
|
||||
// safety behavior.
|
||||
func (db *DB) Unsafe() *DB {
|
||||
return &DB{DB: db.DB, driverName: db.driverName, unsafe: true, Mapper: db.Mapper}
|
||||
}
|
||||
|
||||
// BindNamed binds a query using the DB driver's bindvar type.
|
||||
func (db *DB) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
|
||||
return BindNamed(BindType(db.driverName), query, arg)
|
||||
}
|
||||
|
||||
// NamedQuery using this DB.
|
||||
func (db *DB) NamedQuery(query string, arg interface{}) (*Rows, error) {
|
||||
return NamedQuery(db, query, arg)
|
||||
}
|
||||
|
||||
// NamedExec using this DB.
|
||||
func (db *DB) NamedExec(query string, arg interface{}) (sql.Result, error) {
|
||||
return NamedExec(db, query, arg)
|
||||
}
|
||||
|
||||
// Select using this DB.
|
||||
func (db *DB) Select(dest interface{}, query string, args ...interface{}) error {
|
||||
return Select(db, dest, query, args...)
|
||||
}
|
||||
|
||||
// Get using this DB.
|
||||
func (db *DB) Get(dest interface{}, query string, args ...interface{}) error {
|
||||
return Get(db, dest, query, args...)
|
||||
}
|
||||
|
||||
// MustBegin starts a transaction, and panics on error. Returns an *sqlx.Tx instead
|
||||
// of an *sql.Tx.
|
||||
func (db *DB) MustBegin() *Tx {
|
||||
tx, err := db.Beginx()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return tx
|
||||
}
|
||||
|
||||
// Beginx begins a transaction and returns an *sqlx.Tx instead of an *sql.Tx.
|
||||
func (db *DB) Beginx() (*Tx, error) {
|
||||
tx, err := db.DB.Begin()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err
|
||||
}
|
||||
|
||||
// Queryx queries the database and returns an *sqlx.Rows.
|
||||
func (db *DB) Queryx(query string, args ...interface{}) (*Rows, error) {
|
||||
r, err := db.DB.Query(query, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err
|
||||
}
|
||||
|
||||
// QueryRowx queries the database and returns an *sqlx.Row.
|
||||
func (db *DB) QueryRowx(query string, args ...interface{}) *Row {
|
||||
rows, err := db.DB.Query(query, args...)
|
||||
return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper}
|
||||
}
|
||||
|
||||
// MustExec (panic) runs MustExec using this database.
|
||||
func (db *DB) MustExec(query string, args ...interface{}) sql.Result {
|
||||
return MustExec(db, query, args...)
|
||||
}
|
||||
|
||||
// Preparex returns an sqlx.Stmt instead of a sql.Stmt
|
||||
func (db *DB) Preparex(query string) (*Stmt, error) {
|
||||
return Preparex(db, query)
|
||||
}
|
||||
|
||||
// PrepareNamed returns an sqlx.NamedStmt
|
||||
func (db *DB) PrepareNamed(query string) (*NamedStmt, error) {
|
||||
return prepareNamed(db, query)
|
||||
}
|
||||
|
||||
// Tx is an sqlx wrapper around sql.Tx with extra functionality
|
||||
type Tx struct {
|
||||
*sql.Tx
|
||||
driverName string
|
||||
unsafe bool
|
||||
Mapper *reflectx.Mapper
|
||||
}
|
||||
|
||||
// DriverName returns the driverName used by the DB which began this transaction.
|
||||
func (tx *Tx) DriverName() string {
|
||||
return tx.driverName
|
||||
}
|
||||
|
||||
// Rebind a query within a transaction's bindvar type.
|
||||
func (tx *Tx) Rebind(query string) string {
|
||||
return Rebind(BindType(tx.driverName), query)
|
||||
}
|
||||
|
||||
// Unsafe returns a version of Tx which will silently succeed to scan when
|
||||
// columns in the SQL result have no fields in the destination struct.
|
||||
func (tx *Tx) Unsafe() *Tx {
|
||||
return &Tx{Tx: tx.Tx, driverName: tx.driverName, unsafe: true, Mapper: tx.Mapper}
|
||||
}
|
||||
|
||||
// BindNamed binds a query within a transaction's bindvar type.
|
||||
func (tx *Tx) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
|
||||
return BindNamed(BindType(tx.driverName), query, arg)
|
||||
}
|
||||
|
||||
// NamedQuery within a transaction.
|
||||
func (tx *Tx) NamedQuery(query string, arg interface{}) (*Rows, error) {
|
||||
return NamedQuery(tx, query, arg)
|
||||
}
|
||||
|
||||
// NamedExec a named query within a transaction.
|
||||
func (tx *Tx) NamedExec(query string, arg interface{}) (sql.Result, error) {
|
||||
return NamedExec(tx, query, arg)
|
||||
}
|
||||
|
||||
// Select within a transaction.
|
||||
func (tx *Tx) Select(dest interface{}, query string, args ...interface{}) error {
|
||||
return Select(tx, dest, query, args...)
|
||||
}
|
||||
|
||||
// Queryx within a transaction.
|
||||
func (tx *Tx) Queryx(query string, args ...interface{}) (*Rows, error) {
|
||||
r, err := tx.Tx.Query(query, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err
|
||||
}
|
||||
|
||||
// QueryRowx within a transaction.
|
||||
func (tx *Tx) QueryRowx(query string, args ...interface{}) *Row {
|
||||
rows, err := tx.Tx.Query(query, args...)
|
||||
return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper}
|
||||
}
|
||||
|
||||
// Get within a transaction.
|
||||
func (tx *Tx) Get(dest interface{}, query string, args ...interface{}) error {
|
||||
return Get(tx, dest, query, args...)
|
||||
}
|
||||
|
||||
// MustExec runs MustExec within a transaction.
|
||||
func (tx *Tx) MustExec(query string, args ...interface{}) sql.Result {
|
||||
return MustExec(tx, query, args...)
|
||||
}
|
||||
|
||||
// Preparex a statement within a transaction.
|
||||
func (tx *Tx) Preparex(query string) (*Stmt, error) {
|
||||
return Preparex(tx, query)
|
||||
}
|
||||
|
||||
// Stmtx returns a version of the prepared statement which runs within a transaction. Provided
|
||||
// stmt can be either *sql.Stmt or *sqlx.Stmt.
|
||||
func (tx *Tx) Stmtx(stmt interface{}) *Stmt {
|
||||
var st sql.Stmt
|
||||
var s *sql.Stmt
|
||||
switch stmt.(type) {
|
||||
case sql.Stmt:
|
||||
st = stmt.(sql.Stmt)
|
||||
s = &st
|
||||
case Stmt:
|
||||
s = stmt.(Stmt).Stmt
|
||||
case *Stmt:
|
||||
s = stmt.(*Stmt).Stmt
|
||||
case *sql.Stmt:
|
||||
s = stmt.(*sql.Stmt)
|
||||
}
|
||||
return &Stmt{Stmt: tx.Stmt(s), Mapper: tx.Mapper}
|
||||
}
|
||||
|
||||
// NamedStmt returns a version of the prepared statement which runs within a transaction.
|
||||
func (tx *Tx) NamedStmt(stmt *NamedStmt) *NamedStmt {
|
||||
return &NamedStmt{
|
||||
QueryString: stmt.QueryString,
|
||||
Params: stmt.Params,
|
||||
Stmt: tx.Stmtx(stmt.Stmt),
|
||||
}
|
||||
}
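
// Hedged transaction sketch reusing a previously prepared NamedStmt
// (ns and its arg are assumptions for illustration):
//
//	tx := db.MustBegin()
//	txStmt := tx.NamedStmt(ns) // ns was prepared earlier with db.PrepareNamed
//	if _, err := txStmt.Exec(arg); err != nil {
//		tx.Rollback()
//	} else {
//		tx.Commit()
//	}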
|
||||
|
||||
// PrepareNamed returns an sqlx.NamedStmt
|
||||
func (tx *Tx) PrepareNamed(query string) (*NamedStmt, error) {
|
||||
return prepareNamed(tx, query)
|
||||
}
|
||||
|
||||
// Stmt is an sqlx wrapper around sql.Stmt with extra functionality
|
||||
type Stmt struct {
|
||||
*sql.Stmt
|
||||
unsafe bool
|
||||
Mapper *reflectx.Mapper
|
||||
}
|
||||
|
||||
// Unsafe returns a version of Stmt which will silently succeed to scan when
|
||||
// columns in the SQL result have no fields in the destination struct.
|
||||
func (s *Stmt) Unsafe() *Stmt {
|
||||
return &Stmt{Stmt: s.Stmt, unsafe: true, Mapper: s.Mapper}
|
||||
}
|
||||
|
||||
// Select using the prepared statement.
|
||||
func (s *Stmt) Select(dest interface{}, args ...interface{}) error {
|
||||
return Select(&qStmt{*s}, dest, "", args...)
|
||||
}
|
||||
|
||||
// Get using the prepared statement.
|
||||
func (s *Stmt) Get(dest interface{}, args ...interface{}) error {
|
||||
return Get(&qStmt{*s}, dest, "", args...)
|
||||
}
|
||||
|
||||
// MustExec (panic) using this statement. Note that the query portion of the error
|
||||
// output will be blank, as Stmt does not expose its query.
|
||||
func (s *Stmt) MustExec(args ...interface{}) sql.Result {
|
||||
return MustExec(&qStmt{*s}, "", args...)
|
||||
}
|
||||
|
||||
// QueryRowx using this statement.
|
||||
func (s *Stmt) QueryRowx(args ...interface{}) *Row {
|
||||
qs := &qStmt{*s}
|
||||
return qs.QueryRowx("", args...)
|
||||
}
|
||||
|
||||
// Queryx using this statement.
|
||||
func (s *Stmt) Queryx(args ...interface{}) (*Rows, error) {
|
||||
qs := &qStmt{*s}
|
||||
return qs.Queryx("", args...)
|
||||
}
|
||||
|
||||
// qStmt is an unexposed wrapper which lets you use a Stmt as a Queryer & Execer by
|
||||
// implementing those interfaces and ignoring the `query` argument.
|
||||
type qStmt struct{ Stmt }
|
||||
|
||||
func (q *qStmt) Query(query string, args ...interface{}) (*sql.Rows, error) {
|
||||
return q.Stmt.Query(args...)
|
||||
}
|
||||
|
||||
func (q *qStmt) Queryx(query string, args ...interface{}) (*Rows, error) {
|
||||
r, err := q.Stmt.Query(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err
|
||||
}
|
||||
|
||||
func (q *qStmt) QueryRowx(query string, args ...interface{}) *Row {
|
||||
rows, err := q.Stmt.Query(args...)
|
||||
return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}
|
||||
}
|
||||
|
||||
func (q *qStmt) Exec(query string, args ...interface{}) (sql.Result, error) {
|
||||
return q.Stmt.Exec(args...)
|
||||
}
|
||||
|
||||
// Rows is a wrapper around sql.Rows which caches costly reflect operations
|
||||
// during a looped StructScan
|
||||
type Rows struct {
|
||||
*sql.Rows
|
||||
unsafe bool
|
||||
Mapper *reflectx.Mapper
|
||||
// these fields cache memory use for a rows during iteration w/ structScan
|
||||
started bool
|
||||
fields [][]int
|
||||
values []interface{}
|
||||
}
|
||||
|
||||
// SliceScan using this Rows.
|
||||
func (r *Rows) SliceScan() ([]interface{}, error) {
|
||||
return SliceScan(r)
|
||||
}
|
||||
|
||||
// MapScan using this Rows.
|
||||
func (r *Rows) MapScan(dest map[string]interface{}) error {
|
||||
return MapScan(r, dest)
|
||||
}
|
||||
|
||||
// StructScan is like sql.Rows.Scan, but scans a single Row into a single Struct.
|
||||
// Use this and iterate over Rows manually when the memory load of Select() might be
|
||||
// prohibitive. *Rows.StructScan caches the reflect work of matching up column
|
||||
// positions to fields to avoid that overhead per scan, which means it is not safe
|
||||
// to run StructScan on the same Rows instance with different struct types.
|
||||
func (r *Rows) StructScan(dest interface{}) error {
|
||||
v := reflect.ValueOf(dest)
|
||||
|
||||
if v.Kind() != reflect.Ptr {
|
||||
return errors.New("must pass a pointer, not a value, to StructScan destination")
|
||||
}
|
||||
|
||||
v = reflect.Indirect(v)
|
||||
|
||||
if !r.started {
|
||||
columns, err := r.Columns()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m := r.Mapper
|
||||
|
||||
r.fields = m.TraversalsByName(v.Type(), columns)
|
||||
// if we are not unsafe and are missing fields, return an error
|
||||
if f, err := missingFields(r.fields); err != nil && !r.unsafe {
|
||||
return fmt.Errorf("missing destination name %s", columns[f])
|
||||
}
|
||||
r.values = make([]interface{}, len(columns))
|
||||
r.started = true
|
||||
}
|
||||
|
||||
err := fieldsByTraversal(v, r.fields, r.values, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// scan into the struct field pointers and append to our results
|
||||
err = r.Scan(r.values...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return r.Err()
|
||||
}
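
// Hedged iteration sketch (the query and the Place type are illustrative only):
//
//	rows, err := db.Queryx("SELECT * FROM place")
//	if err != nil {
//		// handle error
//	}
//	defer rows.Close()
//	for rows.Next() {
//		var p Place
//		if err := rows.StructScan(&p); err != nil {
//			// handle error
//		}
//	}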
|
||||
|
||||
// Connect to a database and verify with a ping.
|
||||
func Connect(driverName, dataSourceName string) (*DB, error) {
|
||||
db, err := Open(driverName, dataSourceName)
|
||||
if err != nil {
|
||||
return db, err
|
||||
}
|
||||
err = db.Ping()
|
||||
return db, err
|
||||
}
|
||||
|
||||
// MustConnect connects to a database and panics on error.
|
||||
func MustConnect(driverName, dataSourceName string) *DB {
|
||||
db, err := Connect(driverName, dataSourceName)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return db
|
||||
}
|
||||
|
||||
// Preparex prepares a statement.
|
||||
func Preparex(p Preparer, query string) (*Stmt, error) {
|
||||
s, err := p.Prepare(query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err
|
||||
}
|
||||
|
||||
// Select executes a query using the provided Queryer, and StructScans each row
|
||||
// into dest, which must be a slice. If the slice elements are scannable, then
|
||||
// the result set must have only one column. Otherwise, StructScan is used.
|
||||
// The *sql.Rows are closed automatically.
|
||||
func Select(q Queryer, dest interface{}, query string, args ...interface{}) error {
|
||||
rows, err := q.Queryx(query, args...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// if something happens here, we want to make sure the rows are Closed
|
||||
defer rows.Close()
|
||||
return scanAll(rows, dest, false)
|
||||
}
|
||||
|
||||
// Get does a QueryRow using the provided Queryer, and scans the resulting row
|
||||
// to dest. If dest is scannable, the result must only have one column. Otherwise,
|
||||
// StructScan is used. Get will return sql.ErrNoRows like row.Scan would.
|
||||
func Get(q Queryer, dest interface{}, query string, args ...interface{}) error {
|
||||
r := q.QueryRowx(query, args...)
|
||||
return r.scanAny(dest, false)
|
||||
}
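
// Hedged usage sketch for Get and Select (the schema is illustrative and the
// bindvars assume a Postgres-style driver; adjust to your driver's bindvar type):
//
//	var p Person
//	err := db.Get(&p, "SELECT * FROM person WHERE id=$1", 7)
//
//	var people []Person
//	err = db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC")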
|
||||
|
||||
// LoadFile exec's every statement in a file (as a single call to Exec).
|
||||
// LoadFile may return a nil *sql.Result if errors are encountered locating or
|
||||
// reading the file at path. LoadFile reads the entire file into memory, so it
|
||||
// is not suitable for loading large data dumps, but can be useful for initializing
|
||||
// schemas or loading indexes.
|
||||
//
|
||||
// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3
|
||||
// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting
|
||||
// this by requiring something with DriverName() and then attempting to split the
|
||||
// queries will be difficult to get right, and its current driver-specific behavior
|
||||
// is deemed at least not complex in its incorrectness.
|
||||
func LoadFile(e Execer, path string) (*sql.Result, error) {
|
||||
realpath, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
contents, err := ioutil.ReadFile(realpath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res, err := e.Exec(string(contents))
|
||||
return &res, err
|
||||
}
|
||||
|
||||
// MustExec execs the query using e and panics if there was an error.
|
||||
func MustExec(e Execer, query string, args ...interface{}) sql.Result {
|
||||
res, err := e.Exec(query, args...)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// SliceScan using this Row.
|
||||
func (r *Row) SliceScan() ([]interface{}, error) {
|
||||
return SliceScan(r)
|
||||
}
|
||||
|
||||
// MapScan using this Row.
|
||||
func (r *Row) MapScan(dest map[string]interface{}) error {
|
||||
return MapScan(r, dest)
|
||||
}
|
||||
|
||||
func (r *Row) scanAny(dest interface{}, structOnly bool) error {
|
||||
if r.err != nil {
|
||||
return r.err
|
||||
}
|
||||
defer r.rows.Close()
|
||||
|
||||
v := reflect.ValueOf(dest)
|
||||
if v.Kind() != reflect.Ptr {
|
||||
return errors.New("must pass a pointer, not a value, to StructScan destination")
|
||||
}
|
||||
if v.IsNil() {
|
||||
return errors.New("nil pointer passed to StructScan destination")
|
||||
}
|
||||
|
||||
base := reflectx.Deref(v.Type())
|
||||
scannable := isScannable(base)
|
||||
|
||||
if structOnly && scannable {
|
||||
return structOnlyError(base)
|
||||
}
|
||||
|
||||
columns, err := r.Columns()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if scannable && len(columns) > 1 {
|
||||
return fmt.Errorf("scannable dest type %s with >1 columns (%d) in result", base.Kind(), len(columns))
|
||||
}
|
||||
|
||||
if scannable {
|
||||
return r.Scan(dest)
|
||||
}
|
||||
|
||||
m := r.Mapper
|
||||
|
||||
fields := m.TraversalsByName(v.Type(), columns)
|
||||
// if we are not unsafe and are missing fields, return an error
|
||||
if f, err := missingFields(fields); err != nil && !r.unsafe {
|
||||
return fmt.Errorf("missing destination name %s", columns[f])
|
||||
}
|
||||
values := make([]interface{}, len(columns))
|
||||
|
||||
err = fieldsByTraversal(v, fields, values, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// scan into the struct field pointers and append to our results
|
||||
return r.Scan(values...)
|
||||
}
|
||||
|
||||
// StructScan a single Row into dest.
|
||||
func (r *Row) StructScan(dest interface{}) error {
|
||||
return r.scanAny(dest, true)
|
||||
}
|
||||
|
||||
// SliceScan a row, returning a []interface{} with values similar to MapScan.
|
||||
// This function is primarly intended for use where the number of columns
|
||||
// is not known. Because you can pass an []interface{} directly to Scan,
|
||||
// it's recommended that you do that as it will not have to allocate new
|
||||
// slices per row.
|
||||
func SliceScan(r ColScanner) ([]interface{}, error) {
|
||||
// ignore r.started, since we needn't use reflect for anything.
|
||||
columns, err := r.Columns()
|
||||
if err != nil {
|
||||
return []interface{}{}, err
|
||||
}
|
||||
|
||||
values := make([]interface{}, len(columns))
|
||||
for i := range values {
|
||||
values[i] = new(interface{})
|
||||
}
|
||||
|
||||
err = r.Scan(values...)
|
||||
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
|
||||
for i := range columns {
|
||||
values[i] = *(values[i].(*interface{}))
|
||||
}
|
||||
|
||||
return values, r.Err()
|
||||
}
|
||||
|
||||
// MapScan scans a single Row into the dest map[string]interface{}.
|
||||
// Use this to get results for SQL that might not be under your control
|
||||
// (for instance, if you're building an interface for an SQL server that
|
||||
// executes SQL from input). Please do not use this as a primary interface!
|
||||
// This will modify the map sent to it in place, so reuse the same map with
|
||||
// care. Columns which occur more than once in the result will overwrite
|
||||
// eachother!
|
||||
func MapScan(r ColScanner, dest map[string]interface{}) error {
|
||||
// ignore r.started, since we needn't use reflect for anything.
|
||||
columns, err := r.Columns()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
values := make([]interface{}, len(columns))
|
||||
for i := range values {
|
||||
values[i] = new(interface{})
|
||||
}
|
||||
|
||||
err = r.Scan(values...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i, column := range columns {
|
||||
dest[column] = *(values[i].(*interface{}))
|
||||
}
|
||||
|
||||
return r.Err()
|
||||
}
|
||||
|
||||
type rowsi interface {
|
||||
Close() error
|
||||
Columns() ([]string, error)
|
||||
Err() error
|
||||
Next() bool
|
||||
Scan(...interface{}) error
|
||||
}
|
||||
|
||||
// structOnlyError returns an error appropriate for type when a non-scannable
|
||||
// struct is expected but something else is given
|
||||
func structOnlyError(t reflect.Type) error {
|
||||
isStruct := t.Kind() == reflect.Struct
|
||||
isScanner := reflect.PtrTo(t).Implements(_scannerInterface)
|
||||
if !isStruct {
|
||||
return fmt.Errorf("expected %s but got %s", reflect.Struct, t.Kind())
|
||||
}
|
||||
if isScanner {
|
||||
return fmt.Errorf("structscan expects a struct dest but the provided struct type %s implements scanner", t.Name())
|
||||
}
|
||||
return fmt.Errorf("expected a struct, but struct %s has no exported fields", t.Name())
|
||||
}
|
||||
|
||||
// scanAll scans all rows into a destination, which must be a slice of any
|
||||
// type. If the destination slice type is a Struct, then StructScan will be
|
||||
// used on each row. If the destination is some other kind of base type, then
|
||||
// each row must only have one column which can scan into that type. This
|
||||
// allows you to do something like:
|
||||
//
|
||||
// rows, _ := db.Query("select id from people;")
|
||||
// var ids []int
|
||||
// scanAll(rows, &ids, false)
|
||||
//
|
||||
// and ids will be a list of the id results. I realize that this is a desirable
|
||||
// interface to expose to users, but for now it will only be exposed via changes
|
||||
// to `Get` and `Select`. The reason that this has been implemented like this is
|
||||
// this is the only way to not duplicate reflect work in the new API while
|
||||
// maintaining backwards compatibility.
|
||||
func scanAll(rows rowsi, dest interface{}, structOnly bool) error {
|
||||
var v, vp reflect.Value
|
||||
|
||||
value := reflect.ValueOf(dest)
|
||||
|
||||
// json.Unmarshal returns errors for these
|
||||
if value.Kind() != reflect.Ptr {
|
||||
return errors.New("must pass a pointer, not a value, to StructScan destination")
|
||||
}
|
||||
if value.IsNil() {
|
||||
return errors.New("nil pointer passed to StructScan destination")
|
||||
}
|
||||
direct := reflect.Indirect(value)
|
||||
|
||||
slice, err := baseType(value.Type(), reflect.Slice)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
isPtr := slice.Elem().Kind() == reflect.Ptr
|
||||
base := reflectx.Deref(slice.Elem())
|
||||
scannable := isScannable(base)
|
||||
|
||||
if structOnly && scannable {
|
||||
return structOnlyError(base)
|
||||
}
|
||||
|
||||
columns, err := rows.Columns()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if it's a base type make sure it only has 1 column; if not return an error
|
||||
if scannable && len(columns) > 1 {
|
||||
return fmt.Errorf("non-struct dest type %s with >1 columns (%d)", base.Kind(), len(columns))
|
||||
}
|
||||
|
||||
if !scannable {
|
||||
var values []interface{}
|
||||
var m *reflectx.Mapper
|
||||
|
||||
switch rows.(type) {
|
||||
case *Rows:
|
||||
m = rows.(*Rows).Mapper
|
||||
default:
|
||||
m = mapper()
|
||||
}
|
||||
|
||||
fields := m.TraversalsByName(base, columns)
|
||||
// if we are not unsafe and are missing fields, return an error
|
||||
if f, err := missingFields(fields); err != nil && !isUnsafe(rows) {
|
||||
return fmt.Errorf("missing destination name %s", columns[f])
|
||||
}
|
||||
values = make([]interface{}, len(columns))
|
||||
|
||||
for rows.Next() {
|
||||
// create a new struct type (which returns PtrTo) and indirect it
|
||||
vp = reflect.New(base)
|
||||
v = reflect.Indirect(vp)
|
||||
|
||||
err = fieldsByTraversal(v, fields, values, true)
|
||||
|
||||
// scan into the struct field pointers and append to our results
|
||||
err = rows.Scan(values...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if isPtr {
|
||||
direct.Set(reflect.Append(direct, vp))
|
||||
} else {
|
||||
direct.Set(reflect.Append(direct, v))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for rows.Next() {
|
||||
vp = reflect.New(base)
|
||||
err = rows.Scan(vp.Interface())
|
||||
// append
|
||||
if isPtr {
|
||||
direct.Set(reflect.Append(direct, vp))
|
||||
} else {
|
||||
direct.Set(reflect.Append(direct, reflect.Indirect(vp)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return rows.Err()
|
||||
}
|
||||
|
||||
// FIXME: StructScan was the very first bit of API in sqlx, and now unfortunately
|
||||
// it doesn't really feel like it's named properly. There is an incongruency
|
||||
// between this and the way that StructScan (which might better be ScanStruct
|
||||
// anyway) works on a rows object.
|
||||
|
||||
// StructScan all rows from an sql.Rows or an sqlx.Rows into the dest slice.
|
||||
// StructScan will scan in the entire rows result, so if you need do not want to
|
||||
// allocate structs for the entire result, use Queryx and see sqlx.Rows.StructScan.
|
||||
// If rows is sqlx.Rows, it will use its mapper, otherwise it will use the default.
|
||||
func StructScan(rows rowsi, dest interface{}) error {
|
||||
return scanAll(rows, dest, true)
|
||||
|
||||
}
|
||||
|
||||
// reflect helpers
|
||||
|
||||
func baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) {
|
||||
t = reflectx.Deref(t)
|
||||
if t.Kind() != expected {
|
||||
return nil, fmt.Errorf("expected %s but got %s", expected, t.Kind())
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// fieldsByName fills a values interface with fields from the passed value based
|
||||
// on the traversals in int. If ptrs is true, return addresses instead of values.
|
||||
// We write this instead of using FieldsByName to save allocations and map lookups
|
||||
// when iterating over many rows. Empty traversals will get an interface pointer.
|
||||
// Because of the necessity of requesting ptrs or values, it's considered a bit too
|
||||
// specialized for inclusion in reflectx itself.
|
||||
func fieldsByTraversal(v reflect.Value, traversals [][]int, values []interface{}, ptrs bool) error {
|
||||
v = reflect.Indirect(v)
|
||||
if v.Kind() != reflect.Struct {
|
||||
return errors.New("argument not a struct")
|
||||
}
|
||||
|
||||
for i, traversal := range traversals {
|
||||
if len(traversal) == 0 {
|
||||
values[i] = new(interface{})
|
||||
continue
|
||||
}
|
||||
f := reflectx.FieldByIndexes(v, traversal)
|
||||
if ptrs {
|
||||
values[i] = f.Addr().Interface()
|
||||
} else {
|
||||
values[i] = f.Interface()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func missingFields(transversals [][]int) (field int, err error) {
|
||||
for i, t := range transversals {
|
||||
if len(t) == 0 {
|
||||
return i, errors.New("missing field")
|
||||
}
|
||||
}
|
||||
return 0, nil
|
||||
}
|
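For reference, a minimal usage sketch of the helpers above (MustExec, Get and Row.MapScan). It is not part of this change set; the person table, its columns and the mattn/go-sqlite3 driver are assumptions chosen purely for illustration.

``` go
package main

import (
    "log"

    "github.com/jmoiron/sqlx"
    _ "github.com/mattn/go-sqlite3" // hypothetical driver choice for the sketch
)

// Person maps columns to fields via sqlx's db struct tags / default lowercasing.
type Person struct {
    FirstName string `db:"first_name"`
    LastName  string `db:"last_name"`
    Email     string
}

func main() {
    db, err := sqlx.Connect("sqlite3", ":memory:")
    if err != nil {
        log.Fatal(err)
    }

    // MustExec panics on error, which keeps throwaway setup code short.
    sqlx.MustExec(db, `CREATE TABLE person (first_name text, last_name text, email text)`)
    sqlx.MustExec(db, `INSERT INTO person VALUES (?, ?, ?)`, "Jane", "Doe", "jane@example.com")

    // Get scans a single row into a struct (or into a scannable base type).
    var p Person
    if err := sqlx.Get(db, &p, `SELECT * FROM person LIMIT 1`); err != nil {
        log.Fatal(err)
    }

    // MapScan is useful when the column set is not known in advance.
    row := map[string]interface{}{}
    if err := db.QueryRowx(`SELECT * FROM person LIMIT 1`).MapScan(row); err != nil {
        log.Fatal(err)
    }
    log.Printf("%s %s -> %v", p.FirstName, p.LastName, row["email"])
}
```

Note that the `?` bindvars are driver specific; with a PostgreSQL driver the placeholders would be `$1`, `$2`, and so on.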
1311
vendor/github.com/jmoiron/sqlx/sqlx_test.go
generated
vendored
1311
vendor/github.com/jmoiron/sqlx/sqlx_test.go
generated
vendored
File diff suppressed because it is too large
5
vendor/github.com/jmoiron/sqlx/types/README.md
generated
vendored
5
vendor/github.com/jmoiron/sqlx/types/README.md
generated
vendored
@ -1,5 +0,0 @@
# types

The types package provides some useful types which implement the `sql.Scanner`
and `driver.Valuer` interfaces, suitable for use as scan and value targets with
database/sql.
95
vendor/github.com/jmoiron/sqlx/types/types.go
generated
vendored
95
vendor/github.com/jmoiron/sqlx/types/types.go
generated
vendored
@ -1,95 +0,0 @@
package types

import (
    "bytes"
    "compress/gzip"
    "database/sql/driver"
    "encoding/json"
    "errors"

    "io/ioutil"
)

type GzippedText []byte

func (g GzippedText) Value() (driver.Value, error) {
    b := make([]byte, 0, len(g))
    buf := bytes.NewBuffer(b)
    w := gzip.NewWriter(buf)
    w.Write(g)
    w.Close()
    return buf.Bytes(), nil

}

func (g *GzippedText) Scan(src interface{}) error {
    var source []byte
    switch src.(type) {
    case string:
        source = []byte(src.(string))
    case []byte:
        source = src.([]byte)
    default:
        return errors.New("Incompatible type for GzippedText")
    }
    reader, err := gzip.NewReader(bytes.NewReader(source))
    defer reader.Close()
    b, err := ioutil.ReadAll(reader)
    if err != nil {
        return err
    }
    *g = GzippedText(b)
    return nil
}

// JsonText is a json.RawMessage, which is a []byte underneath.
// Value() validates the json format in the source, and returns an error if
// the json is not valid. Scan does no validation. JsonText additionally
// implements `Unmarshal`, which unmarshals the json within to an interface{}
type JsonText json.RawMessage

// MarshalJSON returns the *j as the JSON encoding of j.
func (j *JsonText) MarshalJSON() ([]byte, error) {
    return *j, nil
}

// UnmarshalJSON sets *j to a copy of data
func (j *JsonText) UnmarshalJSON(data []byte) error {
    if j == nil {
        return errors.New("JsonText: UnmarshalJSON on nil pointer")
    }
    *j = append((*j)[0:0], data...)
    return nil

}

// Value returns j as a value. This does a validating unmarshal into another
// RawMessage. If j is invalid json, it returns an error.
func (j JsonText) Value() (driver.Value, error) {
    var m json.RawMessage
    var err = j.Unmarshal(&m)
    if err != nil {
        return []byte{}, err
    }
    return []byte(j), nil
}

// Scan stores the src in *j. No validation is done.
func (j *JsonText) Scan(src interface{}) error {
    var source []byte
    switch src.(type) {
    case string:
        source = []byte(src.(string))
    case []byte:
        source = src.([]byte)
    default:
        return errors.New("Incompatible type for JsonText")
    }
    *j = JsonText(append((*j)[0:0], source...))
    return nil
}

// Unmarshal unmarshals the json in j to v, as in json.Unmarshal.
func (j *JsonText) Unmarshal(v interface{}) error {
    return json.Unmarshal([]byte(*j), v)
}
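As a companion to the file above, a small sketch of how GzippedText and JsonText behave as `driver.Valuer`/`sql.Scanner` pairs. The values are invented for illustration; in practice these types are used as struct fields that database/sql or sqlx scans into.

``` go
package main

import (
    "fmt"
    "log"

    "github.com/jmoiron/sqlx/types"
)

func main() {
    // JsonText: Value() validates the JSON, Unmarshal decodes it.
    j := types.JsonText(`{"name": "gopher", "port": 5432}`)
    if _, err := j.Value(); err != nil {
        log.Fatal(err) // only fires for malformed JSON
    }
    var cfg struct {
        Name string `json:"name"`
        Port int    `json:"port"`
    }
    if err := j.Unmarshal(&cfg); err != nil {
        log.Fatal(err)
    }
    fmt.Println(cfg.Name, cfg.Port)

    // GzippedText: compressed by Value(), decompressed again by Scan().
    g := types.GzippedText("a large text blob")
    v, err := g.Value()
    if err != nil {
        log.Fatal(err)
    }
    var out types.GzippedText
    if err := out.Scan(v); err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(out))
}
```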
42
vendor/github.com/jmoiron/sqlx/types/types_test.go
generated
vendored
42
vendor/github.com/jmoiron/sqlx/types/types_test.go
generated
vendored
@ -1,42 +0,0 @@
package types

import "testing"

func TestGzipText(t *testing.T) {
    g := GzippedText("Hello, world")
    v, err := g.Value()
    if err != nil {
        t.Errorf("Was not expecting an error")
    }
    err = (&g).Scan(v)
    if err != nil {
        t.Errorf("Was not expecting an error")
    }
    if string(g) != "Hello, world" {
        t.Errorf("Was expecting the string we sent in (Hello World), got %s", string(g))
    }
}

func TestJsonText(t *testing.T) {
    j := JsonText(`{"foo": 1, "bar": 2}`)
    v, err := j.Value()
    if err != nil {
        t.Errorf("Was not expecting an error")
    }
    err = (&j).Scan(v)
    if err != nil {
        t.Errorf("Was not expecting an error")
    }
    m := map[string]interface{}{}
    j.Unmarshal(&m)

    if m["foo"].(float64) != 1 || m["bar"].(float64) != 2 {
        t.Errorf("Expected valid json but got some garbage instead? %#v", m)
    }

    j = JsonText(`{"foo": 1, invalid, false}`)
    v, err = j.Value()
    if err == nil {
        t.Errorf("Was expecting invalid json to fail!")
    }
}
191
vendor/github.com/juju/errors/LICENSE
generated
vendored
191
vendor/github.com/juju/errors/LICENSE
generated
vendored
@ -1,191 +0,0 @@
|
||||
All files in this repository are licensed as follows. If you contribute
|
||||
to this repository, it is assumed that you license your contribution
|
||||
under the same license unless you state otherwise.
|
||||
|
||||
All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
|
||||
|
||||
This software is licensed under the LGPLv3, included below.
|
||||
|
||||
As a special exception to the GNU Lesser General Public License version 3
|
||||
("LGPL3"), the copyright holders of this Library give you permission to
|
||||
convey to a third party a Combined Work that links statically or dynamically
|
||||
to this Library without providing any Minimal Corresponding Source or
|
||||
Minimal Application Code as set out in 4d or providing the installation
|
||||
information set out in section 4e, provided that you comply with the other
|
||||
provisions of LGPL3 and provided that you meet, for the Application the
|
||||
terms and conditions of the license(s) which apply to the Application.
|
||||
|
||||
Except as stated in this special exception, the provisions of LGPL3 will
|
||||
continue to comply in full to this Library. If you modify this Library, you
|
||||
may apply this exception to your version of this Library, but you are not
|
||||
obliged to do so. If you do not wish to do so, delete this exception
|
||||
statement from your version. This exception does not (and cannot) modify any
|
||||
license terms which apply to the Application, with which you must still
|
||||
comply.
|
||||
|
||||
|
||||
GNU LESSER GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
|
||||
This version of the GNU Lesser General Public License incorporates
|
||||
the terms and conditions of version 3 of the GNU General Public
|
||||
License, supplemented by the additional permissions listed below.
|
||||
|
||||
0. Additional Definitions.
|
||||
|
||||
As used herein, "this License" refers to version 3 of the GNU Lesser
|
||||
General Public License, and the "GNU GPL" refers to version 3 of the GNU
|
||||
General Public License.
|
||||
|
||||
"The Library" refers to a covered work governed by this License,
|
||||
other than an Application or a Combined Work as defined below.
|
||||
|
||||
An "Application" is any work that makes use of an interface provided
|
||||
by the Library, but which is not otherwise based on the Library.
|
||||
Defining a subclass of a class defined by the Library is deemed a mode
|
||||
of using an interface provided by the Library.
|
||||
|
||||
A "Combined Work" is a work produced by combining or linking an
|
||||
Application with the Library. The particular version of the Library
|
||||
with which the Combined Work was made is also called the "Linked
|
||||
Version".
|
||||
|
||||
The "Minimal Corresponding Source" for a Combined Work means the
|
||||
Corresponding Source for the Combined Work, excluding any source code
|
||||
for portions of the Combined Work that, considered in isolation, are
|
||||
based on the Application, and not on the Linked Version.
|
||||
|
||||
The "Corresponding Application Code" for a Combined Work means the
|
||||
object code and/or source code for the Application, including any data
|
||||
and utility programs needed for reproducing the Combined Work from the
|
||||
Application, but excluding the System Libraries of the Combined Work.
|
||||
|
||||
1. Exception to Section 3 of the GNU GPL.
|
||||
|
||||
You may convey a covered work under sections 3 and 4 of this License
|
||||
without being bound by section 3 of the GNU GPL.
|
||||
|
||||
2. Conveying Modified Versions.
|
||||
|
||||
If you modify a copy of the Library, and, in your modifications, a
|
||||
facility refers to a function or data to be supplied by an Application
|
||||
that uses the facility (other than as an argument passed when the
|
||||
facility is invoked), then you may convey a copy of the modified
|
||||
version:
|
||||
|
||||
a) under this License, provided that you make a good faith effort to
|
||||
ensure that, in the event an Application does not supply the
|
||||
function or data, the facility still operates, and performs
|
||||
whatever part of its purpose remains meaningful, or
|
||||
|
||||
b) under the GNU GPL, with none of the additional permissions of
|
||||
this License applicable to that copy.
|
||||
|
||||
3. Object Code Incorporating Material from Library Header Files.
|
||||
|
||||
The object code form of an Application may incorporate material from
|
||||
a header file that is part of the Library. You may convey such object
|
||||
code under terms of your choice, provided that, if the incorporated
|
||||
material is not limited to numerical parameters, data structure
|
||||
layouts and accessors, or small macros, inline functions and templates
|
||||
(ten or fewer lines in length), you do both of the following:
|
||||
|
||||
a) Give prominent notice with each copy of the object code that the
|
||||
Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
b) Accompany the object code with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
4. Combined Works.
|
||||
|
||||
You may convey a Combined Work under terms of your choice that,
|
||||
taken together, effectively do not restrict modification of the
|
||||
portions of the Library contained in the Combined Work and reverse
|
||||
engineering for debugging such modifications, if you also do each of
|
||||
the following:
|
||||
|
||||
a) Give prominent notice with each copy of the Combined Work that
|
||||
the Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
b) Accompany the Combined Work with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
c) For a Combined Work that displays copyright notices during
|
||||
execution, include the copyright notice for the Library among
|
||||
these notices, as well as a reference directing the user to the
|
||||
copies of the GNU GPL and this license document.
|
||||
|
||||
d) Do one of the following:
|
||||
|
||||
0) Convey the Minimal Corresponding Source under the terms of this
|
||||
License, and the Corresponding Application Code in a form
|
||||
suitable for, and under terms that permit, the user to
|
||||
recombine or relink the Application with a modified version of
|
||||
the Linked Version to produce a modified Combined Work, in the
|
||||
manner specified by section 6 of the GNU GPL for conveying
|
||||
Corresponding Source.
|
||||
|
||||
1) Use a suitable shared library mechanism for linking with the
|
||||
Library. A suitable mechanism is one that (a) uses at run time
|
||||
a copy of the Library already present on the user's computer
|
||||
system, and (b) will operate properly with a modified version
|
||||
of the Library that is interface-compatible with the Linked
|
||||
Version.
|
||||
|
||||
e) Provide Installation Information, but only if you would otherwise
|
||||
be required to provide such information under section 6 of the
|
||||
GNU GPL, and only to the extent that such information is
|
||||
necessary to install and execute a modified version of the
|
||||
Combined Work produced by recombining or relinking the
|
||||
Application with a modified version of the Linked Version. (If
|
||||
you use option 4d0, the Installation Information must accompany
|
||||
the Minimal Corresponding Source and Corresponding Application
|
||||
Code. If you use option 4d1, you must provide the Installation
|
||||
Information in the manner specified by section 6 of the GNU GPL
|
||||
for conveying Corresponding Source.)
|
||||
|
||||
5. Combined Libraries.
|
||||
|
||||
You may place library facilities that are a work based on the
|
||||
Library side by side in a single library together with other library
|
||||
facilities that are not Applications and are not covered by this
|
||||
License, and convey such a combined library under terms of your
|
||||
choice, if you do both of the following:
|
||||
|
||||
a) Accompany the combined library with a copy of the same work based
|
||||
on the Library, uncombined with any other library facilities,
|
||||
conveyed under the terms of this License.
|
||||
|
||||
b) Give prominent notice with the combined library that part of it
|
||||
is a work based on the Library, and explaining where to find the
|
||||
accompanying uncombined form of the same work.
|
||||
|
||||
6. Revised Versions of the GNU Lesser General Public License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions
|
||||
of the GNU Lesser General Public License from time to time. Such new
|
||||
versions will be similar in spirit to the present version, but may
|
||||
differ in detail to address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Library as you received it specifies that a certain numbered version
|
||||
of the GNU Lesser General Public License "or any later version"
|
||||
applies to it, you have the option of following the terms and
|
||||
conditions either of that published version or of any later version
|
||||
published by the Free Software Foundation. If the Library as you
|
||||
received it does not specify a version number of the GNU Lesser
|
||||
General Public License, you may choose any version of the GNU Lesser
|
||||
General Public License ever published by the Free Software Foundation.
|
||||
|
||||
If the Library as you received it specifies that a proxy can decide
|
||||
whether future versions of the GNU Lesser General Public License shall
|
||||
apply, that proxy's public statement of acceptance of any version is
|
||||
permanent authorization for you to choose that version for the
|
||||
Library.
|
11
vendor/github.com/juju/errors/Makefile
generated
vendored
11
vendor/github.com/juju/errors/Makefile
generated
vendored
@ -1,11 +0,0 @@
default: check

check:
	go test && go test -compiler gccgo

docs:
	godoc2md github.com/juju/errors > README.md
	sed -i 's|\[godoc-link-here\]|[![GoDoc](https://godoc.org/github.com/juju/errors?status.svg)](https://godoc.org/github.com/juju/errors)|' README.md


.PHONY: default check docs
536
vendor/github.com/juju/errors/README.md
generated
vendored
536
vendor/github.com/juju/errors/README.md
generated
vendored
@ -1,536 +0,0 @@
|
||||
|
||||
# errors
|
||||
import "github.com/juju/errors"
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/juju/errors?status.svg)](https://godoc.org/github.com/juju/errors)
|
||||
|
||||
The juju/errors package provides an easy way to annotate errors without losing the
|
||||
original error context.
|
||||
|
||||
The exported `New` and `Errorf` functions are designed to replace the
|
||||
`errors.New` and `fmt.Errorf` functions respectively. The same underlying
|
||||
error is there, but the package also records the location at which the error
|
||||
was created.
|
||||
|
||||
A primary use case for this library is to add extra context any time an
|
||||
error is returned from a function.
|
||||
|
||||
|
||||
if err := SomeFunc(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
This instead becomes:
|
||||
|
||||
|
||||
if err := SomeFunc(); err != nil {
|
||||
return errors.Trace(err)
|
||||
}
|
||||
|
||||
which just records the file and line number of the Trace call, or
|
||||
|
||||
|
||||
if err := SomeFunc(); err != nil {
|
||||
return errors.Annotate(err, "more context")
|
||||
}
|
||||
|
||||
which also adds an annotation to the error.
|
||||
|
||||
When you want to check to see if an error is of a particular type, a helper
|
||||
function is normally exported by the package that returned the error, like the
|
||||
`os` package does. The underlying cause of the error is available using the
|
||||
`Cause` function.
|
||||
|
||||
|
||||
os.IsNotExist(errors.Cause(err))
|
||||
|
||||
The result of the `Error()` call on an annotated error is the annotations joined
|
||||
with colons, then the result of the `Error()` method for the underlying error
|
||||
that was the cause.
|
||||
|
||||
|
||||
err := errors.Errorf("original")
|
||||
err = errors.Annotatef(err, "context")
|
||||
err = errors.Annotatef(err, "more context")
|
||||
err.Error() -> "more context: context: original"
|
||||
|
||||
Obviously recording the file, line and functions is not very useful if you
|
||||
cannot get them back out again.
|
||||
|
||||
|
||||
errors.ErrorStack(err)
|
||||
|
||||
will return something like:
|
||||
|
||||
|
||||
first error
|
||||
github.com/juju/errors/annotation_test.go:193:
|
||||
github.com/juju/errors/annotation_test.go:194: annotation
|
||||
github.com/juju/errors/annotation_test.go:195:
|
||||
github.com/juju/errors/annotation_test.go:196: more context
|
||||
github.com/juju/errors/annotation_test.go:197:
|
||||
|
||||
The first error was generated by an external system, so there was no location
|
||||
associated. The second, fourth, and last lines were generated with Trace calls,
|
||||
and the other two through Annotate.
|
||||
|
||||
Sometimes when responding to an error you want to return a more specific error
|
||||
for the situation.
|
||||
|
||||
|
||||
if err := FindField(field); err != nil {
|
||||
return errors.Wrap(err, errors.NotFoundf(field))
|
||||
}
|
||||
|
||||
This returns an error where the complete error stack is still available, and
|
||||
`errors.Cause()` will return the `NotFound` error.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## func AlreadyExistsf
|
||||
``` go
|
||||
func AlreadyExistsf(format string, args ...interface{}) error
|
||||
```
|
||||
AlreadyExistsf returns an error which satisfies IsAlreadyExists().
|
||||
|
||||
|
||||
## func Annotate
|
||||
``` go
|
||||
func Annotate(other error, message string) error
|
||||
```
|
||||
Annotate is used to add extra context to an existing error. The location of
|
||||
the Annotate call is recorded with the annotations. The file, line and
|
||||
function are also recorded.
|
||||
|
||||
For example:
|
||||
|
||||
|
||||
if err := SomeFunc(); err != nil {
|
||||
return errors.Annotate(err, "failed to frombulate")
|
||||
}
|
||||
|
||||
|
||||
## func Annotatef
|
||||
``` go
|
||||
func Annotatef(other error, format string, args ...interface{}) error
|
||||
```
|
||||
Annotatef is used to add extra context to an existing error. The location of
|
||||
the Annotate call is recorded with the annotations. The file, line and
|
||||
function are also recorded.
|
||||
|
||||
For example:
|
||||
|
||||
|
||||
if err := SomeFunc(); err != nil {
|
||||
return errors.Annotatef(err, "failed to frombulate the %s", arg)
|
||||
}
|
||||
|
||||
|
||||
## func Cause
|
||||
``` go
|
||||
func Cause(err error) error
|
||||
```
|
||||
Cause returns the cause of the given error. This will be either the
|
||||
original error, or the result of a Wrap or Mask call.
|
||||
|
||||
Cause is the usual way to diagnose errors that may have been wrapped by
|
||||
the other errors functions.
|
||||
|
||||
|
||||
## func DeferredAnnotatef
|
||||
``` go
|
||||
func DeferredAnnotatef(err *error, format string, args ...interface{})
|
||||
```
|
||||
DeferredAnnotatef annotates the given error (when it is not nil) with the given
|
||||
format string and arguments (like fmt.Sprintf). If *err is nil, DeferredAnnotatef
|
||||
does nothing. This method is used in a defer statement in order to annotate any
|
||||
resulting error with the same message.
|
||||
|
||||
For example:
|
||||
|
||||
|
||||
defer DeferredAnnotatef(&err, "failed to frombulate the %s", arg)
|
||||
|
||||
|
||||
## func Details
|
||||
``` go
|
||||
func Details(err error) string
|
||||
```
|
||||
Details returns information about the stack of errors wrapped by err, in
|
||||
the format:
|
||||
|
||||
|
||||
[{filename:99: error one} {otherfile:55: cause of error one}]
|
||||
|
||||
This is a terse alternative to ErrorStack as it returns a single line.
|
||||
|
||||
|
||||
## func ErrorStack
|
||||
``` go
|
||||
func ErrorStack(err error) string
|
||||
```
|
||||
ErrorStack returns a string representation of the annotated error. If the
|
||||
error passed as the parameter is not an annotated error, the result is
|
||||
simply the result of the Error() method on that error.
|
||||
|
||||
If the error is an annotated error, a multi-line string is returned where
|
||||
each line represents one entry in the annotation stack. The full filename
|
||||
from the call stack is used in the output.
|
||||
|
||||
|
||||
first error
|
||||
github.com/juju/errors/annotation_test.go:193:
|
||||
github.com/juju/errors/annotation_test.go:194: annotation
|
||||
github.com/juju/errors/annotation_test.go:195:
|
||||
github.com/juju/errors/annotation_test.go:196: more context
|
||||
github.com/juju/errors/annotation_test.go:197:
|
||||
|
||||
|
||||
## func Errorf
|
||||
``` go
|
||||
func Errorf(format string, args ...interface{}) error
|
||||
```
|
||||
Errorf creates a new annotated error and records the location that the
|
||||
error is created. This should be a drop in replacement for fmt.Errorf.
|
||||
|
||||
For example:
|
||||
|
||||
|
||||
return errors.Errorf("validation failed: %s", message)
|
||||
|
||||
|
||||
## func IsAlreadyExists
|
||||
``` go
|
||||
func IsAlreadyExists(err error) bool
|
||||
```
|
||||
IsAlreadyExists reports whether the error was created with
|
||||
AlreadyExistsf() or NewAlreadyExists().
|
||||
|
||||
|
||||
## func IsNotFound
|
||||
``` go
|
||||
func IsNotFound(err error) bool
|
||||
```
|
||||
IsNotFound reports whether err was created with NotFoundf() or
|
||||
NewNotFound().
|
||||
|
||||
|
||||
## func IsNotImplemented
|
||||
``` go
|
||||
func IsNotImplemented(err error) bool
|
||||
```
|
||||
IsNotImplemented reports whether err was created with
|
||||
NotImplementedf() or NewNotImplemented().
|
||||
|
||||
|
||||
## func IsNotSupported
|
||||
``` go
|
||||
func IsNotSupported(err error) bool
|
||||
```
|
||||
IsNotSupported reports whether the error was created with
|
||||
NotSupportedf() or NewNotSupported().
|
||||
|
||||
|
||||
## func IsNotValid
|
||||
``` go
|
||||
func IsNotValid(err error) bool
|
||||
```
|
||||
IsNotValid reports whether the error was created with NotValidf() or
|
||||
NewNotValid().
|
||||
|
||||
|
||||
## func IsUnauthorized
|
||||
``` go
|
||||
func IsUnauthorized(err error) bool
|
||||
```
|
||||
IsUnauthorized reports whether err was created with Unauthorizedf() or
|
||||
NewUnauthorized().
|
||||
|
||||
|
||||
## func Mask
|
||||
``` go
|
||||
func Mask(other error) error
|
||||
```
|
||||
Mask hides the underlying error type, and records the location of the masking.
|
||||
|
||||
|
||||
## func Maskf
|
||||
``` go
|
||||
func Maskf(other error, format string, args ...interface{}) error
|
||||
```
|
||||
Maskf masks the given error with the given format string and arguments (like
|
||||
fmt.Sprintf), returning a new error that maintains the error stack, but
|
||||
hides the underlying error type. The error string still contains the full
|
||||
annotations. If you want to hide the annotations, call Wrap.
|
||||
|
||||
|
||||
## func New
|
||||
``` go
|
||||
func New(message string) error
|
||||
```
|
||||
New is a drop-in replacement for the standard library errors package that records
|
||||
the location that the error is created.
|
||||
|
||||
For example:
|
||||
|
||||
|
||||
return errors.New("validation failed")
|
||||
|
||||
|
||||
## func NewAlreadyExists
|
||||
``` go
|
||||
func NewAlreadyExists(err error, msg string) error
|
||||
```
|
||||
NewAlreadyExists returns an error which wraps err and satisfies
|
||||
IsAlreadyExists().
|
||||
|
||||
|
||||
## func NewNotFound
|
||||
``` go
|
||||
func NewNotFound(err error, msg string) error
|
||||
```
|
||||
NewNotFound returns an error which wraps err that satisfies
|
||||
IsNotFound().
|
||||
|
||||
|
||||
## func NewNotImplemented
|
||||
``` go
|
||||
func NewNotImplemented(err error, msg string) error
|
||||
```
|
||||
NewNotImplemented returns an error which wraps err and satisfies
|
||||
IsNotImplemented().
|
||||
|
||||
|
||||
## func NewNotSupported
|
||||
``` go
|
||||
func NewNotSupported(err error, msg string) error
|
||||
```
|
||||
NewNotSupported returns an error which wraps err and satisfies
|
||||
IsNotSupported().
|
||||
|
||||
|
||||
## func NewNotValid
|
||||
``` go
|
||||
func NewNotValid(err error, msg string) error
|
||||
```
|
||||
NewNotValid returns an error which wraps err and satisfies IsNotValid().
|
||||
|
||||
|
||||
## func NewUnauthorized
|
||||
``` go
|
||||
func NewUnauthorized(err error, msg string) error
|
||||
```
|
||||
NewUnauthorized returns an error which wraps err and satisfies
|
||||
IsUnauthorized().
|
||||
|
||||
|
||||
## func NotFoundf
|
||||
``` go
|
||||
func NotFoundf(format string, args ...interface{}) error
|
||||
```
|
||||
NotFoundf returns an error which satisfies IsNotFound().
|
||||
|
||||
|
||||
## func NotImplementedf
|
||||
``` go
|
||||
func NotImplementedf(format string, args ...interface{}) error
|
||||
```
|
||||
NotImplementedf returns an error which satisfies IsNotImplemented().
|
||||
|
||||
|
||||
## func NotSupportedf
|
||||
``` go
|
||||
func NotSupportedf(format string, args ...interface{}) error
|
||||
```
|
||||
NotSupportedf returns an error which satisfies IsNotSupported().
|
||||
|
||||
|
||||
## func NotValidf
|
||||
``` go
|
||||
func NotValidf(format string, args ...interface{}) error
|
||||
```
|
||||
NotValidf returns an error which satisfies IsNotValid().
|
||||
|
||||
|
||||
## func Trace
|
||||
``` go
|
||||
func Trace(other error) error
|
||||
```
|
||||
Trace adds the location of the Trace call to the stack. The Cause of the
|
||||
resulting error is the same as the error parameter. If the other error is
|
||||
nil, the result will be nil.
|
||||
|
||||
For example:
|
||||
|
||||
|
||||
if err := SomeFunc(); err != nil {
|
||||
return errors.Trace(err)
|
||||
}
|
||||
|
||||
|
||||
## func Unauthorizedf
|
||||
``` go
|
||||
func Unauthorizedf(format string, args ...interface{}) error
|
||||
```
|
||||
Unauthorizedf returns an error which satisfies IsUnauthorized().
|
||||
|
||||
|
||||
## func Wrap
|
||||
``` go
|
||||
func Wrap(other, newDescriptive error) error
|
||||
```
|
||||
Wrap changes the Cause of the error. The location of the Wrap call is also
|
||||
stored in the error stack.
|
||||
|
||||
For example:
|
||||
|
||||
|
||||
if err := SomeFunc(); err != nil {
|
||||
newErr := &packageError{"more context", private_value}
|
||||
return errors.Wrap(err, newErr)
|
||||
}
|
||||
|
||||
|
||||
## func Wrapf
|
||||
``` go
|
||||
func Wrapf(other, newDescriptive error, format string, args ...interface{}) error
|
||||
```
|
||||
Wrapf changes the Cause of the error, and adds an annotation. The location
|
||||
of the Wrap call is also stored in the error stack.
|
||||
|
||||
For example:
|
||||
|
||||
|
||||
if err := SomeFunc(); err != nil {
|
||||
return errors.Wrapf(err, simpleErrorType, "invalid value %q", value)
|
||||
}
|
||||
|
||||
|
||||
|
||||
## type Err
|
||||
``` go
|
||||
type Err struct {
|
||||
// contains filtered or unexported fields
|
||||
}
|
||||
```
|
||||
Err holds a description of an error along with information about
|
||||
where the error was created.
|
||||
|
||||
It may be embedded in custom error types to add extra information that
|
||||
this errors package can understand.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
### func NewErr
|
||||
``` go
|
||||
func NewErr(format string, args ...interface{}) Err
|
||||
```
|
||||
NewErr is used to return an Err for the purpose of embedding in other
|
||||
structures. The location is not specified, and needs to be set with a call
|
||||
to SetLocation.
|
||||
|
||||
For example:
|
||||
|
||||
|
||||
type FooError struct {
|
||||
errors.Err
|
||||
code int
|
||||
}
|
||||
|
||||
func NewFooError(code int) error {
|
||||
err := &FooError{errors.NewErr("foo"), code}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
### func (\*Err) Cause
|
||||
``` go
|
||||
func (e *Err) Cause() error
|
||||
```
|
||||
The Cause of an error is the most recent error in the error stack that
|
||||
meets one of these criteria: the original error that was raised; the new
|
||||
error that was passed into the Wrap function; the most recently masked
|
||||
error; or nil if the error itself is considered the Cause. Normally this
|
||||
method is not invoked directly, but instead through the Cause stand alone
|
||||
function.
|
||||
|
||||
|
||||
|
||||
### func (\*Err) Error
|
||||
``` go
|
||||
func (e *Err) Error() string
|
||||
```
|
||||
Error implements error.Error.
|
||||
|
||||
|
||||
|
||||
### func (\*Err) Location
|
||||
``` go
|
||||
func (e *Err) Location() (filename string, line int)
|
||||
```
|
||||
Location is the file and line of where the error was most recently
|
||||
created or annotated.
|
||||
|
||||
|
||||
|
||||
### func (\*Err) Message
|
||||
``` go
|
||||
func (e *Err) Message() string
|
||||
```
|
||||
Message returns the message stored with the most recent location. This is
|
||||
the empty string if the most recent call was Trace, or the message stored
|
||||
with Annotate or Mask.
|
||||
|
||||
|
||||
|
||||
### func (\*Err) SetLocation
|
||||
``` go
|
||||
func (e *Err) SetLocation(callDepth int)
|
||||
```
|
||||
SetLocation records the source location of the error at callDepth stack
|
||||
frames above the call.
|
||||
|
||||
|
||||
|
||||
### func (\*Err) StackTrace
|
||||
``` go
|
||||
func (e *Err) StackTrace() []string
|
||||
```
|
||||
StackTrace returns one string for each location recorded in the stack of
|
||||
errors. The first value is the originating error, with a line for each
|
||||
other annotation or tracing of the error.
|
||||
|
||||
|
||||
|
||||
### func (\*Err) Underlying
|
||||
``` go
|
||||
func (e *Err) Underlying() error
|
||||
```
|
||||
Underlying returns the previous error in the error stack, if any. A client
|
||||
should not ever really call this method. It is used to build the error
|
||||
stack and should not be introspected by client calls. Or more
|
||||
specifically, clients should not depend on anything but the `Cause` of an
|
||||
error.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
- - -
|
||||
Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
|
81
vendor/github.com/juju/errors/doc.go
generated
vendored
81
vendor/github.com/juju/errors/doc.go
generated
vendored
@ -1,81 +0,0 @@
// Copyright 2013, 2014 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.

/*
[godoc-link-here]

The juju/errors package provides an easy way to annotate errors without losing the
original error context.

The exported `New` and `Errorf` functions are designed to replace the
`errors.New` and `fmt.Errorf` functions respectively. The same underlying
error is there, but the package also records the location at which the error
was created.

A primary use case for this library is to add extra context any time an
error is returned from a function.

    if err := SomeFunc(); err != nil {
        return err
    }

This instead becomes:

    if err := SomeFunc(); err != nil {
        return errors.Trace(err)
    }

which just records the file and line number of the Trace call, or

    if err := SomeFunc(); err != nil {
        return errors.Annotate(err, "more context")
    }

which also adds an annotation to the error.

When you want to check to see if an error is of a particular type, a helper
function is normally exported by the package that returned the error, like the
`os` package does. The underlying cause of the error is available using the
`Cause` function.

    os.IsNotExist(errors.Cause(err))

The result of the `Error()` call on an annotated error is the annotations joined
with colons, then the result of the `Error()` method for the underlying error
that was the cause.

    err := errors.Errorf("original")
    err = errors.Annotatef(err, "context")
    err = errors.Annotatef(err, "more context")
    err.Error() -> "more context: context: original"

Obviously recording the file, line and functions is not very useful if you
cannot get them back out again.

    errors.ErrorStack(err)

will return something like:

    first error
    github.com/juju/errors/annotation_test.go:193:
    github.com/juju/errors/annotation_test.go:194: annotation
    github.com/juju/errors/annotation_test.go:195:
    github.com/juju/errors/annotation_test.go:196: more context
    github.com/juju/errors/annotation_test.go:197:

The first error was generated by an external system, so there was no location
associated. The second, fourth, and last lines were generated with Trace calls,
and the other two through Annotate.

Sometimes when responding to an error you want to return a more specific error
for the situation.

    if err := FindField(field); err != nil {
        return errors.Wrap(err, errors.NotFoundf(field))
    }

This returns an error where the complete error stack is still available, and
`errors.Cause()` will return the `NotFound` error.

*/
package errors
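A short end-to-end sketch of the workflow described in the package documentation above, using only calls documented in this package (NotFoundf, Annotate, Trace, IsNotFound, ErrorStack). The user-lookup scenario is invented for illustration.

``` go
package main

import (
    "fmt"

    "github.com/juju/errors"
)

// findUser stands in for a lookup that can fail with a typed error.
func findUser(name string) error {
    return errors.NotFoundf("user %q", name) // satisfies errors.IsNotFound
}

func loadProfile(name string) error {
    if err := findUser(name); err != nil {
        // Annotate adds context while keeping the original cause intact.
        return errors.Annotate(err, "loading profile")
    }
    return nil
}

func main() {
    err := errors.Trace(loadProfile("jane"))
    fmt.Println(err)                    // loading profile: user "jane" not found
    fmt.Println(errors.IsNotFound(err)) // true, via errors.Cause under the hood
    fmt.Print(errors.ErrorStack(err))   // one line per Trace/Annotate location
}
```

Because IsNotFound calls Cause internally, the annotation and trace layers added along the way do not hide the original typed error.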
122
vendor/github.com/juju/errors/error.go
generated
vendored
122
vendor/github.com/juju/errors/error.go
generated
vendored
@ -1,122 +0,0 @@
// Copyright 2014 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.

package errors

import (
    "fmt"
    "reflect"
    "runtime"
)

// Err holds a description of an error along with information about
// where the error was created.
//
// It may be embedded in custom error types to add extra information that
// this errors package can understand.
type Err struct {
    // message holds an annotation of the error.
    message string

    // cause holds the cause of the error as returned
    // by the Cause method.
    cause error

    // previous holds the previous error in the error stack, if any.
    previous error

    // file and line hold the source code location where the error was
    // created.
    file string
    line int
}

// NewErr is used to return an Err for the purpose of embedding in other
// structures. The location is not specified, and needs to be set with a call
// to SetLocation.
//
// For example:
//    type FooError struct {
//        errors.Err
//        code int
//    }
//
//    func NewFooError(code int) error {
//        err := &FooError{errors.NewErr("foo"), code}
//        err.SetLocation(1)
//        return err
//    }
func NewErr(format string, args ...interface{}) Err {
    return Err{
        message: fmt.Sprintf(format, args...),
    }
}

// Location is the file and line of where the error was most recently
// created or annotated.
func (e *Err) Location() (filename string, line int) {
    return e.file, e.line
}

// Underlying returns the previous error in the error stack, if any. A client
// should not ever really call this method. It is used to build the error
// stack and should not be introspected by client calls. Or more
// specifically, clients should not depend on anything but the `Cause` of an
// error.
func (e *Err) Underlying() error {
    return e.previous
}

// The Cause of an error is the most recent error in the error stack that
// meets one of these criteria: the original error that was raised; the new
// error that was passed into the Wrap function; the most recently masked
// error; or nil if the error itself is considered the Cause. Normally this
// method is not invoked directly, but instead through the Cause stand alone
// function.
func (e *Err) Cause() error {
    return e.cause
}

// Message returns the message stored with the most recent location. This is
// the empty string if the most recent call was Trace, or the message stored
// with Annotate or Mask.
func (e *Err) Message() string {
    return e.message
}

// Error implements error.Error.
func (e *Err) Error() string {
    // We want to walk up the stack of errors showing the annotations
    // as long as the cause is the same.
    err := e.previous
    if !sameError(Cause(err), e.cause) && e.cause != nil {
        err = e.cause
    }
    switch {
    case err == nil:
        return e.message
    case e.message == "":
        return err.Error()
    }
    return fmt.Sprintf("%s: %v", e.message, err)
}

// SetLocation records the source location of the error at callDepth stack
// frames above the call.
func (e *Err) SetLocation(callDepth int) {
    _, file, line, _ := runtime.Caller(callDepth + 1)
    e.file = trimGoPath(file)
    e.line = line
}

// StackTrace returns one string for each location recorded in the stack of
// errors. The first value is the originating error, with a line for each
// other annotation or tracing of the error.
func (e *Err) StackTrace() []string {
    return errorStack(e)
}

// Ideally we'd have a way to check identity, but deep equals will do.
func sameError(e1, e2 error) bool {
    return reflect.DeepEqual(e1, e2)
}
161
vendor/github.com/juju/errors/error_test.go
generated
vendored
161
vendor/github.com/juju/errors/error_test.go
generated
vendored
@ -1,161 +0,0 @@
|
||||
// Copyright 2014 Canonical Ltd.
|
||||
// Licensed under the LGPLv3, see LICENCE file for details.
|
||||
|
||||
package errors_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
jc "github.com/juju/testing/checkers"
|
||||
gc "gopkg.in/check.v1"
|
||||
|
||||
"github.com/juju/errors"
|
||||
)
|
||||
|
||||
type errorsSuite struct{}
|
||||
|
||||
var _ = gc.Suite(&errorsSuite{})
|
||||
|
||||
var someErr = errors.New("some error") //err varSomeErr
|
||||
|
||||
func (*errorsSuite) TestErrorString(c *gc.C) {
|
||||
for i, test := range []struct {
|
||||
message string
|
||||
generator func() error
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
message: "uncomparable errors",
|
||||
generator: func() error {
|
||||
err := errors.Annotatef(newNonComparableError("uncomparable"), "annotation")
|
||||
return errors.Annotatef(err, "another")
|
||||
},
|
||||
expected: "another: annotation: uncomparable",
|
||||
}, {
|
||||
message: "Errorf",
|
||||
generator: func() error {
|
||||
return errors.Errorf("first error")
|
||||
},
|
||||
expected: "first error",
|
||||
}, {
|
||||
message: "annotated error",
|
||||
generator: func() error {
|
||||
err := errors.Errorf("first error")
|
||||
return errors.Annotatef(err, "annotation")
|
||||
},
|
||||
expected: "annotation: first error",
|
||||
}, {
|
||||
message: "test annotation format",
|
||||
generator: func() error {
|
||||
err := errors.Errorf("first %s", "error")
|
||||
return errors.Annotatef(err, "%s", "annotation")
|
||||
},
|
||||
expected: "annotation: first error",
|
||||
}, {
|
||||
message: "wrapped error",
|
||||
generator: func() error {
|
||||
err := newError("first error")
|
||||
return errors.Wrap(err, newError("detailed error"))
|
||||
},
|
||||
expected: "detailed error",
|
||||
}, {
|
||||
message: "wrapped annotated error",
|
||||
generator: func() error {
|
||||
err := errors.Errorf("first error")
|
||||
err = errors.Annotatef(err, "annotated")
|
||||
return errors.Wrap(err, fmt.Errorf("detailed error"))
|
||||
},
|
||||
expected: "detailed error",
|
||||
}, {
|
||||
message: "annotated wrapped error",
|
||||
generator: func() error {
|
||||
err := errors.Errorf("first error")
|
||||
err = errors.Wrap(err, fmt.Errorf("detailed error"))
|
||||
return errors.Annotatef(err, "annotated")
|
||||
},
|
||||
expected: "annotated: detailed error",
|
||||
}, {
|
||||
message: "traced, and annotated",
|
||||
generator: func() error {
|
||||
err := errors.New("first error")
|
||||
err = errors.Trace(err)
|
||||
err = errors.Annotate(err, "some context")
|
||||
err = errors.Trace(err)
|
||||
err = errors.Annotate(err, "more context")
|
||||
return errors.Trace(err)
|
||||
},
|
||||
expected: "more context: some context: first error",
|
||||
}, {
|
||||
message: "traced, and annotated, masked and annotated",
|
||||
generator: func() error {
|
||||
err := errors.New("first error")
|
||||
err = errors.Trace(err)
|
||||
err = errors.Annotate(err, "some context")
|
||||
err = errors.Maskf(err, "masked")
|
||||
err = errors.Annotate(err, "more context")
|
||||
return errors.Trace(err)
|
||||
},
|
||||
expected: "more context: masked: some context: first error",
|
||||
},
|
||||
} {
|
||||
c.Logf("%v: %s", i, test.message)
|
||||
err := test.generator()
|
||||
ok := c.Check(err.Error(), gc.Equals, test.expected)
|
||||
if !ok {
|
||||
c.Logf("%#v", test.generator())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type embed struct {
|
||||
errors.Err
|
||||
}
|
||||
|
||||
func newEmbed(format string, args ...interface{}) *embed {
|
||||
err := &embed{errors.NewErr(format, args...)}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
func (*errorsSuite) TestNewErr(c *gc.C) {
|
||||
if runtime.Compiler == "gccgo" {
|
||||
c.Skip("gccgo can't determine the location")
|
||||
}
|
||||
err := newEmbed("testing %d", 42) //err embedErr
|
||||
c.Assert(err.Error(), gc.Equals, "testing 42")
|
||||
c.Assert(errors.Cause(err), gc.Equals, err)
|
||||
c.Assert(errors.Details(err), jc.Contains, tagToLocation["embedErr"].String())
|
||||
}
|
||||
|
||||
var _ error = (*embed)(nil)
|
||||
|
||||
// This is an uncomparable error type, as it is a struct that supports the
|
||||
// error interface (as opposed to a pointer type).
|
||||
type error_ struct {
|
||||
info string
|
||||
slice []string
|
||||
}
|
||||
|
||||
// Create a non-comparable error
|
||||
func newNonComparableError(message string) error {
|
||||
return error_{info: message}
|
||||
}
|
||||
|
||||
func (e error_) Error() string {
|
||||
return e.info
|
||||
}
|
||||
|
||||
func newError(message string) error {
|
||||
return testError{message}
|
||||
}
|
||||
|
||||
// The testError is a value type error for ease of seeing results
|
||||
// when the test fails.
|
||||
type testError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func (e testError) Error() string {
|
||||
return e.message
|
||||
}
|
284
vendor/github.com/juju/errors/errortypes.go
generated
vendored
284
vendor/github.com/juju/errors/errortypes.go
generated
vendored
@ -1,284 +0,0 @@
|
||||
// Copyright 2014 Canonical Ltd.
|
||||
// Licensed under the LGPLv3, see LICENCE file for details.
|
||||
|
||||
package errors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// wrap is a helper to construct an *wrapper.
|
||||
func wrap(err error, format, suffix string, args ...interface{}) Err {
|
||||
newErr := Err{
|
||||
message: fmt.Sprintf(format+suffix, args...),
|
||||
previous: err,
|
||||
}
|
||||
newErr.SetLocation(2)
|
||||
return newErr
|
||||
}
|
||||
|
||||
// notFound represents an error when something has not been found.
|
||||
type notFound struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// NotFoundf returns an error which satisfies IsNotFound().
|
||||
func NotFoundf(format string, args ...interface{}) error {
|
||||
return ¬Found{wrap(nil, format, " not found", args...)}
|
||||
}
|
||||
|
||||
// NewNotFound returns an error which wraps err that satisfies
|
||||
// IsNotFound().
|
||||
func NewNotFound(err error, msg string) error {
|
||||
return ¬Found{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsNotFound reports whether err was created with NotFoundf() or
|
||||
// NewNotFound().
|
||||
func IsNotFound(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*notFound)
|
||||
return ok
|
||||
}
|
||||
|
||||
// userNotFound represents an error when an inexistent user is looked up.
|
||||
type userNotFound struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// UserNotFoundf returns an error which satisfies IsUserNotFound().
|
||||
func UserNotFoundf(format string, args ...interface{}) error {
|
||||
return &userNotFound{wrap(nil, format, " user not found", args...)}
|
||||
}
|
||||
|
||||
// NewUserNotFound returns an error which wraps err and satisfies
|
||||
// IsUserNotFound().
|
||||
func NewUserNotFound(err error, msg string) error {
|
||||
return &userNotFound{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsUserNotFound reports whether err was created with UserNotFoundf() or
|
||||
// NewUserNotFound().
|
||||
func IsUserNotFound(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*userNotFound)
|
||||
return ok
|
||||
}
|
||||
|
||||
// unauthorized represents an error when an operation is unauthorized.
|
||||
type unauthorized struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// Unauthorizedf returns an error which satisfies IsUnauthorized().
|
||||
func Unauthorizedf(format string, args ...interface{}) error {
|
||||
return &unauthorized{wrap(nil, format, "", args...)}
|
||||
}
|
||||
|
||||
// NewUnauthorized returns an error which wraps err and satisfies
|
||||
// IsUnauthorized().
|
||||
func NewUnauthorized(err error, msg string) error {
|
||||
return &unauthorized{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsUnauthorized reports whether err was created with Unauthorizedf() or
|
||||
// NewUnauthorized().
|
||||
func IsUnauthorized(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*unauthorized)
|
||||
return ok
|
||||
}
|
||||
|
||||
// notImplemented represents an error when something is not
|
||||
// implemented.
|
||||
type notImplemented struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// NotImplementedf returns an error which satisfies IsNotImplemented().
|
||||
func NotImplementedf(format string, args ...interface{}) error {
|
||||
return ¬Implemented{wrap(nil, format, " not implemented", args...)}
|
||||
}
|
||||
|
||||
// NewNotImplemented returns an error which wraps err and satisfies
|
||||
// IsNotImplemented().
|
||||
func NewNotImplemented(err error, msg string) error {
|
||||
return &notImplemented{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsNotImplemented reports whether err was created with
|
||||
// NotImplementedf() or NewNotImplemented().
|
||||
func IsNotImplemented(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*notImplemented)
|
||||
return ok
|
||||
}
|
||||
|
||||
// alreadyExists represents an error when something already exists.
|
||||
type alreadyExists struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// AlreadyExistsf returns an error which satisfies IsAlreadyExists().
|
||||
func AlreadyExistsf(format string, args ...interface{}) error {
|
||||
return &alreadyExists{wrap(nil, format, " already exists", args...)}
|
||||
}
|
||||
|
||||
// NewAlreadyExists returns an error which wraps err and satisfies
|
||||
// IsAlreadyExists().
|
||||
func NewAlreadyExists(err error, msg string) error {
|
||||
return &alreadyExists{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsAlreadyExists reports whether the error was created with
|
||||
// AlreadyExistsf() or NewAlreadyExists().
|
||||
func IsAlreadyExists(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*alreadyExists)
|
||||
return ok
|
||||
}
|
||||
|
||||
// notSupported represents an error when something is not supported.
|
||||
type notSupported struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// NotSupportedf returns an error which satisfies IsNotSupported().
|
||||
func NotSupportedf(format string, args ...interface{}) error {
|
||||
return &notSupported{wrap(nil, format, " not supported", args...)}
|
||||
}
|
||||
|
||||
// NewNotSupported returns an error which wraps err and satisfies
|
||||
// IsNotSupported().
|
||||
func NewNotSupported(err error, msg string) error {
|
||||
return &notSupported{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsNotSupported reports whether the error was created with
|
||||
// NotSupportedf() or NewNotSupported().
|
||||
func IsNotSupported(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*notSupported)
|
||||
return ok
|
||||
}
|
||||
|
||||
// notValid represents an error when something is not valid.
|
||||
type notValid struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// NotValidf returns an error which satisfies IsNotValid().
|
||||
func NotValidf(format string, args ...interface{}) error {
|
||||
return &notValid{wrap(nil, format, " not valid", args...)}
|
||||
}
|
||||
|
||||
// NewNotValid returns an error which wraps err and satisfies IsNotValid().
|
||||
func NewNotValid(err error, msg string) error {
|
||||
return &notValid{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsNotValid reports whether the error was created with NotValidf() or
|
||||
// NewNotValid().
|
||||
func IsNotValid(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*notValid)
|
||||
return ok
|
||||
}
|
||||
|
||||
// notProvisioned represents an error when something is not yet provisioned.
|
||||
type notProvisioned struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// NotProvisionedf returns an error which satisfies IsNotProvisioned().
|
||||
func NotProvisionedf(format string, args ...interface{}) error {
|
||||
return &notProvisioned{wrap(nil, format, " not provisioned", args...)}
|
||||
}
|
||||
|
||||
// NewNotProvisioned returns an error which wraps err that satisfies
|
||||
// IsNotProvisioned().
|
||||
func NewNotProvisioned(err error, msg string) error {
|
||||
return &notProvisioned{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsNotProvisioned reports whether err was created with NotProvisionedf() or
|
||||
// NewNotProvisioned().
|
||||
func IsNotProvisioned(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*notProvisioned)
|
||||
return ok
|
||||
}
|
||||
|
||||
// notAssigned represents an error when something is not yet assigned to
|
||||
// something else.
|
||||
type notAssigned struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// NotAssignedf returns an error which satisfies IsNotAssigned().
|
||||
func NotAssignedf(format string, args ...interface{}) error {
|
||||
return &notAssigned{wrap(nil, format, " not assigned", args...)}
|
||||
}
|
||||
|
||||
// NewNotAssigned returns an error which wraps err that satisfies
|
||||
// IsNotAssigned().
|
||||
func NewNotAssigned(err error, msg string) error {
|
||||
return &notAssigned{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsNotAssigned reports whether err was created with NotAssignedf() or
|
||||
// NewNotAssigned().
|
||||
func IsNotAssigned(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*notAssigned)
|
||||
return ok
|
||||
}
|
||||
|
||||
// badRequest represents an error when a request has bad parameters.
|
||||
type badRequest struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// BadRequestf returns an error which satisfies IsBadRequest().
|
||||
func BadRequestf(format string, args ...interface{}) error {
|
||||
return &badRequest{wrap(nil, format, "", args...)}
|
||||
}
|
||||
|
||||
// NewBadRequest returns an error which wraps err that satisfies
|
||||
// IsBadRequest().
|
||||
func NewBadRequest(err error, msg string) error {
|
||||
return &badRequest{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsBadRequest reports whether err was created with BadRequestf() or
|
||||
// NewBadRequest().
|
||||
func IsBadRequest(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*badRequest)
|
||||
return ok
|
||||
}
|
||||
|
||||
// methodNotAllowed represents an error when an HTTP request
|
||||
// is made with an inappropriate method.
|
||||
type methodNotAllowed struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// MethodNotAllowedf returns an error which satisfies IsMethodNotAllowed().
|
||||
func MethodNotAllowedf(format string, args ...interface{}) error {
|
||||
return &methodNotAllowed{wrap(nil, format, "", args...)}
|
||||
}
|
||||
|
||||
// NewMethodNotAllowed returns an error which wraps err that satisfies
|
||||
// IsMethodNotAllowed().
|
||||
func NewMethodNotAllowed(err error, msg string) error {
|
||||
return &methodNotAllowed{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsMethodNotAllowed reports whether err was created with MethodNotAllowedf() or
|
||||
// NewMethodNotAllowed().
|
||||
func IsMethodNotAllowed(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*methodNotAllowed)
|
||||
return ok
|
||||
}
|
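The removed errortypes.go above is the catalogue of error kinds: each kind pairs a constructor (NotFoundf, NewNotFound, ...) with a satisfier (IsNotFound, ...) that type-asserts after calling Cause. As a reference for callers migrating off this vendored copy, here is a minimal usage sketch against the upstream github.com/juju/errors package; the lookupUser helper is hypothetical.

package main

import (
	"fmt"

	"github.com/juju/errors"
)

// lookupUser is a hypothetical helper that fails with a typed error.
func lookupUser(name string) error {
	if name != "admin" {
		// NotFoundf appends " not found" to the formatted message.
		return errors.NotFoundf("user %q", name)
	}
	return nil
}

func main() {
	err := lookupUser("bob")
	// Annotations keep the original error as the Cause, so the
	// satisfier still matches after extra context is added.
	err = errors.Annotate(err, "login failed")
	fmt.Println(errors.IsNotFound(err))                // true: IsNotFound calls Cause internally
	fmt.Println(errors.IsNotFound(errors.New("boom"))) // false: a plain located error, no kind
}

The same constructor/satisfier pairing applies to every other kind in the file (IsUnauthorized, IsAlreadyExists, IsNotValid, and so on).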
173
vendor/github.com/juju/errors/errortypes_test.go (generated, vendored)
@@ -1,173 +0,0 @@
|
||||
// Copyright 2013, 2014 Canonical Ltd.
|
||||
// Licensed under the LGPLv3, see LICENCE file for details.
|
||||
|
||||
package errors_test
|
||||
|
||||
import (
|
||||
stderrors "errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime"
|
||||
|
||||
"github.com/juju/errors"
|
||||
jc "github.com/juju/testing/checkers"
|
||||
gc "gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
// errorInfo holds information about a single error type: a satisfier
|
||||
// function, wrapping and variable arguments constructors and message
|
||||
// suffix.
|
||||
type errorInfo struct {
|
||||
satisfier func(error) bool
|
||||
argsConstructor func(string, ...interface{}) error
|
||||
wrapConstructor func(error, string) error
|
||||
suffix string
|
||||
}
|
||||
|
||||
// allErrors holds information for all defined errors. When adding new
|
||||
// errors, add them here as well to include them in tests.
|
||||
var allErrors = []*errorInfo{
|
||||
&errorInfo{errors.IsNotFound, errors.NotFoundf, errors.NewNotFound, " not found"},
|
||||
&errorInfo{errors.IsUserNotFound, errors.UserNotFoundf, errors.NewUserNotFound, " user not found"},
|
||||
&errorInfo{errors.IsUnauthorized, errors.Unauthorizedf, errors.NewUnauthorized, ""},
|
||||
&errorInfo{errors.IsNotImplemented, errors.NotImplementedf, errors.NewNotImplemented, " not implemented"},
|
||||
&errorInfo{errors.IsAlreadyExists, errors.AlreadyExistsf, errors.NewAlreadyExists, " already exists"},
|
||||
&errorInfo{errors.IsNotSupported, errors.NotSupportedf, errors.NewNotSupported, " not supported"},
|
||||
&errorInfo{errors.IsNotValid, errors.NotValidf, errors.NewNotValid, " not valid"},
|
||||
&errorInfo{errors.IsNotProvisioned, errors.NotProvisionedf, errors.NewNotProvisioned, " not provisioned"},
|
||||
&errorInfo{errors.IsNotAssigned, errors.NotAssignedf, errors.NewNotAssigned, " not assigned"},
|
||||
&errorInfo{errors.IsMethodNotAllowed, errors.MethodNotAllowedf, errors.NewMethodNotAllowed, ""},
|
||||
&errorInfo{errors.IsBadRequest, errors.BadRequestf, errors.NewBadRequest, ""},
|
||||
}
|
||||
|
||||
type errorTypeSuite struct{}
|
||||
|
||||
var _ = gc.Suite(&errorTypeSuite{})
|
||||
|
||||
func (t *errorInfo) satisfierName() string {
|
||||
value := reflect.ValueOf(t.satisfier)
|
||||
f := runtime.FuncForPC(value.Pointer())
|
||||
return f.Name()
|
||||
}
|
||||
|
||||
func (t *errorInfo) equal(t0 *errorInfo) bool {
|
||||
if t0 == nil {
|
||||
return false
|
||||
}
|
||||
return t.satisfierName() == t0.satisfierName()
|
||||
}
|
||||
|
||||
type errorTest struct {
|
||||
err error
|
||||
message string
|
||||
errInfo *errorInfo
|
||||
}
|
||||
|
||||
func deferredAnnotatef(err error, format string, args ...interface{}) error {
|
||||
errors.DeferredAnnotatef(&err, format, args...)
|
||||
return err
|
||||
}
|
||||
|
||||
func mustSatisfy(c *gc.C, err error, errInfo *errorInfo) {
|
||||
if errInfo != nil {
|
||||
msg := fmt.Sprintf("%#v must satisfy %v", err, errInfo.satisfierName())
|
||||
c.Check(err, jc.Satisfies, errInfo.satisfier, gc.Commentf(msg))
|
||||
}
|
||||
}
|
||||
|
||||
func mustNotSatisfy(c *gc.C, err error, errInfo *errorInfo) {
|
||||
if errInfo != nil {
|
||||
msg := fmt.Sprintf("%#v must not satisfy %v", err, errInfo.satisfierName())
|
||||
c.Check(err, gc.Not(jc.Satisfies), errInfo.satisfier, gc.Commentf(msg))
|
||||
}
|
||||
}
|
||||
|
||||
func checkErrorMatches(c *gc.C, err error, message string, errInfo *errorInfo) {
|
||||
if message == "<nil>" {
|
||||
c.Check(err, gc.IsNil)
|
||||
c.Check(errInfo, gc.IsNil)
|
||||
} else {
|
||||
c.Check(err, gc.ErrorMatches, message)
|
||||
}
|
||||
}
|
||||
|
||||
func runErrorTests(c *gc.C, errorTests []errorTest, checkMustSatisfy bool) {
|
||||
for i, t := range errorTests {
|
||||
c.Logf("test %d: %T: %v", i, t.err, t.err)
|
||||
checkErrorMatches(c, t.err, t.message, t.errInfo)
|
||||
if checkMustSatisfy {
|
||||
mustSatisfy(c, t.err, t.errInfo)
|
||||
}
|
||||
|
||||
// Check all other satisfiers to make sure none match.
|
||||
for _, otherErrInfo := range allErrors {
|
||||
if checkMustSatisfy && otherErrInfo.equal(t.errInfo) {
|
||||
continue
|
||||
}
|
||||
mustNotSatisfy(c, t.err, otherErrInfo)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (*errorTypeSuite) TestDeferredAnnotatef(c *gc.C) {
|
||||
// Ensure DeferredAnnotatef annotates the errors.
|
||||
errorTests := []errorTest{}
|
||||
for _, errInfo := range allErrors {
|
||||
errorTests = append(errorTests, []errorTest{{
|
||||
deferredAnnotatef(nil, "comment"),
|
||||
"<nil>",
|
||||
nil,
|
||||
}, {
|
||||
deferredAnnotatef(stderrors.New("blast"), "comment"),
|
||||
"comment: blast",
|
||||
nil,
|
||||
}, {
|
||||
deferredAnnotatef(errInfo.argsConstructor("foo %d", 42), "comment %d", 69),
|
||||
"comment 69: foo 42" + errInfo.suffix,
|
||||
errInfo,
|
||||
}, {
|
||||
deferredAnnotatef(errInfo.argsConstructor(""), "comment"),
|
||||
"comment: " + errInfo.suffix,
|
||||
errInfo,
|
||||
}, {
|
||||
deferredAnnotatef(errInfo.wrapConstructor(stderrors.New("pow!"), "woo"), "comment"),
|
||||
"comment: woo: pow!",
|
||||
errInfo,
|
||||
}}...)
|
||||
}
|
||||
|
||||
runErrorTests(c, errorTests, true)
|
||||
}
|
||||
|
||||
func (*errorTypeSuite) TestAllErrors(c *gc.C) {
|
||||
errorTests := []errorTest{}
|
||||
for _, errInfo := range allErrors {
|
||||
errorTests = append(errorTests, []errorTest{{
|
||||
nil,
|
||||
"<nil>",
|
||||
nil,
|
||||
}, {
|
||||
errInfo.argsConstructor("foo %d", 42),
|
||||
"foo 42" + errInfo.suffix,
|
||||
errInfo,
|
||||
}, {
|
||||
errInfo.argsConstructor(""),
|
||||
errInfo.suffix,
|
||||
errInfo,
|
||||
}, {
|
||||
errInfo.wrapConstructor(stderrors.New("pow!"), "prefix"),
|
||||
"prefix: pow!",
|
||||
errInfo,
|
||||
}, {
|
||||
errInfo.wrapConstructor(stderrors.New("pow!"), ""),
|
||||
"pow!",
|
||||
errInfo,
|
||||
}, {
|
||||
errInfo.wrapConstructor(nil, "prefix"),
|
||||
"prefix",
|
||||
errInfo,
|
||||
}}...)
|
||||
}
|
||||
|
||||
runErrorTests(c, errorTests, true)
|
||||
}
|
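errortypes_test.go drives every kind through one errorInfo table (satisfier, formatted constructor, wrapping constructor, message suffix) under gocheck. If those checks ever need to be reproduced without gocheck, a rough equivalent with the standard testing package might look like the sketch below; it covers only three kinds and is an illustration, not a replacement for the deleted suite.

package errors_test

import (
	"testing"

	"github.com/juju/errors"
)

// kindTest mirrors the errorInfo table from the removed file:
// a formatted constructor, its satisfier, and the expected suffix.
type kindTest struct {
	name      string
	construct func(string, ...interface{}) error
	satisfies func(error) bool
	suffix    string
}

func TestErrorKinds(t *testing.T) {
	for _, tc := range []kindTest{
		{"not found", errors.NotFoundf, errors.IsNotFound, " not found"},
		{"not valid", errors.NotValidf, errors.IsNotValid, " not valid"},
		{"already exists", errors.AlreadyExistsf, errors.IsAlreadyExists, " already exists"},
	} {
		err := tc.construct("thing")
		if got, want := err.Error(), "thing"+tc.suffix; got != want {
			t.Errorf("%s: message %q, want %q", tc.name, got, want)
		}
		if !tc.satisfies(err) {
			t.Errorf("%s: satisfier did not match its own constructor", tc.name)
		}
	}
}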
23
vendor/github.com/juju/errors/example_test.go (generated, vendored)
@@ -1,23 +0,0 @@
// Copyright 2013, 2014 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.

package errors_test

import (
	"fmt"

	"github.com/juju/errors"
)

func ExampleTrace() {
	var err1 error = fmt.Errorf("something wicked this way comes")
	var err2 error = nil

	// Tracing a non nil error will return an error
	fmt.Println(errors.Trace(err1))
	// Tracing nil will return nil
	fmt.Println(errors.Trace(err2))

	// Output: something wicked this way comes
	// <nil>
}
12
vendor/github.com/juju/errors/export_test.go (generated, vendored)
@@ -1,12 +0,0 @@
// Copyright 2013, 2014 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.

package errors

// Since variables are declared before the init block, in order to get the goPath
// we need to return it rather than just reference it.
func GoPath() string {
	return goPath
}

var TrimGoPath = trimGoPath
330
vendor/github.com/juju/errors/functions.go (generated, vendored)
@@ -1,330 +0,0 @@
|
||||
// Copyright 2014 Canonical Ltd.
|
||||
// Licensed under the LGPLv3, see LICENCE file for details.
|
||||
|
||||
package errors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// New is a drop in replacement for the standard library errors module that records
|
||||
// the location that the error is created.
|
||||
//
|
||||
// For example:
|
||||
// return errors.New("validation failed")
|
||||
//
|
||||
func New(message string) error {
|
||||
err := &Err{message: message}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
// Errorf creates a new annotated error and records the location that the
|
||||
// error is created. This should be a drop in replacement for fmt.Errorf.
|
||||
//
|
||||
// For example:
|
||||
// return errors.Errorf("validation failed: %s", message)
|
||||
//
|
||||
func Errorf(format string, args ...interface{}) error {
|
||||
err := &Err{message: fmt.Sprintf(format, args...)}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
// Trace adds the location of the Trace call to the stack. The Cause of the
|
||||
// resulting error is the same as the error parameter. If the other error is
|
||||
// nil, the result will be nil.
|
||||
//
|
||||
// For example:
|
||||
// if err := SomeFunc(); err != nil {
|
||||
// return errors.Trace(err)
|
||||
// }
|
||||
//
|
||||
func Trace(other error) error {
|
||||
if other == nil {
|
||||
return nil
|
||||
}
|
||||
err := &Err{previous: other, cause: Cause(other)}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
// Annotate is used to add extra context to an existing error. The location of
|
||||
// the Annotate call is recorded with the annotations. The file, line and
|
||||
// function are also recorded.
|
||||
//
|
||||
// For example:
|
||||
// if err := SomeFunc(); err != nil {
|
||||
// return errors.Annotate(err, "failed to frombulate")
|
||||
// }
|
||||
//
|
||||
func Annotate(other error, message string) error {
|
||||
if other == nil {
|
||||
return nil
|
||||
}
|
||||
err := &Err{
|
||||
previous: other,
|
||||
cause: Cause(other),
|
||||
message: message,
|
||||
}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
// Annotatef is used to add extra context to an existing error. The location of
|
||||
// the Annotate call is recorded with the annotations. The file, line and
|
||||
// function are also recorded.
|
||||
//
|
||||
// For example:
|
||||
// if err := SomeFunc(); err != nil {
|
||||
// return errors.Annotatef(err, "failed to frombulate the %s", arg)
|
||||
// }
|
||||
//
|
||||
func Annotatef(other error, format string, args ...interface{}) error {
|
||||
if other == nil {
|
||||
return nil
|
||||
}
|
||||
err := &Err{
|
||||
previous: other,
|
||||
cause: Cause(other),
|
||||
message: fmt.Sprintf(format, args...),
|
||||
}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
// DeferredAnnotatef annotates the given error (when it is not nil) with the given
|
||||
// format string and arguments (like fmt.Sprintf). If *err is nil, DeferredAnnotatef
|
||||
// does nothing. This method is used in a defer statement in order to annotate any
|
||||
// resulting error with the same message.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// defer DeferredAnnotatef(&err, "failed to frombulate the %s", arg)
|
||||
//
|
||||
func DeferredAnnotatef(err *error, format string, args ...interface{}) {
|
||||
if *err == nil {
|
||||
return
|
||||
}
|
||||
newErr := &Err{
|
||||
message: fmt.Sprintf(format, args...),
|
||||
cause: Cause(*err),
|
||||
previous: *err,
|
||||
}
|
||||
newErr.SetLocation(1)
|
||||
*err = newErr
|
||||
}
|
||||
|
||||
// Wrap changes the Cause of the error. The location of the Wrap call is also
|
||||
// stored in the error stack.
|
||||
//
|
||||
// For example:
|
||||
// if err := SomeFunc(); err != nil {
|
||||
// newErr := &packageError{"more context", private_value}
|
||||
// return errors.Wrap(err, newErr)
|
||||
// }
|
||||
//
|
||||
func Wrap(other, newDescriptive error) error {
|
||||
err := &Err{
|
||||
previous: other,
|
||||
cause: newDescriptive,
|
||||
}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
// Wrapf changes the Cause of the error, and adds an annotation. The location
|
||||
// of the Wrap call is also stored in the error stack.
|
||||
//
|
||||
// For example:
|
||||
// if err := SomeFunc(); err != nil {
|
||||
// return errors.Wrapf(err, simpleErrorType, "invalid value %q", value)
|
||||
// }
|
||||
//
|
||||
func Wrapf(other, newDescriptive error, format string, args ...interface{}) error {
|
||||
err := &Err{
|
||||
message: fmt.Sprintf(format, args...),
|
||||
previous: other,
|
||||
cause: newDescriptive,
|
||||
}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
// Mask masks the given error with the given format string and arguments (like
|
||||
// fmt.Sprintf), returning a new error that maintains the error stack, but
|
||||
// hides the underlying error type. The error string still contains the full
|
||||
// annotations. If you want to hide the annotations, call Wrap.
|
||||
func Maskf(other error, format string, args ...interface{}) error {
|
||||
if other == nil {
|
||||
return nil
|
||||
}
|
||||
err := &Err{
|
||||
message: fmt.Sprintf(format, args...),
|
||||
previous: other,
|
||||
}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
// Mask hides the underlying error type, and records the location of the masking.
|
||||
func Mask(other error) error {
|
||||
if other == nil {
|
||||
return nil
|
||||
}
|
||||
err := &Err{
|
||||
previous: other,
|
||||
}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
// Cause returns the cause of the given error. This will be either the
|
||||
// original error, or the result of a Wrap or Mask call.
|
||||
//
|
||||
// Cause is the usual way to diagnose errors that may have been wrapped by
|
||||
// the other errors functions.
|
||||
func Cause(err error) error {
|
||||
var diag error
|
||||
if err, ok := err.(causer); ok {
|
||||
diag = err.Cause()
|
||||
}
|
||||
if diag != nil {
|
||||
return diag
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type causer interface {
|
||||
Cause() error
|
||||
}
|
||||
|
||||
type wrapper interface {
|
||||
// Message returns the top level error message,
|
||||
// not including the message from the Previous
|
||||
// error.
|
||||
Message() string
|
||||
|
||||
// Underlying returns the Previous error, or nil
|
||||
// if there is none.
|
||||
Underlying() error
|
||||
}
|
||||
|
||||
type locationer interface {
|
||||
Location() (string, int)
|
||||
}
|
||||
|
||||
var (
|
||||
_ wrapper = (*Err)(nil)
|
||||
_ locationer = (*Err)(nil)
|
||||
_ causer = (*Err)(nil)
|
||||
)
|
||||
|
||||
// Details returns information about the stack of errors wrapped by err, in
|
||||
// the format:
|
||||
//
|
||||
// [{filename:99: error one} {otherfile:55: cause of error one}]
|
||||
//
|
||||
// This is a terse alternative to ErrorStack as it returns a single line.
|
||||
func Details(err error) string {
|
||||
if err == nil {
|
||||
return "[]"
|
||||
}
|
||||
var s []byte
|
||||
s = append(s, '[')
|
||||
for {
|
||||
s = append(s, '{')
|
||||
if err, ok := err.(locationer); ok {
|
||||
file, line := err.Location()
|
||||
if file != "" {
|
||||
s = append(s, fmt.Sprintf("%s:%d", file, line)...)
|
||||
s = append(s, ": "...)
|
||||
}
|
||||
}
|
||||
if cerr, ok := err.(wrapper); ok {
|
||||
s = append(s, cerr.Message()...)
|
||||
err = cerr.Underlying()
|
||||
} else {
|
||||
s = append(s, err.Error()...)
|
||||
err = nil
|
||||
}
|
||||
s = append(s, '}')
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
s = append(s, ' ')
|
||||
}
|
||||
s = append(s, ']')
|
||||
return string(s)
|
||||
}
|
||||
|
||||
// ErrorStack returns a string representation of the annotated error. If the
|
||||
// error passed as the parameter is not an annotated error, the result is
|
||||
// simply the result of the Error() method on that error.
|
||||
//
|
||||
// If the error is an annotated error, a multi-line string is returned where
|
||||
// each line represents one entry in the annotation stack. The full filename
|
||||
// from the call stack is used in the output.
|
||||
//
|
||||
// first error
|
||||
// github.com/juju/errors/annotation_test.go:193:
|
||||
// github.com/juju/errors/annotation_test.go:194: annotation
|
||||
// github.com/juju/errors/annotation_test.go:195:
|
||||
// github.com/juju/errors/annotation_test.go:196: more context
|
||||
// github.com/juju/errors/annotation_test.go:197:
|
||||
func ErrorStack(err error) string {
|
||||
return strings.Join(errorStack(err), "\n")
|
||||
}
|
||||
|
||||
func errorStack(err error) []string {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// We want the first error first
|
||||
var lines []string
|
||||
for {
|
||||
var buff []byte
|
||||
if err, ok := err.(locationer); ok {
|
||||
file, line := err.Location()
|
||||
// Strip off the leading GOPATH/src path elements.
|
||||
file = trimGoPath(file)
|
||||
if file != "" {
|
||||
buff = append(buff, fmt.Sprintf("%s:%d", file, line)...)
|
||||
buff = append(buff, ": "...)
|
||||
}
|
||||
}
|
||||
if cerr, ok := err.(wrapper); ok {
|
||||
message := cerr.Message()
|
||||
buff = append(buff, message...)
|
||||
// If there is a cause for this error, and it is different to the cause
|
||||
// of the underlying error, then output the error string in the stack trace.
|
||||
var cause error
|
||||
if err1, ok := err.(causer); ok {
|
||||
cause = err1.Cause()
|
||||
}
|
||||
err = cerr.Underlying()
|
||||
if cause != nil && !sameError(Cause(err), cause) {
|
||||
if message != "" {
|
||||
buff = append(buff, ": "...)
|
||||
}
|
||||
buff = append(buff, cause.Error()...)
|
||||
}
|
||||
} else {
|
||||
buff = append(buff, err.Error()...)
|
||||
err = nil
|
||||
}
|
||||
lines = append(lines, string(buff))
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
// reverse the lines to get the original error, which was at the end of
|
||||
// the list, back to the start.
|
||||
var result []string
|
||||
for i := len(lines); i > 0; i-- {
|
||||
result = append(result, lines[i-1])
|
||||
}
|
||||
return result
|
||||
}
|
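functions.go is the public surface of the package: New and Errorf create located errors, Trace and Annotate extend the stack while preserving the Cause, Wrap and Mask replace or hide it, and Details/ErrorStack render the collected locations. A short sketch of the intended call pattern follows; readConfig is a hypothetical helper.

package main

import (
	"fmt"
	"os"

	"github.com/juju/errors"
)

// readConfig is a hypothetical helper whose failures we want to trace.
func readConfig(path string) error {
	if _, err := os.Stat(path); err != nil {
		// Annotatef keeps err as the Cause and records this call site.
		return errors.Annotatef(err, "reading config %q", path)
	}
	return nil
}

func main() {
	if err := readConfig("/no/such/file"); err != nil {
		// The annotated error no longer satisfies os.IsNotExist directly...
		fmt.Println(os.IsNotExist(err)) // false
		// ...but the original cause still does.
		fmt.Println(os.IsNotExist(errors.Cause(err))) // true
		// ErrorStack prints one line per location, oldest first.
		fmt.Println(errors.ErrorStack(err))
	}
}

This mirrors the Cause test in functions_test.go below: annotation hides the os.IsNotExist check on the outer error, but errors.Cause recovers the original error returned by os.Stat.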
305
vendor/github.com/juju/errors/functions_test.go (generated, vendored)
@@ -1,305 +0,0 @@
|
||||
// Copyright 2014 Canonical Ltd.
|
||||
// Licensed under the LGPLv3, see LICENCE file for details.
|
||||
|
||||
package errors_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
jc "github.com/juju/testing/checkers"
|
||||
gc "gopkg.in/check.v1"
|
||||
|
||||
"github.com/juju/errors"
|
||||
)
|
||||
|
||||
type functionSuite struct {
|
||||
}
|
||||
|
||||
var _ = gc.Suite(&functionSuite{})
|
||||
|
||||
func (*functionSuite) TestNew(c *gc.C) {
|
||||
err := errors.New("testing") //err newTest
|
||||
c.Assert(err.Error(), gc.Equals, "testing")
|
||||
c.Assert(errors.Cause(err), gc.Equals, err)
|
||||
c.Assert(errors.Details(err), jc.Contains, tagToLocation["newTest"].String())
|
||||
}
|
||||
|
||||
func (*functionSuite) TestErrorf(c *gc.C) {
|
||||
err := errors.Errorf("testing %d", 42) //err errorfTest
|
||||
c.Assert(err.Error(), gc.Equals, "testing 42")
|
||||
c.Assert(errors.Cause(err), gc.Equals, err)
|
||||
c.Assert(errors.Details(err), jc.Contains, tagToLocation["errorfTest"].String())
|
||||
}
|
||||
|
||||
func (*functionSuite) TestTrace(c *gc.C) {
|
||||
first := errors.New("first")
|
||||
err := errors.Trace(first) //err traceTest
|
||||
c.Assert(err.Error(), gc.Equals, "first")
|
||||
c.Assert(errors.Cause(err), gc.Equals, first)
|
||||
c.Assert(errors.Details(err), jc.Contains, tagToLocation["traceTest"].String())
|
||||
|
||||
c.Assert(errors.Trace(nil), gc.IsNil)
|
||||
}
|
||||
|
||||
func (*functionSuite) TestAnnotate(c *gc.C) {
|
||||
first := errors.New("first")
|
||||
err := errors.Annotate(first, "annotation") //err annotateTest
|
||||
c.Assert(err.Error(), gc.Equals, "annotation: first")
|
||||
c.Assert(errors.Cause(err), gc.Equals, first)
|
||||
c.Assert(errors.Details(err), jc.Contains, tagToLocation["annotateTest"].String())
|
||||
|
||||
c.Assert(errors.Annotate(nil, "annotate"), gc.IsNil)
|
||||
}
|
||||
|
||||
func (*functionSuite) TestAnnotatef(c *gc.C) {
|
||||
first := errors.New("first")
|
||||
err := errors.Annotatef(first, "annotation %d", 2) //err annotatefTest
|
||||
c.Assert(err.Error(), gc.Equals, "annotation 2: first")
|
||||
c.Assert(errors.Cause(err), gc.Equals, first)
|
||||
c.Assert(errors.Details(err), jc.Contains, tagToLocation["annotatefTest"].String())
|
||||
|
||||
c.Assert(errors.Annotatef(nil, "annotate"), gc.IsNil)
|
||||
}
|
||||
|
||||
func (*functionSuite) TestDeferredAnnotatef(c *gc.C) {
|
||||
// NOTE: this test fails with gccgo
|
||||
if runtime.Compiler == "gccgo" {
|
||||
c.Skip("gccgo can't determine the location")
|
||||
}
|
||||
first := errors.New("first")
|
||||
test := func() (err error) {
|
||||
defer errors.DeferredAnnotatef(&err, "deferred %s", "annotate")
|
||||
return first
|
||||
} //err deferredAnnotate
|
||||
err := test()
|
||||
c.Assert(err.Error(), gc.Equals, "deferred annotate: first")
|
||||
c.Assert(errors.Cause(err), gc.Equals, first)
|
||||
c.Assert(errors.Details(err), jc.Contains, tagToLocation["deferredAnnotate"].String())
|
||||
|
||||
err = nil
|
||||
errors.DeferredAnnotatef(&err, "deferred %s", "annotate")
|
||||
c.Assert(err, gc.IsNil)
|
||||
}
|
||||
|
||||
func (*functionSuite) TestWrap(c *gc.C) {
|
||||
first := errors.New("first") //err wrapFirst
|
||||
detailed := errors.New("detailed")
|
||||
err := errors.Wrap(first, detailed) //err wrapTest
|
||||
c.Assert(err.Error(), gc.Equals, "detailed")
|
||||
c.Assert(errors.Cause(err), gc.Equals, detailed)
|
||||
c.Assert(errors.Details(err), jc.Contains, tagToLocation["wrapFirst"].String())
|
||||
c.Assert(errors.Details(err), jc.Contains, tagToLocation["wrapTest"].String())
|
||||
}
|
||||
|
||||
func (*functionSuite) TestWrapOfNil(c *gc.C) {
|
||||
detailed := errors.New("detailed")
|
||||
err := errors.Wrap(nil, detailed) //err nilWrapTest
|
||||
c.Assert(err.Error(), gc.Equals, "detailed")
|
||||
c.Assert(errors.Cause(err), gc.Equals, detailed)
|
||||
c.Assert(errors.Details(err), jc.Contains, tagToLocation["nilWrapTest"].String())
|
||||
}
|
||||
|
||||
func (*functionSuite) TestWrapf(c *gc.C) {
|
||||
first := errors.New("first") //err wrapfFirst
|
||||
detailed := errors.New("detailed")
|
||||
err := errors.Wrapf(first, detailed, "value %d", 42) //err wrapfTest
|
||||
c.Assert(err.Error(), gc.Equals, "value 42: detailed")
|
||||
c.Assert(errors.Cause(err), gc.Equals, detailed)
|
||||
c.Assert(errors.Details(err), jc.Contains, tagToLocation["wrapfFirst"].String())
|
||||
c.Assert(errors.Details(err), jc.Contains, tagToLocation["wrapfTest"].String())
|
||||
}
|
||||
|
||||
func (*functionSuite) TestWrapfOfNil(c *gc.C) {
|
||||
detailed := errors.New("detailed")
|
||||
err := errors.Wrapf(nil, detailed, "value %d", 42) //err nilWrapfTest
|
||||
c.Assert(err.Error(), gc.Equals, "value 42: detailed")
|
||||
c.Assert(errors.Cause(err), gc.Equals, detailed)
|
||||
c.Assert(errors.Details(err), jc.Contains, tagToLocation["nilWrapfTest"].String())
|
||||
}
|
||||
|
||||
func (*functionSuite) TestMask(c *gc.C) {
|
||||
first := errors.New("first")
|
||||
err := errors.Mask(first) //err maskTest
|
||||
c.Assert(err.Error(), gc.Equals, "first")
|
||||
c.Assert(errors.Cause(err), gc.Equals, err)
|
||||
c.Assert(errors.Details(err), jc.Contains, tagToLocation["maskTest"].String())
|
||||
|
||||
c.Assert(errors.Mask(nil), gc.IsNil)
|
||||
}
|
||||
|
||||
func (*functionSuite) TestMaskf(c *gc.C) {
|
||||
first := errors.New("first")
|
||||
err := errors.Maskf(first, "masked %d", 42) //err maskfTest
|
||||
c.Assert(err.Error(), gc.Equals, "masked 42: first")
|
||||
c.Assert(errors.Cause(err), gc.Equals, err)
|
||||
c.Assert(errors.Details(err), jc.Contains, tagToLocation["maskfTest"].String())
|
||||
|
||||
c.Assert(errors.Maskf(nil, "mask"), gc.IsNil)
|
||||
}
|
||||
|
||||
func (*functionSuite) TestCause(c *gc.C) {
|
||||
c.Assert(errors.Cause(nil), gc.IsNil)
|
||||
c.Assert(errors.Cause(someErr), gc.Equals, someErr)
|
||||
|
||||
fmtErr := fmt.Errorf("simple")
|
||||
c.Assert(errors.Cause(fmtErr), gc.Equals, fmtErr)
|
||||
|
||||
err := errors.Wrap(someErr, fmtErr)
|
||||
c.Assert(errors.Cause(err), gc.Equals, fmtErr)
|
||||
|
||||
err = errors.Annotate(err, "annotated")
|
||||
c.Assert(errors.Cause(err), gc.Equals, fmtErr)
|
||||
|
||||
err = errors.Maskf(err, "maksed")
|
||||
c.Assert(errors.Cause(err), gc.Equals, err)
|
||||
|
||||
// Look for a file that we know isn't there.
|
||||
dir := c.MkDir()
|
||||
_, err = os.Stat(filepath.Join(dir, "not-there"))
|
||||
c.Assert(os.IsNotExist(err), jc.IsTrue)
|
||||
|
||||
err = errors.Annotatef(err, "wrap it")
|
||||
// Now the error itself isn't a 'IsNotExist'.
|
||||
c.Assert(os.IsNotExist(err), jc.IsFalse)
|
||||
// However if we use the Check method, it is.
|
||||
c.Assert(os.IsNotExist(errors.Cause(err)), jc.IsTrue)
|
||||
}
|
||||
|
||||
func (s *functionSuite) TestDetails(c *gc.C) {
|
||||
if runtime.Compiler == "gccgo" {
|
||||
c.Skip("gccgo can't determine the location")
|
||||
}
|
||||
c.Assert(errors.Details(nil), gc.Equals, "[]")
|
||||
|
||||
otherErr := fmt.Errorf("other")
|
||||
checkDetails(c, otherErr, "[{other}]")
|
||||
|
||||
err0 := newEmbed("foo") //err TestStack#0
|
||||
checkDetails(c, err0, "[{$TestStack#0$: foo}]")
|
||||
|
||||
err1 := errors.Annotate(err0, "bar") //err TestStack#1
|
||||
checkDetails(c, err1, "[{$TestStack#1$: bar} {$TestStack#0$: foo}]")
|
||||
|
||||
err2 := errors.Trace(err1) //err TestStack#2
|
||||
checkDetails(c, err2, "[{$TestStack#2$: } {$TestStack#1$: bar} {$TestStack#0$: foo}]")
|
||||
}
|
||||
|
||||
type tracer interface {
|
||||
StackTrace() []string
|
||||
}
|
||||
|
||||
func (*functionSuite) TestErrorStack(c *gc.C) {
|
||||
for i, test := range []struct {
|
||||
message string
|
||||
generator func() error
|
||||
expected string
|
||||
tracer bool
|
||||
}{
|
||||
{
|
||||
message: "nil",
|
||||
generator: func() error {
|
||||
return nil
|
||||
},
|
||||
}, {
|
||||
message: "raw error",
|
||||
generator: func() error {
|
||||
return fmt.Errorf("raw")
|
||||
},
|
||||
expected: "raw",
|
||||
}, {
|
||||
message: "single error stack",
|
||||
generator: func() error {
|
||||
return errors.New("first error") //err single
|
||||
},
|
||||
expected: "$single$: first error",
|
||||
tracer: true,
|
||||
}, {
|
||||
message: "annotated error",
|
||||
generator: func() error {
|
||||
err := errors.New("first error") //err annotated-0
|
||||
return errors.Annotate(err, "annotation") //err annotated-1
|
||||
},
|
||||
expected: "" +
|
||||
"$annotated-0$: first error\n" +
|
||||
"$annotated-1$: annotation",
|
||||
tracer: true,
|
||||
}, {
|
||||
message: "wrapped error",
|
||||
generator: func() error {
|
||||
err := errors.New("first error") //err wrapped-0
|
||||
return errors.Wrap(err, newError("detailed error")) //err wrapped-1
|
||||
},
|
||||
expected: "" +
|
||||
"$wrapped-0$: first error\n" +
|
||||
"$wrapped-1$: detailed error",
|
||||
tracer: true,
|
||||
}, {
|
||||
message: "annotated wrapped error",
|
||||
generator: func() error {
|
||||
err := errors.Errorf("first error") //err ann-wrap-0
|
||||
err = errors.Wrap(err, fmt.Errorf("detailed error")) //err ann-wrap-1
|
||||
return errors.Annotatef(err, "annotated") //err ann-wrap-2
|
||||
},
|
||||
expected: "" +
|
||||
"$ann-wrap-0$: first error\n" +
|
||||
"$ann-wrap-1$: detailed error\n" +
|
||||
"$ann-wrap-2$: annotated",
|
||||
tracer: true,
|
||||
}, {
|
||||
message: "traced, and annotated",
|
||||
generator: func() error {
|
||||
err := errors.New("first error") //err stack-0
|
||||
err = errors.Trace(err) //err stack-1
|
||||
err = errors.Annotate(err, "some context") //err stack-2
|
||||
err = errors.Trace(err) //err stack-3
|
||||
err = errors.Annotate(err, "more context") //err stack-4
|
||||
return errors.Trace(err) //err stack-5
|
||||
},
|
||||
expected: "" +
|
||||
"$stack-0$: first error\n" +
|
||||
"$stack-1$: \n" +
|
||||
"$stack-2$: some context\n" +
|
||||
"$stack-3$: \n" +
|
||||
"$stack-4$: more context\n" +
|
||||
"$stack-5$: ",
|
||||
tracer: true,
|
||||
}, {
|
||||
message: "uncomparable, wrapped with a value error",
|
||||
generator: func() error {
|
||||
err := newNonComparableError("first error") //err mixed-0
|
||||
err = errors.Trace(err) //err mixed-1
|
||||
err = errors.Wrap(err, newError("value error")) //err mixed-2
|
||||
err = errors.Maskf(err, "masked") //err mixed-3
|
||||
err = errors.Annotate(err, "more context") //err mixed-4
|
||||
return errors.Trace(err) //err mixed-5
|
||||
},
|
||||
expected: "" +
|
||||
"first error\n" +
|
||||
"$mixed-1$: \n" +
|
||||
"$mixed-2$: value error\n" +
|
||||
"$mixed-3$: masked\n" +
|
||||
"$mixed-4$: more context\n" +
|
||||
"$mixed-5$: ",
|
||||
tracer: true,
|
||||
},
|
||||
} {
|
||||
c.Logf("%v: %s", i, test.message)
|
||||
err := test.generator()
|
||||
expected := replaceLocations(test.expected)
|
||||
stack := errors.ErrorStack(err)
|
||||
ok := c.Check(stack, gc.Equals, expected)
|
||||
if !ok {
|
||||
c.Logf("%#v", err)
|
||||
}
|
||||
tracer, ok := err.(tracer)
|
||||
c.Check(ok, gc.Equals, test.tracer)
|
||||
if ok {
|
||||
stackTrace := tracer.StackTrace()
|
||||
c.Check(stackTrace, gc.DeepEquals, strings.Split(stack, "\n"))
|
||||
}
|
||||
}
|
||||
}
|
95
vendor/github.com/juju/errors/package_test.go (generated, vendored)
@@ -1,95 +0,0 @@
|
||||
// Copyright 2013, 2014 Canonical Ltd.
|
||||
// Licensed under the LGPLv3, see LICENCE file for details.
|
||||
|
||||
package errors_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
gc "gopkg.in/check.v1"
|
||||
|
||||
"github.com/juju/errors"
|
||||
)
|
||||
|
||||
func Test(t *testing.T) {
|
||||
gc.TestingT(t)
|
||||
}
|
||||
|
||||
func checkDetails(c *gc.C, err error, details string) {
|
||||
c.Assert(err, gc.NotNil)
|
||||
expectedDetails := replaceLocations(details)
|
||||
c.Assert(errors.Details(err), gc.Equals, expectedDetails)
|
||||
}
|
||||
|
||||
func checkErr(c *gc.C, err, cause error, msg string, details string) {
|
||||
c.Assert(err, gc.NotNil)
|
||||
c.Assert(err.Error(), gc.Equals, msg)
|
||||
c.Assert(errors.Cause(err), gc.Equals, cause)
|
||||
expectedDetails := replaceLocations(details)
|
||||
c.Assert(errors.Details(err), gc.Equals, expectedDetails)
|
||||
}
|
||||
|
||||
func replaceLocations(line string) string {
|
||||
result := ""
|
||||
for {
|
||||
i := strings.Index(line, "$")
|
||||
if i == -1 {
|
||||
break
|
||||
}
|
||||
result += line[0:i]
|
||||
line = line[i+1:]
|
||||
i = strings.Index(line, "$")
|
||||
if i == -1 {
|
||||
panic("no second $")
|
||||
}
|
||||
result += location(line[0:i]).String()
|
||||
line = line[i+1:]
|
||||
}
|
||||
result += line
|
||||
return result
|
||||
}
|
||||
|
||||
func location(tag string) Location {
|
||||
loc, ok := tagToLocation[tag]
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("tag %q not found", tag))
|
||||
}
|
||||
return loc
|
||||
}
|
||||
|
||||
type Location struct {
|
||||
file string
|
||||
line int
|
||||
}
|
||||
|
||||
func (loc Location) String() string {
|
||||
return fmt.Sprintf("%s:%d", loc.file, loc.line)
|
||||
}
|
||||
|
||||
var tagToLocation = make(map[string]Location)
|
||||
|
||||
func setLocationsForErrorTags(filename string) {
|
||||
data, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
filename = "github.com/juju/errors/" + filename
|
||||
lines := strings.Split(string(data), "\n")
|
||||
for i, line := range lines {
|
||||
if j := strings.Index(line, "//err "); j >= 0 {
|
||||
tag := line[j+len("//err "):]
|
||||
if _, found := tagToLocation[tag]; found {
|
||||
panic(fmt.Sprintf("tag %q already processed previously", tag))
|
||||
}
|
||||
tagToLocation[tag] = Location{file: filename, line: i + 1}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
setLocationsForErrorTags("error_test.go")
|
||||
setLocationsForErrorTags("functions_test.go")
|
||||
}
|
35
vendor/github.com/juju/errors/path.go (generated, vendored)
@@ -1,35 +0,0 @@
// Copyright 2013, 2014 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.

package errors

import (
	"runtime"
	"strings"
)

// prefixSize is used internally to trim the user specific path from the
// front of the returned filenames from the runtime call stack.
var prefixSize int

// goPath is the deduced path based on the location of this file as compiled.
var goPath string

func init() {
	_, file, _, ok := runtime.Caller(0)
	if ok {
		// We know that the end of the file should be:
		// github.com/juju/errors/path.go
		size := len(file)
		suffix := len("github.com/juju/errors/path.go")
		goPath = file[:size-suffix]
		prefixSize = len(goPath)
	}
}

func trimGoPath(filename string) string {
	if strings.HasPrefix(filename, goPath) {
		return filename[prefixSize:]
	}
	return filename
}
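path.go deduces goPath from the compiled location of the file itself, and init() records its length so trimGoPath can strip the GOPATH/src prefix from the filenames that appear in Details and ErrorStack output. The standalone sketch below repeats that trimming logic outside the package; the paths are made up.

package main

import (
	"fmt"
	"strings"
)

// trimPrefix mirrors the trimGoPath logic from the removed file:
// if the filename starts with the deduced GOPATH/src prefix, drop it.
func trimPrefix(filename, goPath string) string {
	if strings.HasPrefix(filename, goPath) {
		return filename[len(goPath):]
	}
	return filename
}

func main() {
	goPath := "/home/dev/.gopath/src/" // made-up prefix
	fmt.Println(trimPrefix(goPath+"github.com/juju/errors/errors.go", goPath))
	// github.com/juju/errors/errors.go
	fmt.Println(trimPrefix("/usr/share/foo/bar/baz.go", goPath))
	// /usr/share/foo/bar/baz.go (unchanged: prefix does not match)
}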
29
vendor/github.com/juju/errors/path_test.go (generated, vendored)
@@ -1,29 +0,0 @@
// Copyright 2013, 2014 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.

package errors_test

import (
	"path"

	gc "gopkg.in/check.v1"

	"github.com/juju/errors"
)

type pathSuite struct{}

var _ = gc.Suite(&pathSuite{})

func (*pathSuite) TestGoPathSet(c *gc.C) {
	c.Assert(errors.GoPath(), gc.Not(gc.Equals), "")
}

func (*pathSuite) TestTrimGoPath(c *gc.C) {
	relativeImport := "github.com/foo/bar/baz.go"
	filename := path.Join(errors.GoPath(), relativeImport)
	c.Assert(errors.TrimGoPath(filename), gc.Equals, relativeImport)

	absoluteImport := "/usr/share/foo/bar/baz.go"
	c.Assert(errors.TrimGoPath(absoluteImport), gc.Equals, absoluteImport)
}
50
vendor/github.com/ngaut/deadline/rw.go (generated, vendored)
@@ -1,50 +0,0 @@
package deadline

import (
	"io"
	"time"
)

type DeadlineReader interface {
	io.Reader
	SetReadDeadline(t time.Time) error
}

type DeadlineWriter interface {
	io.Writer
	SetWriteDeadline(t time.Time) error
}

type DeadlineReadWriter interface {
	io.ReadWriter
	SetReadDeadline(t time.Time) error
	SetWriteDeadline(t time.Time) error
}

type deadlineReader struct {
	DeadlineReader
	timeout time.Duration
}

func (r *deadlineReader) Read(p []byte) (int, error) {
	r.DeadlineReader.SetReadDeadline(time.Now().Add(r.timeout))
	return r.DeadlineReader.Read(p)
}

func NewDeadlineReader(r DeadlineReader, timeout time.Duration) io.Reader {
	return &deadlineReader{DeadlineReader: r, timeout: timeout}
}

type deadlineWriter struct {
	DeadlineWriter
	timeout time.Duration
}

func (r *deadlineWriter) Write(p []byte) (int, error) {
	r.DeadlineWriter.SetWriteDeadline(time.Now().Add(r.timeout))
	return r.DeadlineWriter.Write(p)
}

func NewDeadlineWriter(r DeadlineWriter, timeout time.Duration) io.Writer {
	return &deadlineWriter{DeadlineWriter: r, timeout: timeout}
}
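The deadline package decorates any reader or writer that supports deadlines so that each Read or Write re-arms a relative timeout just before the call. A net.Conn satisfies both embedded interfaces, so a typical wrapping looks like the sketch below; the address and timeout are placeholders.

package main

import (
	"fmt"
	"net"
	"time"

	"github.com/ngaut/deadline"
)

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:3306") // placeholder address
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()

	// Each Read/Write now refreshes its own deadline, so a stalled peer
	// fails the call after roughly one second instead of blocking forever.
	r := deadline.NewDeadlineReader(conn, time.Second)
	w := deadline.NewDeadlineWriter(conn, time.Second)

	if _, err := w.Write([]byte("ping")); err != nil {
		fmt.Println("write:", err)
		return
	}
	buf := make([]byte, 512)
	if n, err := r.Read(buf); err == nil {
		fmt.Printf("read %d bytes\n", n)
	}
}

The design keeps the timeout per call rather than per connection: a long-lived but active connection never hits the deadline, while a stalled one fails after roughly the configured duration.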
165
vendor/github.com/ngaut/log/LICENSE (generated, vendored)
@@ -1,165 +0,0 @@
|
||||
GNU LESSER GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
|
||||
This version of the GNU Lesser General Public License incorporates
|
||||
the terms and conditions of version 3 of the GNU General Public
|
||||
License, supplemented by the additional permissions listed below.
|
||||
|
||||
0. Additional Definitions.
|
||||
|
||||
As used herein, "this License" refers to version 3 of the GNU Lesser
|
||||
General Public License, and the "GNU GPL" refers to version 3 of the GNU
|
||||
General Public License.
|
||||
|
||||
"The Library" refers to a covered work governed by this License,
|
||||
other than an Application or a Combined Work as defined below.
|
||||
|
||||
An "Application" is any work that makes use of an interface provided
|
||||
by the Library, but which is not otherwise based on the Library.
|
||||
Defining a subclass of a class defined by the Library is deemed a mode
|
||||
of using an interface provided by the Library.
|
||||
|
||||
A "Combined Work" is a work produced by combining or linking an
|
||||
Application with the Library. The particular version of the Library
|
||||
with which the Combined Work was made is also called the "Linked
|
||||
Version".
|
||||
|
||||
The "Minimal Corresponding Source" for a Combined Work means the
|
||||
Corresponding Source for the Combined Work, excluding any source code
|
||||
for portions of the Combined Work that, considered in isolation, are
|
||||
based on the Application, and not on the Linked Version.
|
||||
|
||||
The "Corresponding Application Code" for a Combined Work means the
|
||||
object code and/or source code for the Application, including any data
|
||||
and utility programs needed for reproducing the Combined Work from the
|
||||
Application, but excluding the System Libraries of the Combined Work.
|
||||
|
||||
1. Exception to Section 3 of the GNU GPL.
|
||||
|
||||
You may convey a covered work under sections 3 and 4 of this License
|
||||
without being bound by section 3 of the GNU GPL.
|
||||
|
||||
2. Conveying Modified Versions.
|
||||
|
||||
If you modify a copy of the Library, and, in your modifications, a
|
||||
facility refers to a function or data to be supplied by an Application
|
||||
that uses the facility (other than as an argument passed when the
|
||||
facility is invoked), then you may convey a copy of the modified
|
||||
version:
|
||||
|
||||
a) under this License, provided that you make a good faith effort to
|
||||
ensure that, in the event an Application does not supply the
|
||||
function or data, the facility still operates, and performs
|
||||
whatever part of its purpose remains meaningful, or
|
||||
|
||||
b) under the GNU GPL, with none of the additional permissions of
|
||||
this License applicable to that copy.
|
||||
|
||||
3. Object Code Incorporating Material from Library Header Files.
|
||||
|
||||
The object code form of an Application may incorporate material from
|
||||
a header file that is part of the Library. You may convey such object
|
||||
code under terms of your choice, provided that, if the incorporated
|
||||
material is not limited to numerical parameters, data structure
|
||||
layouts and accessors, or small macros, inline functions and templates
|
||||
(ten or fewer lines in length), you do both of the following:
|
||||
|
||||
a) Give prominent notice with each copy of the object code that the
|
||||
Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
b) Accompany the object code with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
4. Combined Works.
|
||||
|
||||
You may convey a Combined Work under terms of your choice that,
|
||||
taken together, effectively do not restrict modification of the
|
||||
portions of the Library contained in the Combined Work and reverse
|
||||
engineering for debugging such modifications, if you also do each of
|
||||
the following:
|
||||
|
||||
a) Give prominent notice with each copy of the Combined Work that
|
||||
the Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
b) Accompany the Combined Work with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
c) For a Combined Work that displays copyright notices during
|
||||
execution, include the copyright notice for the Library among
|
||||
these notices, as well as a reference directing the user to the
|
||||
copies of the GNU GPL and this license document.
|
||||
|
||||
d) Do one of the following:
|
||||
|
||||
0) Convey the Minimal Corresponding Source under the terms of this
|
||||
License, and the Corresponding Application Code in a form
|
||||
suitable for, and under terms that permit, the user to
|
||||
recombine or relink the Application with a modified version of
|
||||
the Linked Version to produce a modified Combined Work, in the
|
||||
manner specified by section 6 of the GNU GPL for conveying
|
||||
Corresponding Source.
|
||||
|
||||
1) Use a suitable shared library mechanism for linking with the
|
||||
Library. A suitable mechanism is one that (a) uses at run time
|
||||
a copy of the Library already present on the user's computer
|
||||
system, and (b) will operate properly with a modified version
|
||||
of the Library that is interface-compatible with the Linked
|
||||
Version.
|
||||
|
||||
e) Provide Installation Information, but only if you would otherwise
|
||||
be required to provide such information under section 6 of the
|
||||
GNU GPL, and only to the extent that such information is
|
||||
necessary to install and execute a modified version of the
|
||||
Combined Work produced by recombining or relinking the
|
||||
Application with a modified version of the Linked Version. (If
|
||||
you use option 4d0, the Installation Information must accompany
|
||||
the Minimal Corresponding Source and Corresponding Application
|
||||
Code. If you use option 4d1, you must provide the Installation
|
||||
Information in the manner specified by section 6 of the GNU GPL
|
||||
for conveying Corresponding Source.)
|
||||
|
||||
5. Combined Libraries.
|
||||
|
||||
You may place library facilities that are a work based on the
|
||||
Library side by side in a single library together with other library
|
||||
facilities that are not Applications and are not covered by this
|
||||
License, and convey such a combined library under terms of your
|
||||
choice, if you do both of the following:
|
||||
|
||||
a) Accompany the combined library with a copy of the same work based
|
||||
on the Library, uncombined with any other library facilities,
|
||||
conveyed under the terms of this License.
|
||||
|
||||
b) Give prominent notice with the combined library that part of it
|
||||
is a work based on the Library, and explaining where to find the
|
||||
accompanying uncombined form of the same work.
|
||||
|
||||
6. Revised Versions of the GNU Lesser General Public License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions
|
||||
of the GNU Lesser General Public License from time to time. Such new
|
||||
versions will be similar in spirit to the present version, but may
|
||||
differ in detail to address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Library as you received it specifies that a certain numbered version
|
||||
of the GNU Lesser General Public License "or any later version"
|
||||
applies to it, you have the option of following the terms and
|
||||
conditions either of that published version or of any later version
|
||||
published by the Free Software Foundation. If the Library as you
|
||||
received it does not specify a version number of the GNU Lesser
|
||||
General Public License, you may choose any version of the GNU Lesser
|
||||
General Public License ever published by the Free Software Foundation.
|
||||
|
||||
If the Library as you received it specifies that a proxy can decide
|
||||
whether future versions of the GNU Lesser General Public License shall
|
||||
apply, that proxy's public statement of acceptance of any version is
|
||||
permanent authorization for you to choose that version for the
|
||||
Library.
|
2
vendor/github.com/ngaut/log/README.md (generated, vendored)
@@ -1,2 +0,0 @@
logging
=======
18
vendor/github.com/ngaut/log/crash_unix.go (generated, vendored)
@@ -1,18 +0,0 @@
// +build freebsd openbsd netbsd dragonfly darwin linux

package log

import (
	"log"
	"os"
	"syscall"
)

func CrashLog(file string) {
	f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Println(err.Error())
	} else {
		syscall.Dup2(int(f.Fd()), 2)
	}
}
37
vendor/github.com/ngaut/log/crash_win.go (generated, vendored)
@@ -1,37 +0,0 @@
// +build windows

package log

import (
	"log"
	"os"
	"syscall"
)

var (
	kernel32         = syscall.MustLoadDLL("kernel32.dll")
	procSetStdHandle = kernel32.MustFindProc("SetStdHandle")
)

func setStdHandle(stdhandle int32, handle syscall.Handle) error {
	r0, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0)
	if r0 == 0 {
		if e1 != 0 {
			return error(e1)
		}
		return syscall.EINVAL
	}
	return nil
}

func CrashLog(file string) {
	f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Println(err.Error())
	} else {
		err = setStdHandle(syscall.STD_ERROR_HANDLE, syscall.Handle(f.Fd()))
		if err != nil {
			log.Println(err.Error())
		}
	}
}
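crash_unix.go and crash_win.go expose the same CrashLog(file) entry point: both open the file in append mode and then redirect standard error into it (Dup2 onto fd 2 on Unix, SetStdHandle with STD_ERROR_HANDLE on Windows), so panic output survives on disk. Typical use is one call at process start, as in this minimal sketch; the path is a placeholder.

package main

import (
	"github.com/ngaut/log"
)

func main() {
	// After this call, anything the runtime writes to stderr,
	// including panic traces, also lands in the crash file.
	log.CrashLog("/var/log/myapp.crash") // placeholder path

	log.Info("service starting")
	// ... rest of the program ...
}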
380
vendor/github.com/ngaut/log/log.go (generated, vendored)
@@ -1,380 +0,0 @@
|
||||
//high level log wrapper, so it can output different log based on level
|
||||
package log
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
Ldate = log.Ldate
|
||||
Llongfile = log.Llongfile
|
||||
Lmicroseconds = log.Lmicroseconds
|
||||
Lshortfile = log.Lshortfile
|
||||
LstdFlags = log.LstdFlags
|
||||
Ltime = log.Ltime
|
||||
)
|
||||
|
||||
type (
|
||||
LogLevel int
|
||||
LogType int
|
||||
)
|
||||
|
||||
const (
|
||||
LOG_FATAL = LogType(0x1)
|
||||
LOG_ERROR = LogType(0x2)
|
||||
LOG_WARNING = LogType(0x4)
|
||||
	LOG_INFO  = LogType(0x8)
	LOG_DEBUG = LogType(0x10)
)

const (
	LOG_LEVEL_NONE  = LogLevel(0x0)
	LOG_LEVEL_FATAL = LOG_LEVEL_NONE | LogLevel(LOG_FATAL)
	LOG_LEVEL_ERROR = LOG_LEVEL_FATAL | LogLevel(LOG_ERROR)
	LOG_LEVEL_WARN  = LOG_LEVEL_ERROR | LogLevel(LOG_WARNING)
	LOG_LEVEL_INFO  = LOG_LEVEL_WARN | LogLevel(LOG_INFO)
	LOG_LEVEL_DEBUG = LOG_LEVEL_INFO | LogLevel(LOG_DEBUG)
	LOG_LEVEL_ALL   = LOG_LEVEL_DEBUG
)

const FORMAT_TIME_DAY string = "20060102"
const FORMAT_TIME_HOUR string = "2006010215"

var _log *logger = New()

func init() {
	SetFlags(Ldate | Ltime | Lshortfile)
	SetHighlighting(runtime.GOOS != "windows")
}

func Logger() *log.Logger {
	return _log._log
}

func SetLevel(level LogLevel) {
	_log.SetLevel(level)
}
func GetLogLevel() LogLevel {
	return _log.level
}

func SetOutput(out io.Writer) {
	_log.SetOutput(out)
}

func SetOutputByName(path string) error {
	return _log.SetOutputByName(path)
}

func SetFlags(flags int) {
	_log._log.SetFlags(flags)
}

func Info(v ...interface{}) {
	_log.Info(v...)
}

func Infof(format string, v ...interface{}) {
	_log.Infof(format, v...)
}

func Debug(v ...interface{}) {
	_log.Debug(v...)
}

func Debugf(format string, v ...interface{}) {
	_log.Debugf(format, v...)
}

func Warn(v ...interface{}) {
	_log.Warning(v...)
}

func Warnf(format string, v ...interface{}) {
	_log.Warningf(format, v...)
}

func Warning(v ...interface{}) {
	_log.Warning(v...)
}

func Warningf(format string, v ...interface{}) {
	_log.Warningf(format, v...)
}

func Error(v ...interface{}) {
	_log.Error(v...)
}

func Errorf(format string, v ...interface{}) {
	_log.Errorf(format, v...)
}

func Fatal(v ...interface{}) {
	_log.Fatal(v...)
}

func Fatalf(format string, v ...interface{}) {
	_log.Fatalf(format, v...)
}

func SetLevelByString(level string) {
	_log.SetLevelByString(level)
}

func SetHighlighting(highlighting bool) {
	_log.SetHighlighting(highlighting)
}

func SetRotateByDay() {
	_log.SetRotateByDay()
}

func SetRotateByHour() {
	_log.SetRotateByHour()
}

type logger struct {
	_log         *log.Logger
	level        LogLevel
	highlighting bool

	dailyRolling bool
	hourRolling  bool

	fileName  string
	logSuffix string
	fd        *os.File

	lock sync.Mutex
}

func (l *logger) SetHighlighting(highlighting bool) {
	l.highlighting = highlighting
}

func (l *logger) SetLevel(level LogLevel) {
	l.level = level
}

func (l *logger) SetLevelByString(level string) {
	l.level = StringToLogLevel(level)
}

func (l *logger) SetRotateByDay() {
	l.dailyRolling = true
	l.logSuffix = genDayTime(time.Now())
}

func (l *logger) SetRotateByHour() {
	l.hourRolling = true
	l.logSuffix = genHourTime(time.Now())
}

func (l *logger) rotate() error {
	l.lock.Lock()
	defer l.lock.Unlock()

	var suffix string
	if l.dailyRolling {
		suffix = genDayTime(time.Now())
	} else if l.hourRolling {
		suffix = genHourTime(time.Now())
	} else {
		return nil
	}

	// Notice: if suffix is not equal to l.logSuffix, then rotate.
	if suffix != l.logSuffix {
		err := l.doRotate(suffix)
		if err != nil {
			return err
		}
	}

	return nil
}

func (l *logger) doRotate(suffix string) error {
	// Notice: the error from Close is intentionally not checked here.
	l.fd.Close()

	lastFileName := l.fileName + "." + l.logSuffix
	err := os.Rename(l.fileName, lastFileName)
	if err != nil {
		return err
	}

	err = l.SetOutputByName(l.fileName)
	if err != nil {
		return err
	}

	l.logSuffix = suffix

	return nil
}

func (l *logger) SetOutput(out io.Writer) {
	l._log = log.New(out, l._log.Prefix(), l._log.Flags())
}

func (l *logger) SetOutputByName(path string) error {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666)
	if err != nil {
		log.Fatal(err)
	}

	l.SetOutput(f)

	l.fileName = path
	l.fd = f

	return err
}

func (l *logger) log(t LogType, v ...interface{}) {
	if l.level|LogLevel(t) != l.level {
		return
	}

	err := l.rotate()
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err.Error())
		return
	}

	v1 := make([]interface{}, len(v)+2)
	logStr, logColor := LogTypeToString(t)
	if l.highlighting {
		v1[0] = "\033" + logColor + "m[" + logStr + "]"
		copy(v1[1:], v)
		v1[len(v)+1] = "\033[0m"
	} else {
		v1[0] = "[" + logStr + "]"
		copy(v1[1:], v)
		v1[len(v)+1] = ""
	}

	s := fmt.Sprintln(v1...)
	l._log.Output(4, s)
}

func (l *logger) logf(t LogType, format string, v ...interface{}) {
	if l.level|LogLevel(t) != l.level {
		return
	}

	err := l.rotate()
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err.Error())
		return
	}

	logStr, logColor := LogTypeToString(t)
	var s string
	if l.highlighting {
		s = "\033" + logColor + "m[" + logStr + "] " + fmt.Sprintf(format, v...) + "\033[0m"
	} else {
		s = "[" + logStr + "] " + fmt.Sprintf(format, v...)
	}
	l._log.Output(4, s)
}

func (l *logger) Fatal(v ...interface{}) {
	l.log(LOG_FATAL, v...)
	os.Exit(-1)
}

func (l *logger) Fatalf(format string, v ...interface{}) {
	l.logf(LOG_FATAL, format, v...)
	os.Exit(-1)
}

func (l *logger) Error(v ...interface{}) {
	l.log(LOG_ERROR, v...)
}

func (l *logger) Errorf(format string, v ...interface{}) {
	l.logf(LOG_ERROR, format, v...)
}

func (l *logger) Warning(v ...interface{}) {
	l.log(LOG_WARNING, v...)
}

func (l *logger) Warningf(format string, v ...interface{}) {
	l.logf(LOG_WARNING, format, v...)
}

func (l *logger) Debug(v ...interface{}) {
	l.log(LOG_DEBUG, v...)
}

func (l *logger) Debugf(format string, v ...interface{}) {
	l.logf(LOG_DEBUG, format, v...)
}

func (l *logger) Info(v ...interface{}) {
	l.log(LOG_INFO, v...)
}

func (l *logger) Infof(format string, v ...interface{}) {
	l.logf(LOG_INFO, format, v...)
}

func StringToLogLevel(level string) LogLevel {
	switch level {
	case "fatal":
		return LOG_LEVEL_FATAL
	case "error":
		return LOG_LEVEL_ERROR
	case "warn":
		return LOG_LEVEL_WARN
	case "warning":
		return LOG_LEVEL_WARN
	case "debug":
		return LOG_LEVEL_DEBUG
	case "info":
		return LOG_LEVEL_INFO
	}
	return LOG_LEVEL_ALL
}

func LogTypeToString(t LogType) (string, string) {
	switch t {
	case LOG_FATAL:
		return "fatal", "[0;31"
	case LOG_ERROR:
		return "error", "[0;31"
	case LOG_WARNING:
		return "warning", "[0;33"
	case LOG_DEBUG:
		return "debug", "[0;36"
	case LOG_INFO:
		return "info", "[0;37"
	}
	return "unknown", "[0;37"
}

func genDayTime(t time.Time) string {
	return t.Format(FORMAT_TIME_DAY)
}

func genHourTime(t time.Time) string {
	return t.Format(FORMAT_TIME_HOUR)
}

func New() *logger {
	return Newlogger(os.Stderr, "")
}

func Newlogger(w io.Writer, prefix string) *logger {
	return &logger{_log: log.New(w, prefix, LstdFlags), level: LOG_LEVEL_ALL, highlighting: true}
}
197
vendor/github.com/ngaut/log/log_test.go
generated
vendored
@ -1,197 +0,0 @@
|
||||
package log
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func isFileExists(name string) bool {
|
||||
f, err := os.Stat(name)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if f.IsDir() {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func parseDate(value string, format string) (time.Time, error) {
|
||||
tt, err := time.ParseInLocation(format, value, time.Local)
|
||||
if err != nil {
|
||||
fmt.Println("[Error]" + err.Error())
|
||||
return tt, err
|
||||
}
|
||||
|
||||
return tt, nil
|
||||
}
|
||||
|
||||
func checkLogData(fileName string, containData string, num int64) error {
|
||||
input, err := os.OpenFile(fileName, os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer input.Close()
|
||||
|
||||
var lineNum int64
|
||||
br := bufio.NewReader(input)
|
||||
for {
|
||||
line, err := br.ReadString('\n')
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
|
||||
realLine := strings.TrimRight(line, "\n")
|
||||
if strings.Contains(realLine, containData) {
|
||||
lineNum += 1
|
||||
}
|
||||
}
|
||||
|
||||
// check whether num is equal to lineNum
|
||||
if lineNum != num {
|
||||
return fmt.Errorf("checkLogData fail - %d vs %d", lineNum, num)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestDayRotateCase(t *testing.T) {
|
||||
_log = New()
|
||||
|
||||
logName := "example_day_test.log"
|
||||
if isFileExists(logName) {
|
||||
err := os.Remove(logName)
|
||||
if err != nil {
|
||||
t.Errorf("Remove old log file fail - %s, %s\n", err.Error(), logName)
|
||||
}
|
||||
}
|
||||
|
||||
SetRotateByDay()
|
||||
err := SetOutputByName(logName)
|
||||
if err != nil {
|
||||
t.Errorf("SetOutputByName fail - %s, %s\n", err.Error(), logName)
|
||||
}
|
||||
|
||||
if _log.logSuffix == "" {
|
||||
t.Errorf("bad log suffix fail - %s\n", _log.logSuffix)
|
||||
}
|
||||
|
||||
day, err := parseDate(_log.logSuffix, FORMAT_TIME_DAY)
|
||||
if err != nil {
|
||||
t.Errorf("parseDate fail - %s, %s\n", err.Error(), _log.logSuffix)
|
||||
}
|
||||
|
||||
_log.Info("Test data")
|
||||
_log.Infof("Test data - %s", day.String())
|
||||
|
||||
// mock log suffix to check rotate
|
||||
lastDay := day.AddDate(0, 0, -1)
|
||||
_log.logSuffix = genDayTime(lastDay)
|
||||
oldLogSuffix := _log.logSuffix
|
||||
|
||||
_log.Info("Test new data")
|
||||
_log.Infof("Test new data - %s", day.String())
|
||||
|
||||
err = _log.fd.Close()
|
||||
if err != nil {
|
||||
t.Errorf("close log fd fail - %s, %s\n", err.Error(), _log.fileName)
|
||||
}
|
||||
|
||||
// check both old and new log file datas
|
||||
oldLogName := logName + "." + oldLogSuffix
|
||||
err = checkLogData(oldLogName, "Test data", 2)
|
||||
if err != nil {
|
||||
t.Errorf("old log file checkLogData fail - %s, %s\n", err.Error(), oldLogName)
|
||||
}
|
||||
|
||||
err = checkLogData(logName, "Test new data", 2)
|
||||
if err != nil {
|
||||
t.Errorf("new log file checkLogData fail - %s, %s\n", err.Error(), logName)
|
||||
}
|
||||
|
||||
// remove test log files
|
||||
err = os.Remove(oldLogName)
|
||||
if err != nil {
|
||||
t.Errorf("Remove final old log file fail - %s, %s\n", err.Error(), oldLogName)
|
||||
}
|
||||
|
||||
err = os.Remove(logName)
|
||||
if err != nil {
|
||||
t.Errorf("Remove final new log file fail - %s, %s\n", err.Error(), logName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHourRotateCase(t *testing.T) {
|
||||
_log = New()
|
||||
|
||||
logName := "example_hour_test.log"
|
||||
if isFileExists(logName) {
|
||||
err := os.Remove(logName)
|
||||
if err != nil {
|
||||
t.Errorf("Remove old log file fail - %s, %s\n", err.Error(), logName)
|
||||
}
|
||||
}
|
||||
|
||||
SetRotateByHour()
|
||||
err := SetOutputByName(logName)
|
||||
if err != nil {
|
||||
t.Errorf("SetOutputByName fail - %s, %s\n", err.Error(), logName)
|
||||
}
|
||||
|
||||
if _log.logSuffix == "" {
|
||||
t.Errorf("bad log suffix fail - %s\n", _log.logSuffix)
|
||||
}
|
||||
|
||||
hour, err := parseDate(_log.logSuffix, FORMAT_TIME_HOUR)
|
||||
if err != nil {
|
||||
t.Errorf("parseDate fail - %s, %s\n", err.Error(), _log.logSuffix)
|
||||
}
|
||||
|
||||
_log.Info("Test data")
|
||||
_log.Infof("Test data - %s", hour.String())
|
||||
|
||||
// mock log suffix to check rotate
|
||||
lastHour := hour.Add(time.Duration(-1 * time.Hour))
|
||||
_log.logSuffix = genHourTime(lastHour)
|
||||
oldLogSuffix := _log.logSuffix
|
||||
|
||||
_log.Info("Test new data")
|
||||
_log.Infof("Test new data - %s", hour.String())
|
||||
|
||||
err = _log.fd.Close()
|
||||
if err != nil {
|
||||
t.Errorf("close log fd fail - %s, %s\n", err.Error(), _log.fileName)
|
||||
}
|
||||
|
||||
// check both old and new log file datas
|
||||
oldLogName := logName + "." + oldLogSuffix
|
||||
err = checkLogData(oldLogName, "Test data", 2)
|
||||
if err != nil {
|
||||
t.Errorf("old log file checkLogData fail - %s, %s\n", err.Error(), oldLogName)
|
||||
}
|
||||
|
||||
err = checkLogData(logName, "Test new data", 2)
|
||||
if err != nil {
|
||||
t.Errorf("new log file checkLogData fail - %s, %s\n", err.Error(), logName)
|
||||
}
|
||||
|
||||
// remove test log files
|
||||
err = os.Remove(oldLogName)
|
||||
if err != nil {
|
||||
t.Errorf("Remove final old log file fail - %s, %s\n", err.Error(), oldLogName)
|
||||
}
|
||||
|
||||
err = os.Remove(logName)
|
||||
if err != nil {
|
||||
t.Errorf("Remove final new log file fail - %s, %s\n", err.Error(), logName)
|
||||
}
|
||||
}
|
72
vendor/github.com/ngaut/pools/id_pool.go
generated
vendored
@ -1,72 +0,0 @@
// Copyright 2014, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pools

import (
	"fmt"
	"sync"
)

// IDPool is used to ensure that the set of IDs in use concurrently never
// contains any duplicates. The IDs start at 1 and increase without bound, but
// will never be larger than the peak number of concurrent uses.
//
// IDPool's Get() and Set() methods can be used concurrently.
type IDPool struct {
	sync.Mutex

	// used holds the set of values that have been returned to us with Put().
	used map[uint32]bool
	// maxUsed remembers the largest value we've given out.
	maxUsed uint32
}

// NewIDPool creates and initializes an IDPool.
func NewIDPool() *IDPool {
	return &IDPool{
		used: make(map[uint32]bool),
	}
}

// Get returns an ID that is unique among currently active users of this pool.
func (pool *IDPool) Get() (id uint32) {
	pool.Lock()
	defer pool.Unlock()

	// Pick a value that's been returned, if any.
	for key, _ := range pool.used {
		delete(pool.used, key)
		return key
	}

	// No recycled IDs are available, so increase the pool size.
	pool.maxUsed += 1
	return pool.maxUsed
}

// Put recycles an ID back into the pool for others to use. Putting back a value
// or 0, or a value that is not currently "checked out", will result in a panic
// because that should never happen except in the case of a programming error.
func (pool *IDPool) Put(id uint32) {
	pool.Lock()
	defer pool.Unlock()

	if id < 1 || id > pool.maxUsed {
		panic(fmt.Errorf("IDPool.Put(%v): invalid value, must be in the range [1,%v]", id, pool.maxUsed))
	}

	if pool.used[id] {
		panic(fmt.Errorf("IDPool.Put(%v): can't put value that was already recycled", id))
	}

	// If we're recycling maxUsed, just shrink the pool.
	if id == pool.maxUsed {
		pool.maxUsed = id - 1
		return
	}

	// Add it to the set of recycled IDs.
	pool.used[id] = true
}
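
For context, a minimal sketch of how the IDPool deleted above is typically used. This is not part of the commit; the import path simply assumes the vendored package shown in this diff, and the printed values follow from the Get/Put semantics documented in the file:

package main

import (
	"fmt"

	"github.com/ngaut/pools"
)

func main() {
	pool := pools.NewIDPool()
	a := pool.Get() // first Get hands out 1
	b := pool.Get() // then 2
	pool.Put(a)     // 1 goes back into the recycled set
	c := pool.Get() // reuses 1 instead of growing to 3
	fmt.Println(a, b, c) // 1 2 1
}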
118
vendor/github.com/ngaut/pools/id_pool_test.go
generated
vendored
@ -1,118 +0,0 @@
|
||||
// Copyright 2014, Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pools
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func (pool *IDPool) want(want *IDPool, t *testing.T) {
|
||||
if pool.maxUsed != want.maxUsed {
|
||||
t.Errorf("pool.maxUsed = %#v, want %#v", pool.maxUsed, want.maxUsed)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(pool.used, want.used) {
|
||||
t.Errorf("pool.used = %#v, want %#v", pool.used, want.used)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIDPoolFirstGet(t *testing.T) {
|
||||
pool := NewIDPool()
|
||||
|
||||
if got := pool.Get(); got != 1 {
|
||||
t.Errorf("pool.Get() = %v, want 1", got)
|
||||
}
|
||||
|
||||
pool.want(&IDPool{used: map[uint32]bool{}, maxUsed: 1}, t)
|
||||
}
|
||||
|
||||
func TestIDPoolSecondGet(t *testing.T) {
|
||||
pool := NewIDPool()
|
||||
pool.Get()
|
||||
|
||||
if got := pool.Get(); got != 2 {
|
||||
t.Errorf("pool.Get() = %v, want 2", got)
|
||||
}
|
||||
|
||||
pool.want(&IDPool{used: map[uint32]bool{}, maxUsed: 2}, t)
|
||||
}
|
||||
|
||||
func TestIDPoolPutToUsedSet(t *testing.T) {
|
||||
pool := NewIDPool()
|
||||
id1 := pool.Get()
|
||||
pool.Get()
|
||||
pool.Put(id1)
|
||||
|
||||
pool.want(&IDPool{used: map[uint32]bool{1: true}, maxUsed: 2}, t)
|
||||
}
|
||||
|
||||
func TestIDPoolPutMaxUsed1(t *testing.T) {
|
||||
pool := NewIDPool()
|
||||
id1 := pool.Get()
|
||||
pool.Put(id1)
|
||||
|
||||
pool.want(&IDPool{used: map[uint32]bool{}, maxUsed: 0}, t)
|
||||
}
|
||||
|
||||
func TestIDPoolPutMaxUsed2(t *testing.T) {
|
||||
pool := NewIDPool()
|
||||
pool.Get()
|
||||
id2 := pool.Get()
|
||||
pool.Put(id2)
|
||||
|
||||
pool.want(&IDPool{used: map[uint32]bool{}, maxUsed: 1}, t)
|
||||
}
|
||||
|
||||
func TestIDPoolGetFromUsedSet(t *testing.T) {
|
||||
pool := NewIDPool()
|
||||
id1 := pool.Get()
|
||||
pool.Get()
|
||||
pool.Put(id1)
|
||||
|
||||
if got := pool.Get(); got != 1 {
|
||||
t.Errorf("pool.Get() = %v, want 1", got)
|
||||
}
|
||||
|
||||
pool.want(&IDPool{used: map[uint32]bool{}, maxUsed: 2}, t)
|
||||
}
|
||||
|
||||
func wantError(want string, t *testing.T) {
|
||||
rec := recover()
|
||||
if rec == nil {
|
||||
t.Errorf("expected panic, but there wasn't one")
|
||||
}
|
||||
err, ok := rec.(error)
|
||||
if !ok || !strings.Contains(err.Error(), want) {
|
||||
t.Errorf("wrong error, got '%v', want '%v'", err, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIDPoolPut0(t *testing.T) {
|
||||
pool := NewIDPool()
|
||||
pool.Get()
|
||||
|
||||
defer wantError("invalid value", t)
|
||||
pool.Put(0)
|
||||
}
|
||||
|
||||
func TestIDPoolPutInvalid(t *testing.T) {
|
||||
pool := NewIDPool()
|
||||
pool.Get()
|
||||
|
||||
defer wantError("invalid value", t)
|
||||
pool.Put(5)
|
||||
}
|
||||
|
||||
func TestIDPoolPutDuplicate(t *testing.T) {
|
||||
pool := NewIDPool()
|
||||
pool.Get()
|
||||
pool.Get()
|
||||
pool.Put(1)
|
||||
|
||||
defer wantError("already recycled", t)
|
||||
pool.Put(1)
|
||||
}
|
149
vendor/github.com/ngaut/pools/numbered.go
generated
vendored
@ -1,149 +0,0 @@
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pools

import (
	"fmt"
	"sync"
	"time"
)

// Numbered allows you to manage resources by tracking them with numbers.
// There are no interface restrictions on what you can track.
type Numbered struct {
	mu        sync.Mutex
	empty     *sync.Cond // Broadcast when pool becomes empty
	resources map[int64]*numberedWrapper
}

type numberedWrapper struct {
	val         interface{}
	inUse       bool
	purpose     string
	timeCreated time.Time
	timeUsed    time.Time
}

func NewNumbered() *Numbered {
	n := &Numbered{resources: make(map[int64]*numberedWrapper)}
	n.empty = sync.NewCond(&n.mu)
	return n
}

// Register starts tracking a resource by the supplied id.
// It does not lock the object.
// It returns an error if the id already exists.
func (nu *Numbered) Register(id int64, val interface{}) error {
	nu.mu.Lock()
	defer nu.mu.Unlock()
	if _, ok := nu.resources[id]; ok {
		return fmt.Errorf("already present")
	}
	now := time.Now()
	nu.resources[id] = &numberedWrapper{
		val:         val,
		timeCreated: now,
		timeUsed:    now,
	}
	return nil
}

// Unregister forgets the specified resource.
// If the resource is not present, it's ignored.
func (nu *Numbered) Unregister(id int64) {
	nu.mu.Lock()
	defer nu.mu.Unlock()
	delete(nu.resources, id)
	if len(nu.resources) == 0 {
		nu.empty.Broadcast()
	}
}

// Get locks the resource for use. It accepts a purpose as a string.
// If it cannot be found, it returns a "not found" error. If in use,
// it returns a "in use: purpose" error.
func (nu *Numbered) Get(id int64, purpose string) (val interface{}, err error) {
	nu.mu.Lock()
	defer nu.mu.Unlock()
	nw, ok := nu.resources[id]
	if !ok {
		return nil, fmt.Errorf("not found")
	}
	if nw.inUse {
		return nil, fmt.Errorf("in use: %s", nw.purpose)
	}
	nw.inUse = true
	nw.purpose = purpose
	return nw.val, nil
}

// Put unlocks a resource for someone else to use.
func (nu *Numbered) Put(id int64) {
	nu.mu.Lock()
	defer nu.mu.Unlock()
	if nw, ok := nu.resources[id]; ok {
		nw.inUse = false
		nw.purpose = ""
		nw.timeUsed = time.Now()
	}
}

// GetOutdated returns a list of resources that are older than age, and locks them.
// It does not return any resources that are already locked.
func (nu *Numbered) GetOutdated(age time.Duration, purpose string) (vals []interface{}) {
	nu.mu.Lock()
	defer nu.mu.Unlock()
	now := time.Now()
	for _, nw := range nu.resources {
		if nw.inUse {
			continue
		}
		if nw.timeCreated.Add(age).Sub(now) <= 0 {
			nw.inUse = true
			nw.purpose = purpose
			vals = append(vals, nw.val)
		}
	}
	return vals
}

// GetIdle returns a list of resources that have been idle for longer
// than timeout, and locks them. It does not return any resources that
// are already locked.
func (nu *Numbered) GetIdle(timeout time.Duration, purpose string) (vals []interface{}) {
	nu.mu.Lock()
	defer nu.mu.Unlock()
	now := time.Now()
	for _, nw := range nu.resources {
		if nw.inUse {
			continue
		}
		if nw.timeUsed.Add(timeout).Sub(now) <= 0 {
			nw.inUse = true
			nw.purpose = purpose
			vals = append(vals, nw.val)
		}
	}
	return vals
}

// WaitForEmpty returns as soon as the pool becomes empty
func (nu *Numbered) WaitForEmpty() {
	nu.mu.Lock()
	defer nu.mu.Unlock()
	for len(nu.resources) != 0 {
		nu.empty.Wait()
	}
}

func (nu *Numbered) StatsJSON() string {
	return fmt.Sprintf("{\"Size\": %v}", nu.Size())
}

func (nu *Numbered) Size() (size int64) {
	nu.mu.Lock()
	defer nu.mu.Unlock()
	return int64(len(nu.resources))
}
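
A hedged usage sketch of the Numbered tracker above, not part of the commit. The id 42, the "some resource" value, and the "query" purpose string are made-up values for illustration; the calls themselves match the API in the deleted file:

package main

import (
	"fmt"

	"github.com/ngaut/pools"
)

func main() {
	n := pools.NewNumbered()

	// Track an arbitrary value under id 42.
	if err := n.Register(42, "some resource"); err != nil {
		fmt.Println("register:", err)
		return
	}

	// Lock it for a purpose; a second Get now would fail with "in use: query".
	v, err := n.Get(42, "query")
	fmt.Println(v, err)

	n.Put(42)        // release the lock
	n.Unregister(42) // stop tracking it
	n.WaitForEmpty() // returns immediately, the pool is empty now
}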
84
vendor/github.com/ngaut/pools/numbered_test.go
generated
vendored
@ -1,84 +0,0 @@
|
||||
// Copyright 2012, Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pools
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestNumbered(t *testing.T) {
|
||||
id := int64(0)
|
||||
p := NewNumbered()
|
||||
|
||||
var err error
|
||||
if err = p.Register(id, id); err != nil {
|
||||
t.Errorf("Error %v", err)
|
||||
}
|
||||
if err = p.Register(id, id); err.Error() != "already present" {
|
||||
t.Errorf("want 'already present', got '%v'", err)
|
||||
}
|
||||
var v interface{}
|
||||
if v, err = p.Get(id, "test"); err != nil {
|
||||
t.Errorf("Error %v", err)
|
||||
}
|
||||
if v.(int64) != id {
|
||||
t.Errorf("want %v, got %v", id, v.(int64))
|
||||
}
|
||||
if v, err = p.Get(id, "test1"); err.Error() != "in use: test" {
|
||||
t.Errorf("want 'in use: test', got '%v'", err)
|
||||
}
|
||||
p.Put(id)
|
||||
if v, err = p.Get(1, "test2"); err.Error() != "not found" {
|
||||
t.Errorf("want 'not found', got '%v'", err)
|
||||
}
|
||||
p.Unregister(1) // Should not fail
|
||||
p.Unregister(0)
|
||||
// p is now empty
|
||||
|
||||
p.Register(id, id)
|
||||
id++
|
||||
p.Register(id, id)
|
||||
time.Sleep(300 * time.Millisecond)
|
||||
id++
|
||||
p.Register(id, id)
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// p has 0, 1, 2 (0 & 1 are aged)
|
||||
vals := p.GetOutdated(200*time.Millisecond, "by outdated")
|
||||
if len(vals) != 2 {
|
||||
t.Errorf("want 2, got %v", len(vals))
|
||||
}
|
||||
if v, err = p.Get(vals[0].(int64), "test1"); err.Error() != "in use: by outdated" {
|
||||
t.Errorf("want 'in use: by outdated', got '%v'", err)
|
||||
}
|
||||
for _, v := range vals {
|
||||
p.Put(v.(int64))
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// p has 0, 1, 2 (2 is idle)
|
||||
vals = p.GetIdle(200*time.Millisecond, "by idle")
|
||||
if len(vals) != 1 {
|
||||
t.Errorf("want 1, got %v", len(vals))
|
||||
}
|
||||
if v, err = p.Get(vals[0].(int64), "test1"); err.Error() != "in use: by idle" {
|
||||
t.Errorf("want 'in use: by idle', got '%v'", err)
|
||||
}
|
||||
if vals[0].(int64) != 2 {
|
||||
t.Errorf("want 2, got %v", vals[0])
|
||||
}
|
||||
p.Unregister(vals[0].(int64))
|
||||
|
||||
// p has 0 & 1
|
||||
if p.Size() != 2 {
|
||||
t.Errorf("want 2, got %v", p.Size())
|
||||
}
|
||||
go func() {
|
||||
p.Unregister(0)
|
||||
p.Unregister(1)
|
||||
}()
|
||||
p.WaitForEmpty()
|
||||
}
|
228
vendor/github.com/ngaut/pools/resource_pool.go
generated
vendored
@ -1,228 +0,0 @@
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package pools provides functionality to manage and reuse resources
// like connections.
package pools

import (
	"fmt"
	"time"

	"github.com/ngaut/sync2"
)

var (
	CLOSED_ERR = fmt.Errorf("ResourcePool is closed")
)

// Factory is a function that can be used to create a resource.
type Factory func() (Resource, error)

// Every resource needs to support the Resource interface.
// Thread synchronization between Close() and IsClosed()
// is the responsibility of the caller.
type Resource interface {
	Close()
}

// ResourcePool allows you to use a pool of resources.
type ResourcePool struct {
	resources   chan resourceWrapper
	factory     Factory
	capacity    sync2.AtomicInt64
	idleTimeout sync2.AtomicDuration

	// stats
	waitCount sync2.AtomicInt64
	waitTime  sync2.AtomicDuration
}

type resourceWrapper struct {
	resource Resource
	timeUsed time.Time
}

// NewResourcePool creates a new ResourcePool pool.
// capacity is the initial capacity of the pool.
// maxCap is the maximum capacity.
// If a resource is unused beyond idleTimeout, it's discarded.
// An idleTimeout of 0 means that there is no timeout.
func NewResourcePool(factory Factory, capacity, maxCap int, idleTimeout time.Duration) *ResourcePool {
	if capacity <= 0 || maxCap <= 0 || capacity > maxCap {
		panic(fmt.Errorf("Invalid/out of range capacity"))
	}
	rp := &ResourcePool{
		resources:   make(chan resourceWrapper, maxCap),
		factory:     factory,
		capacity:    sync2.AtomicInt64(capacity),
		idleTimeout: sync2.AtomicDuration(idleTimeout),
	}
	for i := 0; i < capacity; i++ {
		rp.resources <- resourceWrapper{}
	}
	return rp
}

// Close empties the pool calling Close on all its resources.
// You can call Close while there are outstanding resources.
// It waits for all resources to be returned (Put).
// After a Close, Get and TryGet are not allowed.
func (rp *ResourcePool) Close() {
	rp.SetCapacity(0)
}

func (rp *ResourcePool) IsClosed() (closed bool) {
	return rp.capacity.Get() == 0
}

// Get will return the next available resource. If capacity
// has not been reached, it will create a new one using the factory. Otherwise,
// it will indefinitely wait till the next resource becomes available.
func (rp *ResourcePool) Get() (resource Resource, err error) {
	return rp.get(true)
}

// TryGet will return the next available resource. If none is available, and capacity
// has not been reached, it will create a new one using the factory. Otherwise,
// it will return nil with no error.
func (rp *ResourcePool) TryGet() (resource Resource, err error) {
	return rp.get(false)
}

func (rp *ResourcePool) get(wait bool) (resource Resource, err error) {
	// Fetch
	var wrapper resourceWrapper
	var ok bool
	select {
	case wrapper, ok = <-rp.resources:
	default:
		if !wait {
			return nil, nil
		}
		startTime := time.Now()
		wrapper, ok = <-rp.resources
		rp.recordWait(startTime)
	}
	if !ok {
		return nil, CLOSED_ERR
	}

	// Unwrap
	timeout := rp.idleTimeout.Get()
	if wrapper.resource != nil && timeout > 0 && wrapper.timeUsed.Add(timeout).Sub(time.Now()) < 0 {
		wrapper.resource.Close()
		wrapper.resource = nil
	}
	if wrapper.resource == nil {
		wrapper.resource, err = rp.factory()
		if err != nil {
			rp.resources <- resourceWrapper{}
		}
	}
	return wrapper.resource, err
}

// Put will return a resource to the pool. For every successful Get,
// a corresponding Put is required. If you no longer need a resource,
// you will need to call Put(nil) instead of returning the closed resource.
// This will eventually cause a new resource to be created in its place.
func (rp *ResourcePool) Put(resource Resource) {
	var wrapper resourceWrapper
	if resource != nil {
		wrapper = resourceWrapper{resource, time.Now()}
	}
	select {
	case rp.resources <- wrapper:
	default:
		panic(fmt.Errorf("Attempt to Put into a full ResourcePool"))
	}
}

// SetCapacity changes the capacity of the pool.
// You can use it to shrink or expand, but not beyond
// the max capacity. If the change requires the pool
// to be shrunk, SetCapacity waits till the necessary
// number of resources are returned to the pool.
// A SetCapacity of 0 is equivalent to closing the ResourcePool.
func (rp *ResourcePool) SetCapacity(capacity int) error {
	if capacity < 0 || capacity > cap(rp.resources) {
		return fmt.Errorf("capacity %d is out of range", capacity)
	}

	// Atomically swap new capacity with old, but only
	// if old capacity is non-zero.
	var oldcap int
	for {
		oldcap = int(rp.capacity.Get())
		if oldcap == 0 {
			return CLOSED_ERR
		}
		if oldcap == capacity {
			return nil
		}
		if rp.capacity.CompareAndSwap(int64(oldcap), int64(capacity)) {
			break
		}
	}

	if capacity < oldcap {
		for i := 0; i < oldcap-capacity; i++ {
			wrapper := <-rp.resources
			if wrapper.resource != nil {
				wrapper.resource.Close()
			}
		}
	} else {
		for i := 0; i < capacity-oldcap; i++ {
			rp.resources <- resourceWrapper{}
		}
	}
	if capacity == 0 {
		close(rp.resources)
	}
	return nil
}

func (rp *ResourcePool) recordWait(start time.Time) {
	rp.waitCount.Add(1)
	rp.waitTime.Add(time.Now().Sub(start))
}

func (rp *ResourcePool) SetIdleTimeout(idleTimeout time.Duration) {
	rp.idleTimeout.Set(idleTimeout)
}

func (rp *ResourcePool) StatsJSON() string {
	c, a, mx, wc, wt, it := rp.Stats()
	return fmt.Sprintf(`{"Capacity": %v, "Available": %v, "MaxCapacity": %v, "WaitCount": %v, "WaitTime": %v, "IdleTimeout": %v}`, c, a, mx, wc, int64(wt), int64(it))
}

func (rp *ResourcePool) Stats() (capacity, available, maxCap, waitCount int64, waitTime, idleTimeout time.Duration) {
	return rp.Capacity(), rp.Available(), rp.MaxCap(), rp.WaitCount(), rp.WaitTime(), rp.IdleTimeout()
}

func (rp *ResourcePool) Capacity() int64 {
	return rp.capacity.Get()
}

func (rp *ResourcePool) Available() int64 {
	return int64(len(rp.resources))
}

func (rp *ResourcePool) MaxCap() int64 {
	return int64(cap(rp.resources))
}

func (rp *ResourcePool) WaitCount() int64 {
	return rp.waitCount.Get()
}

func (rp *ResourcePool) WaitTime() time.Duration {
	return rp.waitTime.Get()
}

func (rp *ResourcePool) IdleTimeout() time.Duration {
	return rp.idleTimeout.Get()
}
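
A rough sketch of the ResourcePool API above, wrapping a TCP connection as the pooled resource. This is not part of the commit; the connResource wrapper and the address 127.0.0.1:3306 are placeholders invented for the example, while the Factory signature and the Get/Put/Close calls come from the deleted file:

package main

import (
	"fmt"
	"net"
	"time"

	"github.com/ngaut/pools"
)

// connResource adapts net.Conn to the pools.Resource interface,
// which only requires a Close() method with no return value.
type connResource struct{ net.Conn }

func (c *connResource) Close() { c.Conn.Close() }

func main() {
	factory := func() (pools.Resource, error) {
		conn, err := net.Dial("tcp", "127.0.0.1:3306") // placeholder address
		if err != nil {
			return nil, err
		}
		return &connResource{conn}, nil
	}

	// 5 slots initially, at most 10, idle connections dropped after a minute.
	p := pools.NewResourcePool(factory, 5, 10, time.Minute)
	defer p.Close()

	r, err := p.Get()
	if err != nil {
		fmt.Println("get:", err)
		return
	}
	// ... use r.(*connResource).Conn ...
	p.Put(r) // every successful Get needs a matching Put
}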
487
vendor/github.com/ngaut/pools/resource_pool_test.go
generated
vendored
@ -1,487 +0,0 @@
|
||||
// Copyright 2012, Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pools
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ngaut/sync2"
|
||||
)
|
||||
|
||||
var lastId, count sync2.AtomicInt64
|
||||
|
||||
type TestResource struct {
|
||||
num int64
|
||||
closed bool
|
||||
}
|
||||
|
||||
func (tr *TestResource) Close() {
|
||||
if !tr.closed {
|
||||
count.Add(-1)
|
||||
tr.closed = true
|
||||
}
|
||||
}
|
||||
|
||||
func (tr *TestResource) IsClosed() bool {
|
||||
return tr.closed
|
||||
}
|
||||
|
||||
func PoolFactory() (Resource, error) {
|
||||
count.Add(1)
|
||||
return &TestResource{lastId.Add(1), false}, nil
|
||||
}
|
||||
|
||||
func FailFactory() (Resource, error) {
|
||||
return nil, errors.New("Failed")
|
||||
}
|
||||
|
||||
func SlowFailFactory() (Resource, error) {
|
||||
time.Sleep(10 * time.Nanosecond)
|
||||
return nil, errors.New("Failed")
|
||||
}
|
||||
|
||||
func TestOpen(t *testing.T) {
|
||||
lastId.Set(0)
|
||||
count.Set(0)
|
||||
p := NewResourcePool(PoolFactory, 6, 6, time.Second)
|
||||
p.SetCapacity(5)
|
||||
var resources [10]Resource
|
||||
|
||||
// Test Get
|
||||
for i := 0; i < 5; i++ {
|
||||
r, err := p.Get()
|
||||
resources[i] = r
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error %v", err)
|
||||
}
|
||||
_, available, _, waitCount, waitTime, _ := p.Stats()
|
||||
if available != int64(5-i-1) {
|
||||
t.Errorf("expecting %d, received %d", 5-i-1, available)
|
||||
}
|
||||
if waitCount != 0 {
|
||||
t.Errorf("expecting 0, received %d", waitCount)
|
||||
}
|
||||
if waitTime != 0 {
|
||||
t.Errorf("expecting 0, received %d", waitTime)
|
||||
}
|
||||
if lastId.Get() != int64(i+1) {
|
||||
t.Errorf("Expecting %d, received %d", i+1, lastId.Get())
|
||||
}
|
||||
if count.Get() != int64(i+1) {
|
||||
t.Errorf("Expecting %d, received %d", i+1, count.Get())
|
||||
}
|
||||
}
|
||||
|
||||
// Test TryGet
|
||||
r, err := p.TryGet()
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error %v", err)
|
||||
}
|
||||
if r != nil {
|
||||
t.Errorf("Expecting nil")
|
||||
}
|
||||
for i := 0; i < 5; i++ {
|
||||
p.Put(resources[i])
|
||||
_, available, _, _, _, _ := p.Stats()
|
||||
if available != int64(i+1) {
|
||||
t.Errorf("expecting %d, received %d", 5-i-1, available)
|
||||
}
|
||||
}
|
||||
for i := 0; i < 5; i++ {
|
||||
r, err := p.TryGet()
|
||||
resources[i] = r
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error %v", err)
|
||||
}
|
||||
if r == nil {
|
||||
t.Errorf("Expecting non-nil")
|
||||
}
|
||||
if lastId.Get() != 5 {
|
||||
t.Errorf("Expecting 5, received %d", lastId.Get())
|
||||
}
|
||||
if count.Get() != 5 {
|
||||
t.Errorf("Expecting 5, received %d", count.Get())
|
||||
}
|
||||
}
|
||||
|
||||
// Test that Get waits
|
||||
ch := make(chan bool)
|
||||
go func() {
|
||||
for i := 0; i < 5; i++ {
|
||||
r, err := p.Get()
|
||||
if err != nil {
|
||||
t.Errorf("Get failed: %v", err)
|
||||
}
|
||||
resources[i] = r
|
||||
}
|
||||
for i := 0; i < 5; i++ {
|
||||
p.Put(resources[i])
|
||||
}
|
||||
ch <- true
|
||||
}()
|
||||
for i := 0; i < 5; i++ {
|
||||
// Sleep to ensure the goroutine waits
|
||||
time.Sleep(10 * time.Nanosecond)
|
||||
p.Put(resources[i])
|
||||
}
|
||||
<-ch
|
||||
_, _, _, waitCount, waitTime, _ := p.Stats()
|
||||
if waitCount != 5 {
|
||||
t.Errorf("Expecting 5, received %d", waitCount)
|
||||
}
|
||||
if waitTime == 0 {
|
||||
t.Errorf("Expecting non-zero")
|
||||
}
|
||||
if lastId.Get() != 5 {
|
||||
t.Errorf("Expecting 5, received %d", lastId.Get())
|
||||
}
|
||||
|
||||
// Test Close resource
|
||||
r, err = p.Get()
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error %v", err)
|
||||
}
|
||||
r.Close()
|
||||
p.Put(nil)
|
||||
if count.Get() != 4 {
|
||||
t.Errorf("Expecting 4, received %d", count.Get())
|
||||
}
|
||||
for i := 0; i < 5; i++ {
|
||||
r, err := p.Get()
|
||||
if err != nil {
|
||||
t.Errorf("Get failed: %v", err)
|
||||
}
|
||||
resources[i] = r
|
||||
}
|
||||
for i := 0; i < 5; i++ {
|
||||
p.Put(resources[i])
|
||||
}
|
||||
if count.Get() != 5 {
|
||||
t.Errorf("Expecting 5, received %d", count.Get())
|
||||
}
|
||||
if lastId.Get() != 6 {
|
||||
t.Errorf("Expecting 6, received %d", lastId.Get())
|
||||
}
|
||||
|
||||
// SetCapacity
|
||||
p.SetCapacity(3)
|
||||
if count.Get() != 3 {
|
||||
t.Errorf("Expecting 3, received %d", count.Get())
|
||||
}
|
||||
if lastId.Get() != 6 {
|
||||
t.Errorf("Expecting 6, received %d", lastId.Get())
|
||||
}
|
||||
capacity, available, _, _, _, _ := p.Stats()
|
||||
if capacity != 3 {
|
||||
t.Errorf("Expecting 3, received %d", capacity)
|
||||
}
|
||||
if available != 3 {
|
||||
t.Errorf("Expecting 3, received %d", available)
|
||||
}
|
||||
p.SetCapacity(6)
|
||||
capacity, available, _, _, _, _ = p.Stats()
|
||||
if capacity != 6 {
|
||||
t.Errorf("Expecting 6, received %d", capacity)
|
||||
}
|
||||
if available != 6 {
|
||||
t.Errorf("Expecting 6, received %d", available)
|
||||
}
|
||||
for i := 0; i < 6; i++ {
|
||||
r, err := p.Get()
|
||||
if err != nil {
|
||||
t.Errorf("Get failed: %v", err)
|
||||
}
|
||||
resources[i] = r
|
||||
}
|
||||
for i := 0; i < 6; i++ {
|
||||
p.Put(resources[i])
|
||||
}
|
||||
if count.Get() != 6 {
|
||||
t.Errorf("Expecting 5, received %d", count.Get())
|
||||
}
|
||||
if lastId.Get() != 9 {
|
||||
t.Errorf("Expecting 9, received %d", lastId.Get())
|
||||
}
|
||||
|
||||
// Close
|
||||
p.Close()
|
||||
capacity, available, _, _, _, _ = p.Stats()
|
||||
if capacity != 0 {
|
||||
t.Errorf("Expecting 0, received %d", capacity)
|
||||
}
|
||||
if available != 0 {
|
||||
t.Errorf("Expecting 0, received %d", available)
|
||||
}
|
||||
if count.Get() != 0 {
|
||||
t.Errorf("Expecting 0, received %d", count.Get())
|
||||
}
|
||||
}
|
||||
|
||||
func TestShrinking(t *testing.T) {
|
||||
lastId.Set(0)
|
||||
count.Set(0)
|
||||
p := NewResourcePool(PoolFactory, 5, 5, time.Second)
|
||||
var resources [10]Resource
|
||||
// Leave one empty slot in the pool
|
||||
for i := 0; i < 4; i++ {
|
||||
r, err := p.Get()
|
||||
if err != nil {
|
||||
t.Errorf("Get failed: %v", err)
|
||||
}
|
||||
resources[i] = r
|
||||
}
|
||||
go p.SetCapacity(3)
|
||||
time.Sleep(10 * time.Nanosecond)
|
||||
stats := p.StatsJSON()
|
||||
expected := `{"Capacity": 3, "Available": 0, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000}`
|
||||
if stats != expected {
|
||||
t.Errorf(`expecting '%s', received '%s'`, expected, stats)
|
||||
}
|
||||
|
||||
// TryGet is allowed when shrinking
|
||||
r, err := p.TryGet()
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error %v", err)
|
||||
}
|
||||
if r != nil {
|
||||
t.Errorf("Expecting nil")
|
||||
}
|
||||
|
||||
// Get is allowed when shrinking, but it will wait
|
||||
getdone := make(chan bool)
|
||||
go func() {
|
||||
r, err := p.Get()
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error %v", err)
|
||||
}
|
||||
p.Put(r)
|
||||
getdone <- true
|
||||
}()
|
||||
|
||||
// Put is allowed when shrinking. It's necessary.
|
||||
for i := 0; i < 4; i++ {
|
||||
p.Put(resources[i])
|
||||
}
|
||||
// Wait for Get test to complete
|
||||
<-getdone
|
||||
stats = p.StatsJSON()
|
||||
expected = `{"Capacity": 3, "Available": 3, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000}`
|
||||
if stats != expected {
|
||||
t.Errorf(`expecting '%s', received '%s'`, expected, stats)
|
||||
}
|
||||
if count.Get() != 3 {
|
||||
t.Errorf("Expecting 3, received %d", count.Get())
|
||||
}
|
||||
|
||||
// Ensure no deadlock if SetCapacity is called after we start
|
||||
// waiting for a resource
|
||||
for i := 0; i < 3; i++ {
|
||||
resources[i], err = p.Get()
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error %v", err)
|
||||
}
|
||||
}
|
||||
// This will wait because pool is empty
|
||||
go func() {
|
||||
r, err := p.Get()
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error %v", err)
|
||||
}
|
||||
p.Put(r)
|
||||
getdone <- true
|
||||
}()
|
||||
time.Sleep(10 * time.Nanosecond)
|
||||
|
||||
// This will wait till we Put
|
||||
go p.SetCapacity(2)
|
||||
time.Sleep(10 * time.Nanosecond)
|
||||
|
||||
// This should not hang
|
||||
for i := 0; i < 3; i++ {
|
||||
p.Put(resources[i])
|
||||
}
|
||||
<-getdone
|
||||
capacity, available, _, _, _, _ := p.Stats()
|
||||
if capacity != 2 {
|
||||
t.Errorf("Expecting 2, received %d", capacity)
|
||||
}
|
||||
if available != 2 {
|
||||
t.Errorf("Expecting 2, received %d", available)
|
||||
}
|
||||
if count.Get() != 2 {
|
||||
t.Errorf("Expecting 2, received %d", count.Get())
|
||||
}
|
||||
|
||||
// Test race condition of SetCapacity with itself
|
||||
p.SetCapacity(3)
|
||||
for i := 0; i < 3; i++ {
|
||||
resources[i], err = p.Get()
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error %v", err)
|
||||
}
|
||||
}
|
||||
// This will wait because pool is empty
|
||||
go func() {
|
||||
r, err := p.Get()
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error %v", err)
|
||||
}
|
||||
p.Put(r)
|
||||
getdone <- true
|
||||
}()
|
||||
time.Sleep(10 * time.Nanosecond)
|
||||
|
||||
// This will wait till we Put
|
||||
go p.SetCapacity(2)
|
||||
time.Sleep(10 * time.Nanosecond)
|
||||
go p.SetCapacity(4)
|
||||
time.Sleep(10 * time.Nanosecond)
|
||||
|
||||
// This should not hang
|
||||
for i := 0; i < 3; i++ {
|
||||
p.Put(resources[i])
|
||||
}
|
||||
<-getdone
|
||||
|
||||
err = p.SetCapacity(-1)
|
||||
if err == nil {
|
||||
t.Errorf("Expecting error")
|
||||
}
|
||||
err = p.SetCapacity(255555)
|
||||
if err == nil {
|
||||
t.Errorf("Expecting error")
|
||||
}
|
||||
|
||||
capacity, available, _, _, _, _ = p.Stats()
|
||||
if capacity != 4 {
|
||||
t.Errorf("Expecting 4, received %d", capacity)
|
||||
}
|
||||
if available != 4 {
|
||||
t.Errorf("Expecting 4, received %d", available)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClosing(t *testing.T) {
|
||||
lastId.Set(0)
|
||||
count.Set(0)
|
||||
p := NewResourcePool(PoolFactory, 5, 5, time.Second)
|
||||
var resources [10]Resource
|
||||
for i := 0; i < 5; i++ {
|
||||
r, err := p.Get()
|
||||
if err != nil {
|
||||
t.Errorf("Get failed: %v", err)
|
||||
}
|
||||
resources[i] = r
|
||||
}
|
||||
ch := make(chan bool)
|
||||
go func() {
|
||||
p.Close()
|
||||
ch <- true
|
||||
}()
|
||||
|
||||
// Wait for goroutine to call Close
|
||||
time.Sleep(10 * time.Nanosecond)
|
||||
stats := p.StatsJSON()
|
||||
expected := `{"Capacity": 0, "Available": 0, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000}`
|
||||
if stats != expected {
|
||||
t.Errorf(`expecting '%s', received '%s'`, expected, stats)
|
||||
}
|
||||
|
||||
// Put is allowed when closing
|
||||
for i := 0; i < 5; i++ {
|
||||
p.Put(resources[i])
|
||||
}
|
||||
|
||||
// Wait for Close to return
|
||||
<-ch
|
||||
|
||||
// SetCapacity must be ignored after Close
|
||||
err := p.SetCapacity(1)
|
||||
if err == nil {
|
||||
t.Errorf("expecting error")
|
||||
}
|
||||
|
||||
stats = p.StatsJSON()
|
||||
expected = `{"Capacity": 0, "Available": 0, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000}`
|
||||
if stats != expected {
|
||||
t.Errorf(`expecting '%s', received '%s'`, expected, stats)
|
||||
}
|
||||
if lastId.Get() != 5 {
|
||||
t.Errorf("Expecting 5, received %d", count.Get())
|
||||
}
|
||||
if count.Get() != 0 {
|
||||
t.Errorf("Expecting 0, received %d", count.Get())
|
||||
}
|
||||
}
|
||||
|
||||
func TestIdleTimeout(t *testing.T) {
|
||||
lastId.Set(0)
|
||||
count.Set(0)
|
||||
p := NewResourcePool(PoolFactory, 1, 1, 10*time.Nanosecond)
|
||||
defer p.Close()
|
||||
|
||||
r, err := p.Get()
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error %v", err)
|
||||
}
|
||||
p.Put(r)
|
||||
if lastId.Get() != 1 {
|
||||
t.Errorf("Expecting 1, received %d", count.Get())
|
||||
}
|
||||
if count.Get() != 1 {
|
||||
t.Errorf("Expecting 1, received %d", count.Get())
|
||||
}
|
||||
time.Sleep(20 * time.Nanosecond)
|
||||
r, err = p.Get()
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error %v", err)
|
||||
}
|
||||
if lastId.Get() != 2 {
|
||||
t.Errorf("Expecting 2, received %d", count.Get())
|
||||
}
|
||||
if count.Get() != 1 {
|
||||
t.Errorf("Expecting 1, received %d", count.Get())
|
||||
}
|
||||
p.Put(r)
|
||||
}
|
||||
|
||||
func TestCreateFail(t *testing.T) {
|
||||
lastId.Set(0)
|
||||
count.Set(0)
|
||||
p := NewResourcePool(FailFactory, 5, 5, time.Second)
|
||||
defer p.Close()
|
||||
if _, err := p.Get(); err.Error() != "Failed" {
|
||||
t.Errorf("Expecting Failed, received %v", err)
|
||||
}
|
||||
stats := p.StatsJSON()
|
||||
expected := `{"Capacity": 5, "Available": 5, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000}`
|
||||
if stats != expected {
|
||||
t.Errorf(`expecting '%s', received '%s'`, expected, stats)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSlowCreateFail(t *testing.T) {
|
||||
lastId.Set(0)
|
||||
count.Set(0)
|
||||
p := NewResourcePool(SlowFailFactory, 2, 2, time.Second)
|
||||
defer p.Close()
|
||||
ch := make(chan bool)
|
||||
// The third Get should not wait indefinitely
|
||||
for i := 0; i < 3; i++ {
|
||||
go func() {
|
||||
p.Get()
|
||||
ch <- true
|
||||
}()
|
||||
}
|
||||
for i := 0; i < 3; i++ {
|
||||
<-ch
|
||||
}
|
||||
_, available, _, _, _, _ := p.Stats()
|
||||
if available != 2 {
|
||||
t.Errorf("Expecting 2, received %d", available)
|
||||
}
|
||||
}
|
214
vendor/github.com/ngaut/pools/roundrobin.go
generated
vendored
@ -1,214 +0,0 @@
|
||||
// Copyright 2012, Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pools
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// RoundRobin is deprecated. Use ResourcePool instead.
|
||||
// RoundRobin allows you to use a pool of resources in a round robin fashion.
|
||||
type RoundRobin struct {
|
||||
mu sync.Mutex
|
||||
available *sync.Cond
|
||||
resources chan fifoWrapper
|
||||
size int64
|
||||
factory Factory
|
||||
idleTimeout time.Duration
|
||||
|
||||
// stats
|
||||
waitCount int64
|
||||
waitTime time.Duration
|
||||
}
|
||||
|
||||
type fifoWrapper struct {
|
||||
resource Resource
|
||||
timeUsed time.Time
|
||||
}
|
||||
|
||||
// NewRoundRobin creates a new RoundRobin pool.
|
||||
// capacity is the maximum number of resources RoundRobin will create.
|
||||
// factory will be the function used to create resources.
|
||||
// If a resource is unused beyond idleTimeout, it's discarded.
|
||||
func NewRoundRobin(capacity int, idleTimeout time.Duration) *RoundRobin {
|
||||
r := &RoundRobin{
|
||||
resources: make(chan fifoWrapper, capacity),
|
||||
size: 0,
|
||||
idleTimeout: idleTimeout,
|
||||
}
|
||||
r.available = sync.NewCond(&r.mu)
|
||||
return r
|
||||
}
|
||||
|
||||
// Open starts allowing the creation of resources
|
||||
func (rr *RoundRobin) Open(factory Factory) {
|
||||
rr.mu.Lock()
|
||||
defer rr.mu.Unlock()
|
||||
rr.factory = factory
|
||||
}
|
||||
|
||||
// Close empties the pool calling Close on all its resources.
|
||||
// It waits for all resources to be returned (Put).
|
||||
func (rr *RoundRobin) Close() {
|
||||
rr.mu.Lock()
|
||||
defer rr.mu.Unlock()
|
||||
for rr.size > 0 {
|
||||
select {
|
||||
case fw := <-rr.resources:
|
||||
go fw.resource.Close()
|
||||
rr.size--
|
||||
default:
|
||||
rr.available.Wait()
|
||||
}
|
||||
}
|
||||
rr.factory = nil
|
||||
}
|
||||
|
||||
func (rr *RoundRobin) IsClosed() bool {
|
||||
return rr.factory == nil
|
||||
}
|
||||
|
||||
// Get will return the next available resource. If none is available, and capacity
|
||||
// has not been reached, it will create a new one using the factory. Otherwise,
|
||||
// it will indefinitely wait till the next resource becomes available.
|
||||
func (rr *RoundRobin) Get() (resource Resource, err error) {
|
||||
return rr.get(true)
|
||||
}
|
||||
|
||||
// TryGet will return the next available resource. If none is available, and capacity
|
||||
// has not been reached, it will create a new one using the factory. Otherwise,
|
||||
// it will return nil with no error.
|
||||
func (rr *RoundRobin) TryGet() (resource Resource, err error) {
|
||||
return rr.get(false)
|
||||
}
|
||||
|
||||
func (rr *RoundRobin) get(wait bool) (resource Resource, err error) {
|
||||
rr.mu.Lock()
|
||||
defer rr.mu.Unlock()
|
||||
// Any waits in this loop will release the lock, and it will be
|
||||
// reacquired before the waits return.
|
||||
for {
|
||||
select {
|
||||
case fw := <-rr.resources:
|
||||
// Found a free resource in the channel
|
||||
if rr.idleTimeout > 0 && fw.timeUsed.Add(rr.idleTimeout).Sub(time.Now()) < 0 {
|
||||
// resource has been idle for too long. Discard & go for next.
|
||||
go fw.resource.Close()
|
||||
rr.size--
|
||||
// Nobody else should be waiting, but signal anyway.
|
||||
rr.available.Signal()
|
||||
continue
|
||||
}
|
||||
return fw.resource, nil
|
||||
default:
|
||||
// resource channel is empty
|
||||
if rr.size >= int64(cap(rr.resources)) {
|
||||
// The pool is full
|
||||
if wait {
|
||||
start := time.Now()
|
||||
rr.available.Wait()
|
||||
rr.recordWait(start)
|
||||
continue
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
// Pool is not full. Create a resource.
|
||||
if resource, err = rr.waitForCreate(); err != nil {
|
||||
// size was decremented, and somebody could be waiting.
|
||||
rr.available.Signal()
|
||||
return nil, err
|
||||
}
|
||||
// Creation successful. Account for this by incrementing size.
|
||||
rr.size++
|
||||
return resource, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rr *RoundRobin) recordWait(start time.Time) {
|
||||
rr.waitCount++
|
||||
rr.waitTime += time.Now().Sub(start)
|
||||
}
|
||||
|
||||
func (rr *RoundRobin) waitForCreate() (resource Resource, err error) {
|
||||
// Prevent thundering herd: increment size before creating resource, and decrement after.
|
||||
rr.size++
|
||||
rr.mu.Unlock()
|
||||
defer func() {
|
||||
rr.mu.Lock()
|
||||
rr.size--
|
||||
}()
|
||||
return rr.factory()
|
||||
}
|
||||
|
||||
// Put will return a resource to the pool. You MUST return every resource to the pool,
|
||||
// even if it's closed. If a resource is closed, you should call Put(nil).
|
||||
func (rr *RoundRobin) Put(resource Resource) {
|
||||
rr.mu.Lock()
|
||||
defer rr.available.Signal()
|
||||
defer rr.mu.Unlock()
|
||||
|
||||
if rr.size > int64(cap(rr.resources)) {
|
||||
if resource != nil {
|
||||
go resource.Close()
|
||||
}
|
||||
rr.size--
|
||||
} else if resource == nil {
|
||||
rr.size--
|
||||
} else {
|
||||
if len(rr.resources) == cap(rr.resources) {
|
||||
panic("unexpected")
|
||||
}
|
||||
rr.resources <- fifoWrapper{resource, time.Now()}
|
||||
}
|
||||
}
|
||||
|
||||
// Set capacity changes the capacity of the pool.
|
||||
// You can use it to expand or shrink.
|
||||
func (rr *RoundRobin) SetCapacity(capacity int) error {
|
||||
rr.mu.Lock()
|
||||
defer rr.available.Broadcast()
|
||||
defer rr.mu.Unlock()
|
||||
|
||||
nr := make(chan fifoWrapper, capacity)
|
||||
// This loop transfers resources from the old channel
|
||||
// to the new one, until it fills up or runs out.
|
||||
// It discards extras, if any.
|
||||
for {
|
||||
select {
|
||||
case fw := <-rr.resources:
|
||||
if len(nr) < cap(nr) {
|
||||
nr <- fw
|
||||
} else {
|
||||
go fw.resource.Close()
|
||||
rr.size--
|
||||
}
|
||||
continue
|
||||
default:
|
||||
}
|
||||
break
|
||||
}
|
||||
rr.resources = nr
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rr *RoundRobin) SetIdleTimeout(idleTimeout time.Duration) {
|
||||
rr.mu.Lock()
|
||||
defer rr.mu.Unlock()
|
||||
rr.idleTimeout = idleTimeout
|
||||
}
|
||||
|
||||
func (rr *RoundRobin) StatsJSON() string {
|
||||
s, c, a, wc, wt, it := rr.Stats()
|
||||
return fmt.Sprintf("{\"Size\": %v, \"Capacity\": %v, \"Available\": %v, \"WaitCount\": %v, \"WaitTime\": %v, \"IdleTimeout\": %v}", s, c, a, wc, int64(wt), int64(it))
|
||||
}
|
||||
|
||||
func (rr *RoundRobin) Stats() (size, capacity, available, waitCount int64, waitTime, idleTimeout time.Duration) {
|
||||
rr.mu.Lock()
|
||||
defer rr.mu.Unlock()
|
||||
return rr.size, int64(cap(rr.resources)), int64(len(rr.resources)), rr.waitCount, rr.waitTime, rr.idleTimeout
|
||||
}
|
126
vendor/github.com/ngaut/pools/roundrobin_test.go
generated
vendored
@ -1,126 +0,0 @@
|
||||
// Copyright 2012, Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pools
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestPool(t *testing.T) {
|
||||
lastId.Set(0)
|
||||
p := NewRoundRobin(5, time.Duration(10e9))
|
||||
p.Open(PoolFactory)
|
||||
defer p.Close()
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
r, err := p.TryGet()
|
||||
if err != nil {
|
||||
t.Errorf("TryGet failed: %v", err)
|
||||
}
|
||||
if r.(*TestResource).num != 1 {
|
||||
t.Errorf("Expecting 1, received %d", r.(*TestResource).num)
|
||||
}
|
||||
p.Put(r)
|
||||
}
|
||||
// p = [1]
|
||||
|
||||
all := make([]Resource, 5)
|
||||
for i := 0; i < 5; i++ {
|
||||
if all[i], _ = p.TryGet(); all[i] == nil {
|
||||
t.Errorf("TryGet failed with nil")
|
||||
}
|
||||
}
|
||||
// all = [1-5], p is empty
|
||||
if none, _ := p.TryGet(); none != nil {
|
||||
t.Errorf("TryGet failed with non-nil")
|
||||
}
|
||||
|
||||
ch := make(chan bool)
|
||||
go ResourceWait(p, t, ch)
|
||||
time.Sleep(1e8)
|
||||
for i := 0; i < 5; i++ {
|
||||
p.Put(all[i])
|
||||
}
|
||||
// p = [1-5]
|
||||
<-ch
|
||||
// p = [1-5]
|
||||
if p.waitCount != 1 {
|
||||
t.Errorf("Expecting 1, received %d", p.waitCount)
|
||||
}
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
all[i], _ = p.Get()
|
||||
}
|
||||
// all = [1-5], p is empty
|
||||
all[0].(*TestResource).Close()
|
||||
p.Put(nil)
|
||||
for i := 1; i < 5; i++ {
|
||||
p.Put(all[i])
|
||||
}
|
||||
// p = [2-5]
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
r, _ := p.Get()
|
||||
if r.(*TestResource).num != int64(i+2) {
|
||||
t.Errorf("Expecting %d, received %d", i+2, r.(*TestResource).num)
|
||||
}
|
||||
p.Put(r)
|
||||
}
|
||||
|
||||
p.SetCapacity(3)
|
||||
// p = [2-4]
|
||||
if p.size != 3 {
|
||||
t.Errorf("Expecting 3, received %d", p.size)
|
||||
}
|
||||
|
||||
p.SetIdleTimeout(time.Duration(1e8))
|
||||
time.Sleep(2e8)
|
||||
r, _ := p.Get()
|
||||
if r.(*TestResource).num != 6 {
|
||||
t.Errorf("Expecting 6, received %d", r.(*TestResource).num)
|
||||
}
|
||||
p.Put(r)
|
||||
// p = [6]
|
||||
}
|
||||
|
||||
func TestPoolFail(t *testing.T) {
|
||||
p := NewRoundRobin(5, time.Duration(10e9))
|
||||
p.Open(FailFactory)
|
||||
defer p.Close()
|
||||
if _, err := p.Get(); err.Error() != "Failed" {
|
||||
t.Errorf("Expecting Failed, received %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPoolFullFail(t *testing.T) {
|
||||
p := NewRoundRobin(2, time.Duration(10e9))
|
||||
p.Open(SlowFailFactory)
|
||||
defer p.Close()
|
||||
ch := make(chan bool)
|
||||
// The third get should not wait indefinitely
|
||||
for i := 0; i < 3; i++ {
|
||||
go func() {
|
||||
p.Get()
|
||||
ch <- true
|
||||
}()
|
||||
}
|
||||
for i := 0; i < 3; i++ {
|
||||
<-ch
|
||||
}
|
||||
}
|
||||
|
||||
func ResourceWait(p *RoundRobin, t *testing.T, ch chan bool) {
|
||||
for i := 0; i < 5; i++ {
|
||||
if r, err := p.Get(); err != nil {
|
||||
t.Errorf("TryGet failed: %v", err)
|
||||
} else if r.(*TestResource).num != int64(i+1) {
|
||||
t.Errorf("Expecting %d, received %d", i+1, r.(*TestResource).num)
|
||||
} else {
|
||||
p.Put(r)
|
||||
}
|
||||
}
|
||||
ch <- true
|
||||
}
|
28
vendor/github.com/ngaut/pools/vitess_license
generated
vendored
@ -1,28 +0,0 @@
Copyright 2012, Google Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
    * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
114
vendor/github.com/ngaut/sync2/atomic.go
generated
vendored
114
vendor/github.com/ngaut/sync2/atomic.go
generated
vendored
@ -1,114 +0,0 @@
// Copyright 2013, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync2

import (
	"sync"
	"sync/atomic"
	"time"
)

type AtomicInt32 int32

func (i *AtomicInt32) Add(n int32) int32 {
	return atomic.AddInt32((*int32)(i), n)
}

func (i *AtomicInt32) Set(n int32) {
	atomic.StoreInt32((*int32)(i), n)
}

func (i *AtomicInt32) Get() int32 {
	return atomic.LoadInt32((*int32)(i))
}

func (i *AtomicInt32) CompareAndSwap(oldval, newval int32) (swapped bool) {
	return atomic.CompareAndSwapInt32((*int32)(i), oldval, newval)
}

type AtomicUint32 uint32

func (i *AtomicUint32) Add(n uint32) uint32 {
	return atomic.AddUint32((*uint32)(i), n)
}

func (i *AtomicUint32) Set(n uint32) {
	atomic.StoreUint32((*uint32)(i), n)
}

func (i *AtomicUint32) Get() uint32 {
	return atomic.LoadUint32((*uint32)(i))
}

func (i *AtomicUint32) CompareAndSwap(oldval, newval uint32) (swapped bool) {
	return atomic.CompareAndSwapUint32((*uint32)(i), oldval, newval)
}

type AtomicInt64 int64

func (i *AtomicInt64) Add(n int64) int64 {
	return atomic.AddInt64((*int64)(i), n)
}

func (i *AtomicInt64) Set(n int64) {
	atomic.StoreInt64((*int64)(i), n)
}

func (i *AtomicInt64) Get() int64 {
	return atomic.LoadInt64((*int64)(i))
}

func (i *AtomicInt64) CompareAndSwap(oldval, newval int64) (swapped bool) {
	return atomic.CompareAndSwapInt64((*int64)(i), oldval, newval)
}

type AtomicDuration int64

func (d *AtomicDuration) Add(duration time.Duration) time.Duration {
	return time.Duration(atomic.AddInt64((*int64)(d), int64(duration)))
}

func (d *AtomicDuration) Set(duration time.Duration) {
	atomic.StoreInt64((*int64)(d), int64(duration))
}

func (d *AtomicDuration) Get() time.Duration {
	return time.Duration(atomic.LoadInt64((*int64)(d)))
}

func (d *AtomicDuration) CompareAndSwap(oldval, newval time.Duration) (swapped bool) {
	return atomic.CompareAndSwapInt64((*int64)(d), int64(oldval), int64(newval))
}

// AtomicString gives you atomic-style APIs for string, but
// it's only a convenience wrapper that uses a mutex. So, it's
// not as efficient as the rest of the atomic types.
type AtomicString struct {
	mu  sync.Mutex
	str string
}

func (s *AtomicString) Set(str string) {
	s.mu.Lock()
	s.str = str
	s.mu.Unlock()
}

func (s *AtomicString) Get() string {
	s.mu.Lock()
	str := s.str
	s.mu.Unlock()
	return str
}

func (s *AtomicString) CompareAndSwap(oldval, newval string) (swapped bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.str == oldval {
		s.str = newval
		return true
	}
	return false
}
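The wrappers above give plain integer, duration, and string values an atomic-style Get/Set/Add/CompareAndSwap API. A brief usage sketch follows, assuming the package is imported from the github.com/ngaut/sync2 path shown in this vendor tree; the counter and name values are illustrative.

package main

import (
	"fmt"

	"github.com/ngaut/sync2"
)

func main() {
	// AtomicInt64 delegates to sync/atomic, so it is safe across goroutines.
	var counter sync2.AtomicInt64
	counter.Add(3)
	counter.Set(10)
	fmt.Println(counter.Get())                  // 10
	fmt.Println(counter.CompareAndSwap(10, 11)) // true, counter is now 11

	// AtomicString is a mutex-backed convenience wrapper, per the comment above.
	var name sync2.AtomicString
	name.Set("primary")
	if name.CompareAndSwap("primary", "replica") {
		fmt.Println(name.Get()) // replica
	}
}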
32
vendor/github.com/ngaut/sync2/atomic_test.go
generated
vendored
32
vendor/github.com/ngaut/sync2/atomic_test.go
generated
vendored
@ -1,32 +0,0 @@
// Copyright 2013, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync2

import (
	"testing"
)

func TestAtomicString(t *testing.T) {
	var s AtomicString
	if s.Get() != "" {
		t.Errorf("want empty, got %s", s.Get())
	}
	s.Set("a")
	if s.Get() != "a" {
		t.Errorf("want a, got %s", s.Get())
	}
	if s.CompareAndSwap("b", "c") {
		t.Errorf("want false, got true")
	}
	if s.Get() != "a" {
		t.Errorf("want a, got %s", s.Get())
	}
	if !s.CompareAndSwap("a", "c") {
		t.Errorf("want true, got false")
	}
	if s.Get() != "c" {
		t.Errorf("want c, got %s", s.Get())
	}
}
56
vendor/github.com/ngaut/sync2/cond.go
generated
vendored
56
vendor/github.com/ngaut/sync2/cond.go
generated
vendored
@ -1,56 +0,0 @@
// Copyright 2013, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync2

import (
	"sync"
)

// Cond is an alternate implementation of sync.Cond
type Cond struct {
	L       sync.Locker
	sema    chan struct{}
	waiters AtomicInt64
}

func NewCond(l sync.Locker) *Cond {
	return &Cond{L: l, sema: make(chan struct{})}
}

func (c *Cond) Wait() {
	c.waiters.Add(1)
	c.L.Unlock()
	<-c.sema
	c.L.Lock()
}

func (c *Cond) Signal() {
	for {
		w := c.waiters.Get()
		if w == 0 {
			return
		}
		if c.waiters.CompareAndSwap(w, w-1) {
			break
		}
	}
	c.sema <- struct{}{}
}

func (c *Cond) Broadcast() {
	var w int64
	for {
		w = c.waiters.Get()
		if w == 0 {
			return
		}
		if c.waiters.CompareAndSwap(w, 0) {
			break
		}
	}
	for i := int64(0); i < w; i++ {
		c.sema <- struct{}{}
	}
}
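This Cond follows the same protocol as sync.Cond: hold L, check the predicate in a loop, and call Wait, which releases L while blocked and re-acquires it before returning. A minimal sketch, assuming the vendor import path above; the done flag and the 50ms delay are illustrative.

package main

import (
	"fmt"
	"sync"
	"time"

	"github.com/ngaut/sync2"
)

func main() {
	mu := &sync.Mutex{}
	c := sync2.NewCond(mu)
	done := false

	go func() {
		time.Sleep(50 * time.Millisecond)
		mu.Lock()
		done = true
		mu.Unlock()
		c.Signal() // wake one waiter
	}()

	mu.Lock()
	for !done {
		c.Wait() // unlocks L, blocks on the semaphore channel, relocks L
	}
	mu.Unlock()
	fmt.Println("woken")
}

The design difference from sync.Cond is that waiters are counted with an AtomicInt64 and wakeups are delivered over an unbuffered channel, which is why Signal decrements the counter before sending and Broadcast sends exactly w values.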
Some files were not shown because too many files have changed in this diff