Add go mod (#935)
* Add a go.mod file * run go mod vendor again * Move to a well-supported ini file reader * Remove GO111MODULE=off * Use go 1.16 * Rename github.com/outbrain/golib -> github.com/openark/golib * Remove *.go-e files * Fix for `strconv.ParseInt: parsing "": invalid syntax` error * Add test for '[osc]' section Co-authored-by: Nate Wernimont <nate.wernimont@workiva.com>
This commit is contained in:
parent
aef2a69903
commit
47d49c6b92
3
build.sh
3
build.sh
@ -23,11 +23,10 @@ function build {
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# TODO: remove GO111MODULE once gh-ost uses Go modules
|
|
||||||
echo "Building ${osname} binary"
|
echo "Building ${osname} binary"
|
||||||
export GOOS
|
export GOOS
|
||||||
export GOARCH
|
export GOARCH
|
||||||
GO111MODULE=off go build -ldflags "$ldflags" -o $buildpath/$target go/cmd/gh-ost/main.go
|
go build -ldflags "$ldflags" -o $buildpath/$target go/cmd/gh-ost/main.go
|
||||||
|
|
||||||
if [ $? -ne 0 ]; then
|
if [ $? -ne 0 ]; then
|
||||||
echo "Build failed for ${osname}"
|
echo "Build failed for ${osname}"
|
||||||
|
18
go.mod
Normal file
18
go.mod
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
module github.com/github/gh-ost
|
||||||
|
|
||||||
|
go 1.16
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/go-ini/ini v1.62.0
|
||||||
|
github.com/go-sql-driver/mysql v1.5.0
|
||||||
|
github.com/openark/golib v0.0.0-20210531070646-355f37940af8
|
||||||
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
|
github.com/satori/go.uuid v1.2.0
|
||||||
|
github.com/siddontang/go-mysql v1.1.0
|
||||||
|
github.com/smartystreets/goconvey v1.6.4 // indirect
|
||||||
|
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83
|
||||||
|
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7
|
||||||
|
golang.org/x/text v0.3.5
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||||
|
gopkg.in/ini.v1 v1.62.0 // indirect
|
||||||
|
)
|
72
go.sum
Normal file
72
go.sum
Normal file
@ -0,0 +1,72 @@
|
|||||||
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/go-ini/ini v1.62.0 h1:7VJT/ZXjzqSrvtraFp4ONq80hTcRQth1c9ZnQ3uNQvU=
|
||||||
|
github.com/go-ini/ini v1.62.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||||
|
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||||
|
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
|
||||||
|
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||||
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||||
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||||
|
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||||
|
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||||
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
|
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
||||||
|
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||||
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
|
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||||
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
|
github.com/openark/golib v0.0.0-20210531070646-355f37940af8 h1:9ciIHNuyFqRWi9NpMNw9sVLB6z1ItpP5ZhTY9Q1xVu4=
|
||||||
|
github.com/openark/golib v0.0.0-20210531070646-355f37940af8/go.mod h1:1jj8x1eDVZxgc/Z4VyamX4qTbAdHPUQA6NeVtCd8Sl8=
|
||||||
|
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg=
|
||||||
|
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ=
|
||||||
|
github.com/pingcap/errors v0.11.0 h1:DCJQB8jrHbQ1VVlMFIrbj2ApScNNotVmkSNplu2yUt4=
|
||||||
|
github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
|
||||||
|
github.com/pingcap/parser v0.0.0-20190506092653-e336082eb825/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA=
|
||||||
|
github.com/pingcap/tipb v0.0.0-20190428032612-535e1abaa330/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI=
|
||||||
|
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||||
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
|
||||||
|
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||||
|
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
|
||||||
|
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||||
|
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM=
|
||||||
|
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
|
||||||
|
github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07 h1:oI+RNwuC9jF2g2lP0u0cVEEZrc/AYBCuFdvwrLWM/6Q=
|
||||||
|
github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07/go.mod h1:yFdBgwXP24JziuRl2NMUahT7nGLNOKi1SIiFxMttVD4=
|
||||||
|
github.com/siddontang/go-mysql v1.1.0 h1:NfkS1skrPwUd3hsUqhc6jrv24dKTNMANxKRmDsf1fMc=
|
||||||
|
github.com/siddontang/go-mysql v1.1.0/go.mod h1:+W4RCzesQDI11HvIkaDjS8yM36SpAnGNQ7jmTLn5BnU=
|
||||||
|
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||||
|
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||||
|
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||||
|
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
|
||||||
|
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||||
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
|
||||||
|
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||||
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM=
|
||||||
|
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw=
|
||||||
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||||
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
|
||||||
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ=
|
||||||
|
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
|
gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
|
||||||
|
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
@ -19,10 +19,9 @@ import (
|
|||||||
|
|
||||||
"github.com/github/gh-ost/go/mysql"
|
"github.com/github/gh-ost/go/mysql"
|
||||||
"github.com/github/gh-ost/go/sql"
|
"github.com/github/gh-ost/go/sql"
|
||||||
"github.com/outbrain/golib/log"
|
"github.com/openark/golib/log"
|
||||||
|
|
||||||
"gopkg.in/gcfg.v1"
|
"github.com/go-ini/ini"
|
||||||
gcfgscanner "gopkg.in/gcfg.v1/scanner"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// RowsEstimateMethod is the type of row number estimation
|
// RowsEstimateMethod is the type of row number estimation
|
||||||
@ -807,10 +806,39 @@ func (this *MigrationContext) ReadConfigFile() error {
|
|||||||
if this.ConfigFile == "" {
|
if this.ConfigFile == "" {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
gcfg.RelaxedParserMode = true
|
cfg, err := ini.Load(this.ConfigFile)
|
||||||
gcfgscanner.RelaxedScannerMode = true
|
if err != nil {
|
||||||
if err := gcfg.ReadFileInto(&this.config, this.ConfigFile); err != nil {
|
return err
|
||||||
return fmt.Errorf("Error reading config file %s. Details: %s", this.ConfigFile, err.Error())
|
}
|
||||||
|
|
||||||
|
if cfg.Section("client").Haskey("user") {
|
||||||
|
this.config.Client.User = cfg.Section("client").Key("user").String()
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.Section("client").Haskey("password") {
|
||||||
|
this.config.Client.Password = cfg.Section("client").Key("password").String()
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.Section("osc").Haskey("chunk_size") {
|
||||||
|
this.config.Osc.Chunk_Size, err = cfg.Section("osc").Key("chunk_size").Int64()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Unable to read osc chunk size: %s", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.Section("osc").Haskey("max_load") {
|
||||||
|
this.config.Osc.Max_Load = cfg.Section("osc").Key("max_load").String()
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.Section("osc").Haskey("replication_lag_query") {
|
||||||
|
this.config.Osc.Replication_Lag_Query = cfg.Section("osc").Key("replication_lag_query").String()
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.Section("osc").Haskey("max_lag_millis") {
|
||||||
|
this.config.Osc.Max_Lag_Millis, err = cfg.Section("osc").Key("max_lag_millis").Int64()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Unable to read max lag millis: %s", err.Error())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// We accept user & password in the form "${SOME_ENV_VARIABLE}" in which case we pull
|
// We accept user & password in the form "${SOME_ENV_VARIABLE}" in which case we pull
|
||||||
|
@ -1,16 +1,19 @@
|
|||||||
/*
|
/*
|
||||||
Copyright 2016 GitHub Inc.
|
Copyright 2021 GitHub Inc.
|
||||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package base
|
package base
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/outbrain/golib/log"
|
"github.com/openark/golib/log"
|
||||||
test "github.com/outbrain/golib/tests"
|
test "github.com/openark/golib/tests"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@ -56,3 +59,65 @@ func TestGetTableNames(t *testing.T) {
|
|||||||
test.S(t).ExpectEquals(context.GetChangelogTableName(), "_tmp_ghc")
|
test.S(t).ExpectEquals(context.GetChangelogTableName(), "_tmp_ghc")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestReadConfigFile(t *testing.T) {
|
||||||
|
{
|
||||||
|
context := NewMigrationContext()
|
||||||
|
context.ConfigFile = "/does/not/exist"
|
||||||
|
if err := context.ReadConfigFile(); err == nil {
|
||||||
|
t.Fatal("Expected .ReadConfigFile() to return an error, got nil")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
{
|
||||||
|
f, err := ioutil.TempFile("", t.Name())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create tmp file: %v", err)
|
||||||
|
}
|
||||||
|
defer os.Remove(f.Name())
|
||||||
|
|
||||||
|
f.Write([]byte("[client]"))
|
||||||
|
context := NewMigrationContext()
|
||||||
|
context.ConfigFile = f.Name()
|
||||||
|
if err := context.ReadConfigFile(); err != nil {
|
||||||
|
t.Fatalf(".ReadConfigFile() failed: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
{
|
||||||
|
f, err := ioutil.TempFile("", t.Name())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create tmp file: %v", err)
|
||||||
|
}
|
||||||
|
defer os.Remove(f.Name())
|
||||||
|
|
||||||
|
f.Write([]byte(fmt.Sprintf("[client]\nuser=test\npassword=123456")))
|
||||||
|
context := NewMigrationContext()
|
||||||
|
context.ConfigFile = f.Name()
|
||||||
|
if err := context.ReadConfigFile(); err != nil {
|
||||||
|
t.Fatalf(".ReadConfigFile() failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if context.config.Client.User != "test" {
|
||||||
|
t.Fatalf("Expected client user %q, got %q", "test", context.config.Client.User)
|
||||||
|
} else if context.config.Client.Password != "123456" {
|
||||||
|
t.Fatalf("Expected client password %q, got %q", "123456", context.config.Client.Password)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
{
|
||||||
|
f, err := ioutil.TempFile("", t.Name())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create tmp file: %v", err)
|
||||||
|
}
|
||||||
|
defer os.Remove(f.Name())
|
||||||
|
|
||||||
|
f.Write([]byte(fmt.Sprintf("[osc]\nmax_load=10")))
|
||||||
|
context := NewMigrationContext()
|
||||||
|
context.ConfigFile = f.Name()
|
||||||
|
if err := context.ReadConfigFile(); err != nil {
|
||||||
|
t.Fatalf(".ReadConfigFile() failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if context.config.Osc.Max_Load != "10" {
|
||||||
|
t.Fatalf("Expected osc 'max_load' %q, got %q", "10", context.config.Osc.Max_Load)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
package base
|
package base
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/outbrain/golib/log"
|
"github.com/openark/golib/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
type simpleLogger struct{}
|
type simpleLogger struct{}
|
||||||
|
@ -8,8 +8,8 @@ package base
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/outbrain/golib/log"
|
"github.com/openark/golib/log"
|
||||||
test "github.com/outbrain/golib/tests"
|
test "github.com/openark/golib/tests"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
@ -8,8 +8,8 @@ package base
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/outbrain/golib/log"
|
"github.com/openark/golib/log"
|
||||||
test "github.com/outbrain/golib/tests"
|
test "github.com/openark/golib/tests"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
@ -16,7 +16,7 @@ import (
|
|||||||
"github.com/github/gh-ost/go/logic"
|
"github.com/github/gh-ost/go/logic"
|
||||||
"github.com/github/gh-ost/go/sql"
|
"github.com/github/gh-ost/go/sql"
|
||||||
_ "github.com/go-sql-driver/mysql"
|
_ "github.com/go-sql-driver/mysql"
|
||||||
"github.com/outbrain/golib/log"
|
"github.com/openark/golib/log"
|
||||||
|
|
||||||
"golang.org/x/crypto/ssh/terminal"
|
"golang.org/x/crypto/ssh/terminal"
|
||||||
)
|
)
|
||||||
|
@ -16,7 +16,7 @@ import (
|
|||||||
"github.com/github/gh-ost/go/mysql"
|
"github.com/github/gh-ost/go/mysql"
|
||||||
"github.com/github/gh-ost/go/sql"
|
"github.com/github/gh-ost/go/sql"
|
||||||
|
|
||||||
"github.com/outbrain/golib/sqlutils"
|
"github.com/openark/golib/sqlutils"
|
||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -14,7 +14,7 @@ import (
|
|||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
|
||||||
"github.com/github/gh-ost/go/base"
|
"github.com/github/gh-ost/go/base"
|
||||||
"github.com/outbrain/golib/log"
|
"github.com/openark/golib/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -17,7 +17,7 @@ import (
|
|||||||
"github.com/github/gh-ost/go/mysql"
|
"github.com/github/gh-ost/go/mysql"
|
||||||
"github.com/github/gh-ost/go/sql"
|
"github.com/github/gh-ost/go/sql"
|
||||||
|
|
||||||
"github.com/outbrain/golib/sqlutils"
|
"github.com/openark/golib/sqlutils"
|
||||||
)
|
)
|
||||||
|
|
||||||
const startSlavePostWaitMilliseconds = 500 * time.Millisecond
|
const startSlavePostWaitMilliseconds = 500 * time.Millisecond
|
||||||
|
@ -16,7 +16,7 @@ import (
|
|||||||
"github.com/github/gh-ost/go/binlog"
|
"github.com/github/gh-ost/go/binlog"
|
||||||
"github.com/github/gh-ost/go/mysql"
|
"github.com/github/gh-ost/go/mysql"
|
||||||
|
|
||||||
"github.com/outbrain/golib/sqlutils"
|
"github.com/openark/golib/sqlutils"
|
||||||
)
|
)
|
||||||
|
|
||||||
type BinlogEventListener struct {
|
type BinlogEventListener struct {
|
||||||
|
@ -8,8 +8,8 @@ package mysql
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/outbrain/golib/log"
|
"github.com/openark/golib/log"
|
||||||
test "github.com/outbrain/golib/tests"
|
test "github.com/openark/golib/tests"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
@ -9,8 +9,8 @@ import (
|
|||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/outbrain/golib/log"
|
"github.com/openark/golib/log"
|
||||||
test "github.com/outbrain/golib/tests"
|
test "github.com/openark/golib/tests"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
@ -8,8 +8,8 @@ package mysql
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/outbrain/golib/log"
|
"github.com/openark/golib/log"
|
||||||
test "github.com/outbrain/golib/tests"
|
test "github.com/openark/golib/tests"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
@ -14,8 +14,8 @@ import (
|
|||||||
|
|
||||||
"github.com/github/gh-ost/go/sql"
|
"github.com/github/gh-ost/go/sql"
|
||||||
|
|
||||||
"github.com/outbrain/golib/log"
|
"github.com/openark/golib/log"
|
||||||
"github.com/outbrain/golib/sqlutils"
|
"github.com/openark/golib/sqlutils"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -17,7 +17,7 @@
|
|||||||
package os
|
package os
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/outbrain/golib/log"
|
"github.com/openark/golib/log"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
|
@ -12,8 +12,8 @@ import (
|
|||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/outbrain/golib/log"
|
"github.com/openark/golib/log"
|
||||||
test "github.com/outbrain/golib/tests"
|
test "github.com/openark/golib/tests"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -9,8 +9,8 @@ import (
|
|||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/outbrain/golib/log"
|
"github.com/openark/golib/log"
|
||||||
test "github.com/outbrain/golib/tests"
|
test "github.com/openark/golib/tests"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
@ -10,8 +10,8 @@ import (
|
|||||||
|
|
||||||
"reflect"
|
"reflect"
|
||||||
|
|
||||||
"github.com/outbrain/golib/log"
|
"github.com/openark/golib/log"
|
||||||
test "github.com/outbrain/golib/tests"
|
test "github.com/openark/golib/tests"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
@ -243,8 +243,7 @@ build_binary() {
|
|||||||
return 0
|
return 0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# TODO: remove GO111MODULE once gh-ost uses Go modules
|
go build -o $ghost_binary go/cmd/gh-ost/main.go
|
||||||
GO111MODULE=off go build -o $ghost_binary go/cmd/gh-ost/main.go
|
|
||||||
|
|
||||||
if [ $? -ne 0 ] ; then
|
if [ $? -ne 0 ] ; then
|
||||||
echo "Build failure"
|
echo "Build failure"
|
||||||
|
@ -17,5 +17,4 @@ export GOPATH="$PWD/.gopath"
|
|||||||
cd .gopath/src/github.com/github/gh-ost
|
cd .gopath/src/github.com/github/gh-ost
|
||||||
|
|
||||||
# We put the binaries directly into the bindir, because we have no need for shim wrappers
|
# We put the binaries directly into the bindir, because we have no need for shim wrappers
|
||||||
# TODO: remove GO111MODULE once gh-ost uses Go modules
|
go build -o "$bindir/gh-ost" -ldflags "-X main.AppVersion=${version} -X main.BuildDescribe=${describe}" ./go/cmd/gh-ost/main.go
|
||||||
GO111MODULE=off go build -o "$bindir/gh-ost" -ldflags "-X main.AppVersion=${version} -X main.BuildDescribe=${describe}" ./go/cmd/gh-ost/main.go
|
|
||||||
|
@ -13,6 +13,5 @@ script/build
|
|||||||
|
|
||||||
cd .gopath/src/github.com/github/gh-ost
|
cd .gopath/src/github.com/github/gh-ost
|
||||||
|
|
||||||
# TODO: remove GO111MODULE once gh-ost uses Go modules
|
|
||||||
echo "Running unit tests"
|
echo "Running unit tests"
|
||||||
GO111MODULE=off go test ./go/...
|
go test ./go/...
|
||||||
|
3
test.sh
3
test.sh
@ -6,8 +6,7 @@ for testsuite in base mysql sql
|
|||||||
do
|
do
|
||||||
pushd go/${testsuite} > /dev/null;
|
pushd go/${testsuite} > /dev/null;
|
||||||
|
|
||||||
# TODO: remove GO111MODULE once gh-ost uses Go modules
|
go test $*;
|
||||||
GO111MODULE=off go test $*;
|
|
||||||
|
|
||||||
[ $? -ne 0 ] && retval=1
|
[ $? -ne 0 ] && retval=1
|
||||||
popd > /dev/null;
|
popd > /dev/null;
|
||||||
|
5
vendor/github.com/BurntSushi/toml/.gitignore
generated
vendored
5
vendor/github.com/BurntSushi/toml/.gitignore
generated
vendored
@ -1,5 +0,0 @@
|
|||||||
TAGS
|
|
||||||
tags
|
|
||||||
.*.swp
|
|
||||||
tomlcheck/tomlcheck
|
|
||||||
toml.test
|
|
12
vendor/github.com/BurntSushi/toml/.travis.yml
generated
vendored
12
vendor/github.com/BurntSushi/toml/.travis.yml
generated
vendored
@ -1,12 +0,0 @@
|
|||||||
language: go
|
|
||||||
go:
|
|
||||||
- 1.1
|
|
||||||
- 1.2
|
|
||||||
- tip
|
|
||||||
install:
|
|
||||||
- go install ./...
|
|
||||||
- go get github.com/BurntSushi/toml-test
|
|
||||||
script:
|
|
||||||
- export PATH="$PATH:$HOME/gopath/bin"
|
|
||||||
- make test
|
|
||||||
|
|
3
vendor/github.com/BurntSushi/toml/COMPATIBLE
generated
vendored
3
vendor/github.com/BurntSushi/toml/COMPATIBLE
generated
vendored
@ -1,3 +0,0 @@
|
|||||||
Compatible with TOML version
|
|
||||||
[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
|
|
||||||
|
|
14
vendor/github.com/BurntSushi/toml/COPYING
generated
vendored
14
vendor/github.com/BurntSushi/toml/COPYING
generated
vendored
@ -1,14 +0,0 @@
|
|||||||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
|
||||||
Version 2, December 2004
|
|
||||||
|
|
||||||
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
|
|
||||||
|
|
||||||
Everyone is permitted to copy and distribute verbatim or modified
|
|
||||||
copies of this license document, and changing it is allowed as long
|
|
||||||
as the name is changed.
|
|
||||||
|
|
||||||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
|
||||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
|
||||||
|
|
||||||
0. You just DO WHAT THE FUCK YOU WANT TO.
|
|
||||||
|
|
19
vendor/github.com/BurntSushi/toml/Makefile
generated
vendored
19
vendor/github.com/BurntSushi/toml/Makefile
generated
vendored
@ -1,19 +0,0 @@
|
|||||||
install:
|
|
||||||
go install ./...
|
|
||||||
|
|
||||||
test: install
|
|
||||||
go test -v
|
|
||||||
toml-test toml-test-decoder
|
|
||||||
toml-test -encoder toml-test-encoder
|
|
||||||
|
|
||||||
fmt:
|
|
||||||
gofmt -w *.go */*.go
|
|
||||||
colcheck *.go */*.go
|
|
||||||
|
|
||||||
tags:
|
|
||||||
find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
|
|
||||||
|
|
||||||
push:
|
|
||||||
git push origin master
|
|
||||||
git push github master
|
|
||||||
|
|
220
vendor/github.com/BurntSushi/toml/README.md
generated
vendored
220
vendor/github.com/BurntSushi/toml/README.md
generated
vendored
@ -1,220 +0,0 @@
|
|||||||
## TOML parser and encoder for Go with reflection
|
|
||||||
|
|
||||||
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
|
|
||||||
reflection interface similar to Go's standard library `json` and `xml`
|
|
||||||
packages. This package also supports the `encoding.TextUnmarshaler` and
|
|
||||||
`encoding.TextMarshaler` interfaces so that you can define custom data
|
|
||||||
representations. (There is an example of this below.)
|
|
||||||
|
|
||||||
Spec: https://github.com/mojombo/toml
|
|
||||||
|
|
||||||
Compatible with TOML version
|
|
||||||
[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
|
|
||||||
|
|
||||||
Documentation: http://godoc.org/github.com/BurntSushi/toml
|
|
||||||
|
|
||||||
Installation:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go get github.com/BurntSushi/toml
|
|
||||||
```
|
|
||||||
|
|
||||||
Try the toml validator:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go get github.com/BurntSushi/toml/cmd/tomlv
|
|
||||||
tomlv some-toml-file.toml
|
|
||||||
```
|
|
||||||
|
|
||||||
[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml)
|
|
||||||
|
|
||||||
|
|
||||||
### Testing
|
|
||||||
|
|
||||||
This package passes all tests in
|
|
||||||
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
|
|
||||||
and the encoder.
|
|
||||||
|
|
||||||
### Examples
|
|
||||||
|
|
||||||
This package works similarly to how the Go standard library handles `XML`
|
|
||||||
and `JSON`. Namely, data is loaded into Go values via reflection.
|
|
||||||
|
|
||||||
For the simplest example, consider some TOML file as just a list of keys
|
|
||||||
and values:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
Age = 25
|
|
||||||
Cats = [ "Cauchy", "Plato" ]
|
|
||||||
Pi = 3.14
|
|
||||||
Perfection = [ 6, 28, 496, 8128 ]
|
|
||||||
DOB = 1987-07-05T05:45:00Z
|
|
||||||
```
|
|
||||||
|
|
||||||
Which could be defined in Go as:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type Config struct {
|
|
||||||
Age int
|
|
||||||
Cats []string
|
|
||||||
Pi float64
|
|
||||||
Perfection []int
|
|
||||||
DOB time.Time // requires `import time`
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
And then decoded with:
|
|
||||||
|
|
||||||
```go
|
|
||||||
var conf Config
|
|
||||||
if _, err := toml.Decode(tomlData, &conf); err != nil {
|
|
||||||
// handle error
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
You can also use struct tags if your struct field name doesn't map to a TOML
|
|
||||||
key value directly:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
some_key_NAME = "wat"
|
|
||||||
```
|
|
||||||
|
|
||||||
```go
|
|
||||||
type TOML struct {
|
|
||||||
ObscureKey string `toml:"some_key_NAME"`
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Using the `encoding.TextUnmarshaler` interface
|
|
||||||
|
|
||||||
Here's an example that automatically parses duration strings into
|
|
||||||
`time.Duration` values:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[[song]]
|
|
||||||
name = "Thunder Road"
|
|
||||||
duration = "4m49s"
|
|
||||||
|
|
||||||
[[song]]
|
|
||||||
name = "Stairway to Heaven"
|
|
||||||
duration = "8m03s"
|
|
||||||
```
|
|
||||||
|
|
||||||
Which can be decoded with:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type song struct {
|
|
||||||
Name string
|
|
||||||
Duration duration
|
|
||||||
}
|
|
||||||
type songs struct {
|
|
||||||
Song []song
|
|
||||||
}
|
|
||||||
var favorites songs
|
|
||||||
if _, err := Decode(blob, &favorites); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, s := range favorites.Song {
|
|
||||||
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
And you'll also need a `duration` type that satisfies the
|
|
||||||
`encoding.TextUnmarshaler` interface:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type duration struct {
|
|
||||||
time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *duration) UnmarshalText(text []byte) error {
|
|
||||||
var err error
|
|
||||||
d.Duration, err = time.ParseDuration(string(text))
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### More complex usage
|
|
||||||
|
|
||||||
Here's an example of how to load the example from the official spec page:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
# This is a TOML document. Boom.
|
|
||||||
|
|
||||||
title = "TOML Example"
|
|
||||||
|
|
||||||
[owner]
|
|
||||||
name = "Tom Preston-Werner"
|
|
||||||
organization = "GitHub"
|
|
||||||
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
|
|
||||||
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
|
|
||||||
|
|
||||||
[database]
|
|
||||||
server = "192.168.1.1"
|
|
||||||
ports = [ 8001, 8001, 8002 ]
|
|
||||||
connection_max = 5000
|
|
||||||
enabled = true
|
|
||||||
|
|
||||||
[servers]
|
|
||||||
|
|
||||||
# You can indent as you please. Tabs or spaces. TOML don't care.
|
|
||||||
[servers.alpha]
|
|
||||||
ip = "10.0.0.1"
|
|
||||||
dc = "eqdc10"
|
|
||||||
|
|
||||||
[servers.beta]
|
|
||||||
ip = "10.0.0.2"
|
|
||||||
dc = "eqdc10"
|
|
||||||
|
|
||||||
[clients]
|
|
||||||
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
|
|
||||||
|
|
||||||
# Line breaks are OK when inside arrays
|
|
||||||
hosts = [
|
|
||||||
"alpha",
|
|
||||||
"omega"
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
And the corresponding Go types are:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type tomlConfig struct {
|
|
||||||
Title string
|
|
||||||
Owner ownerInfo
|
|
||||||
DB database `toml:"database"`
|
|
||||||
Servers map[string]server
|
|
||||||
Clients clients
|
|
||||||
}
|
|
||||||
|
|
||||||
type ownerInfo struct {
|
|
||||||
Name string
|
|
||||||
Org string `toml:"organization"`
|
|
||||||
Bio string
|
|
||||||
DOB time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
type database struct {
|
|
||||||
Server string
|
|
||||||
Ports []int
|
|
||||||
ConnMax int `toml:"connection_max"`
|
|
||||||
Enabled bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type server struct {
|
|
||||||
IP string
|
|
||||||
DC string
|
|
||||||
}
|
|
||||||
|
|
||||||
type clients struct {
|
|
||||||
Data [][]interface{}
|
|
||||||
Hosts []string
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Note that a case insensitive match will be tried if an exact match can't be
|
|
||||||
found.
|
|
||||||
|
|
||||||
A working example of the above can be found in `_examples/example.{go,toml}`.
|
|
||||||
|
|
14
vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
generated
vendored
14
vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
generated
vendored
@ -1,14 +0,0 @@
|
|||||||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
|
||||||
Version 2, December 2004
|
|
||||||
|
|
||||||
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
|
|
||||||
|
|
||||||
Everyone is permitted to copy and distribute verbatim or modified
|
|
||||||
copies of this license document, and changing it is allowed as long
|
|
||||||
as the name is changed.
|
|
||||||
|
|
||||||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
|
||||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
|
||||||
|
|
||||||
0. You just DO WHAT THE FUCK YOU WANT TO.
|
|
||||||
|
|
14
vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md
generated
vendored
14
vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md
generated
vendored
@ -1,14 +0,0 @@
|
|||||||
# Implements the TOML test suite interface
|
|
||||||
|
|
||||||
This is an implementation of the interface expected by
|
|
||||||
[toml-test](https://github.com/BurntSushi/toml-test) for my
|
|
||||||
[toml parser written in Go](https://github.com/BurntSushi/toml).
|
|
||||||
In particular, it maps TOML data on `stdin` to a JSON format on `stdout`.
|
|
||||||
|
|
||||||
|
|
||||||
Compatible with TOML version
|
|
||||||
[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
|
|
||||||
|
|
||||||
Compatible with `toml-test` version
|
|
||||||
[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0)
|
|
||||||
|
|
90
vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go
generated
vendored
90
vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go
generated
vendored
@ -1,90 +0,0 @@
|
|||||||
// Command toml-test-decoder satisfies the toml-test interface for testing
|
|
||||||
// TOML decoders. Namely, it accepts TOML on stdin and outputs JSON on stdout.
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/BurntSushi/toml"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
log.SetFlags(0)
|
|
||||||
|
|
||||||
flag.Usage = usage
|
|
||||||
flag.Parse()
|
|
||||||
}
|
|
||||||
|
|
||||||
func usage() {
|
|
||||||
log.Printf("Usage: %s < toml-file\n", path.Base(os.Args[0]))
|
|
||||||
flag.PrintDefaults()
|
|
||||||
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
if flag.NArg() != 0 {
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
|
|
||||||
var tmp interface{}
|
|
||||||
if _, err := toml.DecodeReader(os.Stdin, &tmp); err != nil {
|
|
||||||
log.Fatalf("Error decoding TOML: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
typedTmp := translate(tmp)
|
|
||||||
if err := json.NewEncoder(os.Stdout).Encode(typedTmp); err != nil {
|
|
||||||
log.Fatalf("Error encoding JSON: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func translate(tomlData interface{}) interface{} {
|
|
||||||
switch orig := tomlData.(type) {
|
|
||||||
case map[string]interface{}:
|
|
||||||
typed := make(map[string]interface{}, len(orig))
|
|
||||||
for k, v := range orig {
|
|
||||||
typed[k] = translate(v)
|
|
||||||
}
|
|
||||||
return typed
|
|
||||||
case []map[string]interface{}:
|
|
||||||
typed := make([]map[string]interface{}, len(orig))
|
|
||||||
for i, v := range orig {
|
|
||||||
typed[i] = translate(v).(map[string]interface{})
|
|
||||||
}
|
|
||||||
return typed
|
|
||||||
case []interface{}:
|
|
||||||
typed := make([]interface{}, len(orig))
|
|
||||||
for i, v := range orig {
|
|
||||||
typed[i] = translate(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// We don't really need to tag arrays, but let's be future proof.
|
|
||||||
// (If TOML ever supports tuples, we'll need this.)
|
|
||||||
return tag("array", typed)
|
|
||||||
case time.Time:
|
|
||||||
return tag("datetime", orig.Format("2006-01-02T15:04:05Z"))
|
|
||||||
case bool:
|
|
||||||
return tag("bool", fmt.Sprintf("%v", orig))
|
|
||||||
case int64:
|
|
||||||
return tag("integer", fmt.Sprintf("%d", orig))
|
|
||||||
case float64:
|
|
||||||
return tag("float", fmt.Sprintf("%v", orig))
|
|
||||||
case string:
|
|
||||||
return tag("string", orig)
|
|
||||||
}
|
|
||||||
|
|
||||||
panic(fmt.Sprintf("Unknown type: %T", tomlData))
|
|
||||||
}
|
|
||||||
|
|
||||||
func tag(typeName string, data interface{}) map[string]interface{} {
|
|
||||||
return map[string]interface{}{
|
|
||||||
"type": typeName,
|
|
||||||
"value": data,
|
|
||||||
}
|
|
||||||
}
|
|
14
vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING
generated
vendored
14
vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING
generated
vendored
@ -1,14 +0,0 @@
|
|||||||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
|
||||||
Version 2, December 2004
|
|
||||||
|
|
||||||
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
|
|
||||||
|
|
||||||
Everyone is permitted to copy and distribute verbatim or modified
|
|
||||||
copies of this license document, and changing it is allowed as long
|
|
||||||
as the name is changed.
|
|
||||||
|
|
||||||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
|
||||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
|
||||||
|
|
||||||
0. You just DO WHAT THE FUCK YOU WANT TO.
|
|
||||||
|
|
14
vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md
generated
vendored
14
vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md
generated
vendored
@ -1,14 +0,0 @@
|
|||||||
# Implements the TOML test suite interface for TOML encoders
|
|
||||||
|
|
||||||
This is an implementation of the interface expected by
|
|
||||||
[toml-test](https://github.com/BurntSushi/toml-test) for the
|
|
||||||
[TOML encoder](https://github.com/BurntSushi/toml).
|
|
||||||
In particular, it maps JSON data on `stdin` to a TOML format on `stdout`.
|
|
||||||
|
|
||||||
|
|
||||||
Compatible with TOML version
|
|
||||||
[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
|
|
||||||
|
|
||||||
Compatible with `toml-test` version
|
|
||||||
[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0)
|
|
||||||
|
|
131
vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go
generated
vendored
131
vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go
generated
vendored
@ -1,131 +0,0 @@
|
|||||||
// Command toml-test-encoder satisfies the toml-test interface for testing
|
|
||||||
// TOML encoders. Namely, it accepts JSON on stdin and outputs TOML on stdout.
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"flag"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/BurntSushi/toml"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
log.SetFlags(0)
|
|
||||||
|
|
||||||
flag.Usage = usage
|
|
||||||
flag.Parse()
|
|
||||||
}
|
|
||||||
|
|
||||||
func usage() {
|
|
||||||
log.Printf("Usage: %s < json-file\n", path.Base(os.Args[0]))
|
|
||||||
flag.PrintDefaults()
|
|
||||||
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
if flag.NArg() != 0 {
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
|
|
||||||
var tmp interface{}
|
|
||||||
if err := json.NewDecoder(os.Stdin).Decode(&tmp); err != nil {
|
|
||||||
log.Fatalf("Error decoding JSON: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
tomlData := translate(tmp)
|
|
||||||
if err := toml.NewEncoder(os.Stdout).Encode(tomlData); err != nil {
|
|
||||||
log.Fatalf("Error encoding TOML: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func translate(typedJson interface{}) interface{} {
|
|
||||||
switch v := typedJson.(type) {
|
|
||||||
case map[string]interface{}:
|
|
||||||
if len(v) == 2 && in("type", v) && in("value", v) {
|
|
||||||
return untag(v)
|
|
||||||
}
|
|
||||||
m := make(map[string]interface{}, len(v))
|
|
||||||
for k, v2 := range v {
|
|
||||||
m[k] = translate(v2)
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
case []interface{}:
|
|
||||||
tabArray := make([]map[string]interface{}, len(v))
|
|
||||||
for i := range v {
|
|
||||||
if m, ok := translate(v[i]).(map[string]interface{}); ok {
|
|
||||||
tabArray[i] = m
|
|
||||||
} else {
|
|
||||||
log.Fatalf("JSON arrays may only contain objects. This " +
|
|
||||||
"corresponds to only tables being allowed in " +
|
|
||||||
"TOML table arrays.")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return tabArray
|
|
||||||
}
|
|
||||||
log.Fatalf("Unrecognized JSON format '%T'.", typedJson)
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
func untag(typed map[string]interface{}) interface{} {
|
|
||||||
t := typed["type"].(string)
|
|
||||||
v := typed["value"]
|
|
||||||
switch t {
|
|
||||||
case "string":
|
|
||||||
return v.(string)
|
|
||||||
case "integer":
|
|
||||||
v := v.(string)
|
|
||||||
n, err := strconv.Atoi(v)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Could not parse '%s' as integer: %s", v, err)
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
case "float":
|
|
||||||
v := v.(string)
|
|
||||||
f, err := strconv.ParseFloat(v, 64)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Could not parse '%s' as float64: %s", v, err)
|
|
||||||
}
|
|
||||||
return f
|
|
||||||
case "datetime":
|
|
||||||
v := v.(string)
|
|
||||||
t, err := time.Parse("2006-01-02T15:04:05Z", v)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Could not parse '%s' as a datetime: %s", v, err)
|
|
||||||
}
|
|
||||||
return t
|
|
||||||
case "bool":
|
|
||||||
v := v.(string)
|
|
||||||
switch v {
|
|
||||||
case "true":
|
|
||||||
return true
|
|
||||||
case "false":
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
log.Fatalf("Could not parse '%s' as a boolean.", v)
|
|
||||||
case "array":
|
|
||||||
v := v.([]interface{})
|
|
||||||
array := make([]interface{}, len(v))
|
|
||||||
for i := range v {
|
|
||||||
if m, ok := v[i].(map[string]interface{}); ok {
|
|
||||||
array[i] = untag(m)
|
|
||||||
} else {
|
|
||||||
log.Fatalf("Arrays may only contain other arrays or "+
|
|
||||||
"primitive values, but found a '%T'.", m)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return array
|
|
||||||
}
|
|
||||||
log.Fatalf("Unrecognized tag type '%s'.", t)
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
func in(key string, m map[string]interface{}) bool {
|
|
||||||
_, ok := m[key]
|
|
||||||
return ok
|
|
||||||
}
|
|
14
vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING
generated
vendored
14
vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING
generated
vendored
@ -1,14 +0,0 @@
|
|||||||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
|
||||||
Version 2, December 2004
|
|
||||||
|
|
||||||
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
|
|
||||||
|
|
||||||
Everyone is permitted to copy and distribute verbatim or modified
|
|
||||||
copies of this license document, and changing it is allowed as long
|
|
||||||
as the name is changed.
|
|
||||||
|
|
||||||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
|
||||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
|
||||||
|
|
||||||
0. You just DO WHAT THE FUCK YOU WANT TO.
|
|
||||||
|
|
22
vendor/github.com/BurntSushi/toml/cmd/tomlv/README.md
generated
vendored
22
vendor/github.com/BurntSushi/toml/cmd/tomlv/README.md
generated
vendored
@ -1,22 +0,0 @@
|
|||||||
# TOML Validator
|
|
||||||
|
|
||||||
If Go is installed, it's simple to try it out:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go get github.com/BurntSushi/toml/cmd/tomlv
|
|
||||||
tomlv some-toml-file.toml
|
|
||||||
```
|
|
||||||
|
|
||||||
You can see the types of every key in a TOML file with:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
tomlv -types some-toml-file.toml
|
|
||||||
```
|
|
||||||
|
|
||||||
At the moment, only one error message is reported at a time. Error messages
|
|
||||||
include line numbers. No output means that the files given are valid TOML, or
|
|
||||||
there is a bug in `tomlv`.
|
|
||||||
|
|
||||||
Compatible with TOML version
|
|
||||||
[v0.1.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.1.0.md)
|
|
||||||
|
|
61
vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go
generated
vendored
61
vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go
generated
vendored
@ -1,61 +0,0 @@
|
|||||||
// Command tomlv validates TOML documents and prints each key's type.
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
"text/tabwriter"
|
|
||||||
|
|
||||||
"github.com/BurntSushi/toml"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
flagTypes = false
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
log.SetFlags(0)
|
|
||||||
|
|
||||||
flag.BoolVar(&flagTypes, "types", flagTypes,
|
|
||||||
"When set, the types of every defined key will be shown.")
|
|
||||||
|
|
||||||
flag.Usage = usage
|
|
||||||
flag.Parse()
|
|
||||||
}
|
|
||||||
|
|
||||||
func usage() {
|
|
||||||
log.Printf("Usage: %s toml-file [ toml-file ... ]\n",
|
|
||||||
path.Base(os.Args[0]))
|
|
||||||
flag.PrintDefaults()
|
|
||||||
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
if flag.NArg() < 1 {
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
for _, f := range flag.Args() {
|
|
||||||
var tmp interface{}
|
|
||||||
md, err := toml.DecodeFile(f, &tmp)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Error in '%s': %s", f, err)
|
|
||||||
}
|
|
||||||
if flagTypes {
|
|
||||||
printTypes(md)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func printTypes(md toml.MetaData) {
|
|
||||||
tabw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
|
||||||
for _, key := range md.Keys() {
|
|
||||||
fmt.Fprintf(tabw, "%s%s\t%s\n",
|
|
||||||
strings.Repeat(" ", len(key)-1), key, md.Type(key...))
|
|
||||||
}
|
|
||||||
tabw.Flush()
|
|
||||||
}
|
|
472
vendor/github.com/BurntSushi/toml/decode.go
generated
vendored
472
vendor/github.com/BurntSushi/toml/decode.go
generated
vendored
@ -1,472 +0,0 @@
|
|||||||
package toml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"math"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var e = fmt.Errorf
|
|
||||||
|
|
||||||
// Primitive is a TOML value that hasn't been decoded into a Go value.
|
|
||||||
// When using the various `Decode*` functions, the type `Primitive` may
|
|
||||||
// be given to any value, and its decoding will be delayed.
|
|
||||||
//
|
|
||||||
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
|
|
||||||
//
|
|
||||||
// The underlying representation of a `Primitive` value is subject to change.
|
|
||||||
// Do not rely on it.
|
|
||||||
//
|
|
||||||
// N.B. Primitive values are still parsed, so using them will only avoid
|
|
||||||
// the overhead of reflection. They can be useful when you don't know the
|
|
||||||
// exact type of TOML data until run time.
|
|
||||||
type Primitive struct {
|
|
||||||
undecoded interface{}
|
|
||||||
context Key
|
|
||||||
}
|
|
||||||
|
|
||||||
// DEPRECATED!
|
|
||||||
//
|
|
||||||
// Use MetaData.PrimitiveDecode instead.
|
|
||||||
func PrimitiveDecode(primValue Primitive, v interface{}) error {
|
|
||||||
md := MetaData{decoded: make(map[string]bool)}
|
|
||||||
return md.unify(primValue.undecoded, rvalue(v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrimitiveDecode is just like the other `Decode*` functions, except it
|
|
||||||
// decodes a TOML value that has already been parsed. Valid primitive values
|
|
||||||
// can *only* be obtained from values filled by the decoder functions,
|
|
||||||
// including this method. (i.e., `v` may contain more `Primitive`
|
|
||||||
// values.)
|
|
||||||
//
|
|
||||||
// Meta data for primitive values is included in the meta data returned by
|
|
||||||
// the `Decode*` functions with one exception: keys returned by the Undecoded
|
|
||||||
// method will only reflect keys that were decoded. Namely, any keys hidden
|
|
||||||
// behind a Primitive will be considered undecoded. Executing this method will
|
|
||||||
// update the undecoded keys in the meta data. (See the example.)
|
|
||||||
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
|
|
||||||
md.context = primValue.context
|
|
||||||
defer func() { md.context = nil }()
|
|
||||||
return md.unify(primValue.undecoded, rvalue(v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode will decode the contents of `data` in TOML format into a pointer
|
|
||||||
// `v`.
|
|
||||||
//
|
|
||||||
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
|
|
||||||
// used interchangeably.)
|
|
||||||
//
|
|
||||||
// TOML arrays of tables correspond to either a slice of structs or a slice
|
|
||||||
// of maps.
|
|
||||||
//
|
|
||||||
// TOML datetimes correspond to Go `time.Time` values.
|
|
||||||
//
|
|
||||||
// All other TOML types (float, string, int, bool and array) correspond
|
|
||||||
// to the obvious Go types.
|
|
||||||
//
|
|
||||||
// An exception to the above rules is if a type implements the
|
|
||||||
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
|
|
||||||
// (floats, strings, integers, booleans and datetimes) will be converted to
|
|
||||||
// a byte string and given to the value's UnmarshalText method. See the
|
|
||||||
// Unmarshaler example for a demonstration with time duration strings.
|
|
||||||
//
|
|
||||||
// Key mapping
|
|
||||||
//
|
|
||||||
// TOML keys can map to either keys in a Go map or field names in a Go
|
|
||||||
// struct. The special `toml` struct tag may be used to map TOML keys to
|
|
||||||
// struct fields that don't match the key name exactly. (See the example.)
|
|
||||||
// A case insensitive match to struct names will be tried if an exact match
|
|
||||||
// can't be found.
|
|
||||||
//
|
|
||||||
// The mapping between TOML values and Go values is loose. That is, there
|
|
||||||
// may exist TOML values that cannot be placed into your representation, and
|
|
||||||
// there may be parts of your representation that do not correspond to
|
|
||||||
// TOML values. This loose mapping can be made stricter by using the IsDefined
|
|
||||||
// and/or Undecoded methods on the MetaData returned.
|
|
||||||
//
|
|
||||||
// This decoder will not handle cyclic types. If a cyclic type is passed,
|
|
||||||
// `Decode` will not terminate.
|
|
||||||
func Decode(data string, v interface{}) (MetaData, error) {
|
|
||||||
p, err := parse(data)
|
|
||||||
if err != nil {
|
|
||||||
return MetaData{}, err
|
|
||||||
}
|
|
||||||
md := MetaData{
|
|
||||||
p.mapping, p.types, p.ordered,
|
|
||||||
make(map[string]bool, len(p.ordered)), nil,
|
|
||||||
}
|
|
||||||
return md, md.unify(p.mapping, rvalue(v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeFile is just like Decode, except it will automatically read the
|
|
||||||
// contents of the file at `fpath` and decode it for you.
|
|
||||||
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
|
|
||||||
bs, err := ioutil.ReadFile(fpath)
|
|
||||||
if err != nil {
|
|
||||||
return MetaData{}, err
|
|
||||||
}
|
|
||||||
return Decode(string(bs), v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeReader is just like Decode, except it will consume all bytes
|
|
||||||
// from the reader and decode it for you.
|
|
||||||
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
|
|
||||||
bs, err := ioutil.ReadAll(r)
|
|
||||||
if err != nil {
|
|
||||||
return MetaData{}, err
|
|
||||||
}
|
|
||||||
return Decode(string(bs), v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// unify performs a sort of type unification based on the structure of `rv`,
|
|
||||||
// which is the client representation.
|
|
||||||
//
|
|
||||||
// Any type mismatch produces an error. Finding a type that we don't know
|
|
||||||
// how to handle produces an unsupported type error.
|
|
||||||
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
|
||||||
// Special case. Look for a `Primitive` value.
|
|
||||||
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
|
|
||||||
// Save the undecoded data and the key context into the primitive
|
|
||||||
// value.
|
|
||||||
context := make(Key, len(md.context))
|
|
||||||
copy(context, md.context)
|
|
||||||
rv.Set(reflect.ValueOf(Primitive{
|
|
||||||
undecoded: data,
|
|
||||||
context: context,
|
|
||||||
}))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Special case. Handle time.Time values specifically.
|
|
||||||
// TODO: Remove this code when we decide to drop support for Go 1.1.
|
|
||||||
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
|
|
||||||
// interfaces.
|
|
||||||
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
|
|
||||||
return md.unifyDatetime(data, rv)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Special case. Look for a value satisfying the TextUnmarshaler interface.
|
|
||||||
if v, ok := rv.Interface().(TextUnmarshaler); ok {
|
|
||||||
return md.unifyText(data, v)
|
|
||||||
}
|
|
||||||
// BUG(burntsushi)
|
|
||||||
// The behavior here is incorrect whenever a Go type satisfies the
|
|
||||||
// encoding.TextUnmarshaler interface but also corresponds to a TOML
|
|
||||||
// hash or array. In particular, the unmarshaler should only be applied
|
|
||||||
// to primitive TOML values. But at this point, it will be applied to
|
|
||||||
// all kinds of values and produce an incorrect error whenever those values
|
|
||||||
// are hashes or arrays (including arrays of tables).
|
|
||||||
|
|
||||||
k := rv.Kind()
|
|
||||||
|
|
||||||
// laziness
|
|
||||||
if k >= reflect.Int && k <= reflect.Uint64 {
|
|
||||||
return md.unifyInt(data, rv)
|
|
||||||
}
|
|
||||||
switch k {
|
|
||||||
case reflect.Ptr:
|
|
||||||
elem := reflect.New(rv.Type().Elem())
|
|
||||||
err := md.unify(data, reflect.Indirect(elem))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
rv.Set(elem)
|
|
||||||
return nil
|
|
||||||
case reflect.Struct:
|
|
||||||
return md.unifyStruct(data, rv)
|
|
||||||
case reflect.Map:
|
|
||||||
return md.unifyMap(data, rv)
|
|
||||||
case reflect.Array:
|
|
||||||
return md.unifyArray(data, rv)
|
|
||||||
case reflect.Slice:
|
|
||||||
return md.unifySlice(data, rv)
|
|
||||||
case reflect.String:
|
|
||||||
return md.unifyString(data, rv)
|
|
||||||
case reflect.Bool:
|
|
||||||
return md.unifyBool(data, rv)
|
|
||||||
case reflect.Interface:
|
|
||||||
// we only support empty interfaces.
|
|
||||||
if rv.NumMethod() > 0 {
|
|
||||||
return e("Unsupported type '%s'.", rv.Kind())
|
|
||||||
}
|
|
||||||
return md.unifyAnything(data, rv)
|
|
||||||
case reflect.Float32:
|
|
||||||
fallthrough
|
|
||||||
case reflect.Float64:
|
|
||||||
return md.unifyFloat64(data, rv)
|
|
||||||
}
|
|
||||||
return e("Unsupported type '%s'.", rv.Kind())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
|
||||||
tmap, ok := mapping.(map[string]interface{})
|
|
||||||
if !ok {
|
|
||||||
return mismatch(rv, "map", mapping)
|
|
||||||
}
|
|
||||||
|
|
||||||
for key, datum := range tmap {
|
|
||||||
var f *field
|
|
||||||
fields := cachedTypeFields(rv.Type())
|
|
||||||
for i := range fields {
|
|
||||||
ff := &fields[i]
|
|
||||||
if ff.name == key {
|
|
||||||
f = ff
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if f == nil && strings.EqualFold(ff.name, key) {
|
|
||||||
f = ff
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if f != nil {
|
|
||||||
subv := rv
|
|
||||||
for _, i := range f.index {
|
|
||||||
subv = indirect(subv.Field(i))
|
|
||||||
}
|
|
||||||
if isUnifiable(subv) {
|
|
||||||
md.decoded[md.context.add(key).String()] = true
|
|
||||||
md.context = append(md.context, key)
|
|
||||||
if err := md.unify(datum, subv); err != nil {
|
|
||||||
return e("Type mismatch for '%s.%s': %s",
|
|
||||||
rv.Type().String(), f.name, err)
|
|
||||||
}
|
|
||||||
md.context = md.context[0 : len(md.context)-1]
|
|
||||||
} else if f.name != "" {
|
|
||||||
// Bad user! No soup for you!
|
|
||||||
return e("Field '%s.%s' is unexported, and therefore cannot "+
|
|
||||||
"be loaded with reflection.", rv.Type().String(), f.name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
|
|
||||||
tmap, ok := mapping.(map[string]interface{})
|
|
||||||
if !ok {
|
|
||||||
return badtype("map", mapping)
|
|
||||||
}
|
|
||||||
if rv.IsNil() {
|
|
||||||
rv.Set(reflect.MakeMap(rv.Type()))
|
|
||||||
}
|
|
||||||
for k, v := range tmap {
|
|
||||||
md.decoded[md.context.add(k).String()] = true
|
|
||||||
md.context = append(md.context, k)
|
|
||||||
|
|
||||||
rvkey := indirect(reflect.New(rv.Type().Key()))
|
|
||||||
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
|
|
||||||
if err := md.unify(v, rvval); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
md.context = md.context[0 : len(md.context)-1]
|
|
||||||
|
|
||||||
rvkey.SetString(k)
|
|
||||||
rv.SetMapIndex(rvkey, rvval)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
|
|
||||||
datav := reflect.ValueOf(data)
|
|
||||||
if datav.Kind() != reflect.Slice {
|
|
||||||
return badtype("slice", data)
|
|
||||||
}
|
|
||||||
sliceLen := datav.Len()
|
|
||||||
if sliceLen != rv.Len() {
|
|
||||||
return e("expected array length %d; got TOML array of length %d",
|
|
||||||
rv.Len(), sliceLen)
|
|
||||||
}
|
|
||||||
return md.unifySliceArray(datav, rv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
|
|
||||||
datav := reflect.ValueOf(data)
|
|
||||||
if datav.Kind() != reflect.Slice {
|
|
||||||
return badtype("slice", data)
|
|
||||||
}
|
|
||||||
sliceLen := datav.Len()
|
|
||||||
if rv.IsNil() {
|
|
||||||
rv.Set(reflect.MakeSlice(rv.Type(), sliceLen, sliceLen))
|
|
||||||
}
|
|
||||||
return md.unifySliceArray(datav, rv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
|
|
||||||
sliceLen := data.Len()
|
|
||||||
for i := 0; i < sliceLen; i++ {
|
|
||||||
v := data.Index(i).Interface()
|
|
||||||
sliceval := indirect(rv.Index(i))
|
|
||||||
if err := md.unify(v, sliceval); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
|
|
||||||
if _, ok := data.(time.Time); ok {
|
|
||||||
rv.Set(reflect.ValueOf(data))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("time.Time", data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
|
|
||||||
if s, ok := data.(string); ok {
|
|
||||||
rv.SetString(s)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("string", data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
|
||||||
if num, ok := data.(float64); ok {
|
|
||||||
switch rv.Kind() {
|
|
||||||
case reflect.Float32:
|
|
||||||
fallthrough
|
|
||||||
case reflect.Float64:
|
|
||||||
rv.SetFloat(num)
|
|
||||||
default:
|
|
||||||
panic("bug")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("float", data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
|
|
||||||
if num, ok := data.(int64); ok {
|
|
||||||
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
|
|
||||||
switch rv.Kind() {
|
|
||||||
case reflect.Int, reflect.Int64:
|
|
||||||
// No bounds checking necessary.
|
|
||||||
case reflect.Int8:
|
|
||||||
if num < math.MinInt8 || num > math.MaxInt8 {
|
|
||||||
return e("Value '%d' is out of range for int8.", num)
|
|
||||||
}
|
|
||||||
case reflect.Int16:
|
|
||||||
if num < math.MinInt16 || num > math.MaxInt16 {
|
|
||||||
return e("Value '%d' is out of range for int16.", num)
|
|
||||||
}
|
|
||||||
case reflect.Int32:
|
|
||||||
if num < math.MinInt32 || num > math.MaxInt32 {
|
|
||||||
return e("Value '%d' is out of range for int32.", num)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
rv.SetInt(num)
|
|
||||||
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
|
|
||||||
unum := uint64(num)
|
|
||||||
switch rv.Kind() {
|
|
||||||
case reflect.Uint, reflect.Uint64:
|
|
||||||
// No bounds checking necessary.
|
|
||||||
case reflect.Uint8:
|
|
||||||
if num < 0 || unum > math.MaxUint8 {
|
|
||||||
return e("Value '%d' is out of range for uint8.", num)
|
|
||||||
}
|
|
||||||
case reflect.Uint16:
|
|
||||||
if num < 0 || unum > math.MaxUint16 {
|
|
||||||
return e("Value '%d' is out of range for uint16.", num)
|
|
||||||
}
|
|
||||||
case reflect.Uint32:
|
|
||||||
if num < 0 || unum > math.MaxUint32 {
|
|
||||||
return e("Value '%d' is out of range for uint32.", num)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
rv.SetUint(unum)
|
|
||||||
} else {
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("integer", data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
|
|
||||||
if b, ok := data.(bool); ok {
|
|
||||||
rv.SetBool(b)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("boolean", data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
|
|
||||||
rv.Set(reflect.ValueOf(data))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
|
|
||||||
var s string
|
|
||||||
switch sdata := data.(type) {
|
|
||||||
case TextMarshaler:
|
|
||||||
text, err := sdata.MarshalText()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
s = string(text)
|
|
||||||
case fmt.Stringer:
|
|
||||||
s = sdata.String()
|
|
||||||
case string:
|
|
||||||
s = sdata
|
|
||||||
case bool:
|
|
||||||
s = fmt.Sprintf("%v", sdata)
|
|
||||||
case int64:
|
|
||||||
s = fmt.Sprintf("%d", sdata)
|
|
||||||
case float64:
|
|
||||||
s = fmt.Sprintf("%f", sdata)
|
|
||||||
default:
|
|
||||||
return badtype("primitive (string-like)", data)
|
|
||||||
}
|
|
||||||
if err := v.UnmarshalText([]byte(s)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
|
|
||||||
func rvalue(v interface{}) reflect.Value {
|
|
||||||
return indirect(reflect.ValueOf(v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// indirect returns the value pointed to by a pointer.
|
|
||||||
// Pointers are followed until the value is not a pointer.
|
|
||||||
// New values are allocated for each nil pointer.
|
|
||||||
//
|
|
||||||
// An exception to this rule is if the value satisfies an interface of
|
|
||||||
// interest to us (like encoding.TextUnmarshaler).
|
|
||||||
func indirect(v reflect.Value) reflect.Value {
|
|
||||||
if v.Kind() != reflect.Ptr {
|
|
||||||
if v.CanAddr() {
|
|
||||||
pv := v.Addr()
|
|
||||||
if _, ok := pv.Interface().(TextUnmarshaler); ok {
|
|
||||||
return pv
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
if v.IsNil() {
|
|
||||||
v.Set(reflect.New(v.Type().Elem()))
|
|
||||||
}
|
|
||||||
return indirect(reflect.Indirect(v))
|
|
||||||
}
|
|
||||||
|
|
||||||
func isUnifiable(rv reflect.Value) bool {
|
|
||||||
if rv.CanSet() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if _, ok := rv.Interface().(TextUnmarshaler); ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func badtype(expected string, data interface{}) error {
|
|
||||||
return e("Expected %s but found '%T'.", expected, data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func mismatch(user reflect.Value, expected string, data interface{}) error {
|
|
||||||
return e("Type mismatch for %s. Expected %s but found '%T'.",
|
|
||||||
user.Type().String(), expected, data)
|
|
||||||
}
|
|
99
vendor/github.com/BurntSushi/toml/decode_meta.go
generated
vendored
99
vendor/github.com/BurntSushi/toml/decode_meta.go
generated
vendored
@ -1,99 +0,0 @@
|
|||||||
package toml
|
|
||||||
|
|
||||||
import "strings"
|
|
||||||
|
|
||||||
// MetaData allows access to meta information about TOML data that may not
|
|
||||||
// be inferrable via reflection. In particular, whether a key has been defined
|
|
||||||
// and the TOML type of a key.
|
|
||||||
type MetaData struct {
|
|
||||||
mapping map[string]interface{}
|
|
||||||
types map[string]tomlType
|
|
||||||
keys []Key
|
|
||||||
decoded map[string]bool
|
|
||||||
context Key // Used only during decoding.
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsDefined returns true if the key given exists in the TOML data. The key
|
|
||||||
// should be specified hierarchially. e.g.,
|
|
||||||
//
|
|
||||||
// // access the TOML key 'a.b.c'
|
|
||||||
// IsDefined("a", "b", "c")
|
|
||||||
//
|
|
||||||
// IsDefined will return false if an empty key given. Keys are case sensitive.
|
|
||||||
func (md *MetaData) IsDefined(key ...string) bool {
|
|
||||||
if len(key) == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
var hash map[string]interface{}
|
|
||||||
var ok bool
|
|
||||||
var hashOrVal interface{} = md.mapping
|
|
||||||
for _, k := range key {
|
|
||||||
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if hashOrVal, ok = hash[k]; !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns a string representation of the type of the key specified.
|
|
||||||
//
|
|
||||||
// Type will return the empty string if given an empty key or a key that
|
|
||||||
// does not exist. Keys are case sensitive.
|
|
||||||
func (md *MetaData) Type(key ...string) string {
|
|
||||||
fullkey := strings.Join(key, ".")
|
|
||||||
if typ, ok := md.types[fullkey]; ok {
|
|
||||||
return typ.typeString()
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
|
|
||||||
// to get values of this type.
|
|
||||||
type Key []string
|
|
||||||
|
|
||||||
func (k Key) String() string {
|
|
||||||
return strings.Join(k, ".")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (k Key) add(piece string) Key {
|
|
||||||
newKey := make(Key, len(k)+1)
|
|
||||||
copy(newKey, k)
|
|
||||||
newKey[len(k)] = piece
|
|
||||||
return newKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keys returns a slice of every key in the TOML data, including key groups.
|
|
||||||
// Each key is itself a slice, where the first element is the top of the
|
|
||||||
// hierarchy and the last is the most specific.
|
|
||||||
//
|
|
||||||
// The list will have the same order as the keys appeared in the TOML data.
|
|
||||||
//
|
|
||||||
// All keys returned are non-empty.
|
|
||||||
func (md *MetaData) Keys() []Key {
|
|
||||||
return md.keys
|
|
||||||
}
|
|
||||||
|
|
||||||
// Undecoded returns all keys that have not been decoded in the order in which
|
|
||||||
// they appear in the original TOML document.
|
|
||||||
//
|
|
||||||
// This includes keys that haven't been decoded because of a Primitive value.
|
|
||||||
// Once the Primitive value is decoded, the keys will be considered decoded.
|
|
||||||
//
|
|
||||||
// Also note that decoding into an empty interface will result in no decoding,
|
|
||||||
// and so no keys will be considered decoded.
|
|
||||||
//
|
|
||||||
// In this sense, the Undecoded keys correspond to keys in the TOML document
|
|
||||||
// that do not have a concrete type in your representation.
|
|
||||||
func (md *MetaData) Undecoded() []Key {
|
|
||||||
undecoded := make([]Key, 0, len(md.keys))
|
|
||||||
for _, key := range md.keys {
|
|
||||||
if !md.decoded[key.String()] {
|
|
||||||
undecoded = append(undecoded, key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return undecoded
|
|
||||||
}
|
|
540
vendor/github.com/BurntSushi/toml/decode_test.go
generated
vendored
540
vendor/github.com/BurntSushi/toml/decode_test.go
generated
vendored
@ -1,540 +0,0 @@
|
|||||||
package toml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"reflect"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
log.SetFlags(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDecodeSimple(t *testing.T) {
|
|
||||||
var testSimple = `
|
|
||||||
age = 250
|
|
||||||
andrew = "gallant"
|
|
||||||
kait = "brady"
|
|
||||||
now = 1987-07-05T05:45:00Z
|
|
||||||
yesOrNo = true
|
|
||||||
pi = 3.14
|
|
||||||
colors = [
|
|
||||||
["red", "green", "blue"],
|
|
||||||
["cyan", "magenta", "yellow", "black"],
|
|
||||||
]
|
|
||||||
|
|
||||||
[My.Cats]
|
|
||||||
plato = "cat 1"
|
|
||||||
cauchy = "cat 2"
|
|
||||||
`
|
|
||||||
|
|
||||||
type cats struct {
|
|
||||||
Plato string
|
|
||||||
Cauchy string
|
|
||||||
}
|
|
||||||
type simple struct {
|
|
||||||
Age int
|
|
||||||
Colors [][]string
|
|
||||||
Pi float64
|
|
||||||
YesOrNo bool
|
|
||||||
Now time.Time
|
|
||||||
Andrew string
|
|
||||||
Kait string
|
|
||||||
My map[string]cats
|
|
||||||
}
|
|
||||||
|
|
||||||
var val simple
|
|
||||||
_, err := Decode(testSimple, &val)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
now, err := time.Parse("2006-01-02T15:04:05", "1987-07-05T05:45:00")
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
var answer = simple{
|
|
||||||
Age: 250,
|
|
||||||
Andrew: "gallant",
|
|
||||||
Kait: "brady",
|
|
||||||
Now: now,
|
|
||||||
YesOrNo: true,
|
|
||||||
Pi: 3.14,
|
|
||||||
Colors: [][]string{
|
|
||||||
{"red", "green", "blue"},
|
|
||||||
{"cyan", "magenta", "yellow", "black"},
|
|
||||||
},
|
|
||||||
My: map[string]cats{
|
|
||||||
"Cats": cats{Plato: "cat 1", Cauchy: "cat 2"},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
if !reflect.DeepEqual(val, answer) {
|
|
||||||
t.Fatalf("Expected\n-----\n%#v\n-----\nbut got\n-----\n%#v\n",
|
|
||||||
answer, val)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDecodeEmbedded(t *testing.T) {
|
|
||||||
type Dog struct{ Name string }
|
|
||||||
type Age int
|
|
||||||
|
|
||||||
tests := map[string]struct {
|
|
||||||
input string
|
|
||||||
decodeInto interface{}
|
|
||||||
wantDecoded interface{}
|
|
||||||
}{
|
|
||||||
"embedded struct": {
|
|
||||||
input: `Name = "milton"`,
|
|
||||||
decodeInto: &struct{ Dog }{},
|
|
||||||
wantDecoded: &struct{ Dog }{Dog{"milton"}},
|
|
||||||
},
|
|
||||||
"embedded non-nil pointer to struct": {
|
|
||||||
input: `Name = "milton"`,
|
|
||||||
decodeInto: &struct{ *Dog }{},
|
|
||||||
wantDecoded: &struct{ *Dog }{&Dog{"milton"}},
|
|
||||||
},
|
|
||||||
"embedded nil pointer to struct": {
|
|
||||||
input: ``,
|
|
||||||
decodeInto: &struct{ *Dog }{},
|
|
||||||
wantDecoded: &struct{ *Dog }{nil},
|
|
||||||
},
|
|
||||||
"embedded int": {
|
|
||||||
input: `Age = -5`,
|
|
||||||
decodeInto: &struct{ Age }{},
|
|
||||||
wantDecoded: &struct{ Age }{-5},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for label, test := range tests {
|
|
||||||
_, err := Decode(test.input, test.decodeInto)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if !reflect.DeepEqual(test.wantDecoded, test.decodeInto) {
|
|
||||||
t.Errorf("%s: want decoded == %+v, got %+v",
|
|
||||||
label, test.wantDecoded, test.decodeInto)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTableArrays(t *testing.T) {
|
|
||||||
var tomlTableArrays = `
|
|
||||||
[[albums]]
|
|
||||||
name = "Born to Run"
|
|
||||||
|
|
||||||
[[albums.songs]]
|
|
||||||
name = "Jungleland"
|
|
||||||
|
|
||||||
[[albums.songs]]
|
|
||||||
name = "Meeting Across the River"
|
|
||||||
|
|
||||||
[[albums]]
|
|
||||||
name = "Born in the USA"
|
|
||||||
|
|
||||||
[[albums.songs]]
|
|
||||||
name = "Glory Days"
|
|
||||||
|
|
||||||
[[albums.songs]]
|
|
||||||
name = "Dancing in the Dark"
|
|
||||||
`
|
|
||||||
|
|
||||||
type Song struct {
|
|
||||||
Name string
|
|
||||||
}
|
|
||||||
|
|
||||||
type Album struct {
|
|
||||||
Name string
|
|
||||||
Songs []Song
|
|
||||||
}
|
|
||||||
|
|
||||||
type Music struct {
|
|
||||||
Albums []Album
|
|
||||||
}
|
|
||||||
|
|
||||||
expected := Music{[]Album{
|
|
||||||
{"Born to Run", []Song{{"Jungleland"}, {"Meeting Across the River"}}},
|
|
||||||
{"Born in the USA", []Song{{"Glory Days"}, {"Dancing in the Dark"}}},
|
|
||||||
}}
|
|
||||||
var got Music
|
|
||||||
if _, err := Decode(tomlTableArrays, &got); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if !reflect.DeepEqual(expected, got) {
|
|
||||||
t.Fatalf("\n%#v\n!=\n%#v\n", expected, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Case insensitive matching tests.
|
|
||||||
// A bit more comprehensive than needed given the current implementation,
|
|
||||||
// but implementations change.
|
|
||||||
// Probably still missing demonstrations of some ugly corner cases regarding
|
|
||||||
// case insensitive matching and multiple fields.
|
|
||||||
func TestCase(t *testing.T) {
|
|
||||||
var caseToml = `
|
|
||||||
tOpString = "string"
|
|
||||||
tOpInt = 1
|
|
||||||
tOpFloat = 1.1
|
|
||||||
tOpBool = true
|
|
||||||
tOpdate = 2006-01-02T15:04:05Z
|
|
||||||
tOparray = [ "array" ]
|
|
||||||
Match = "i should be in Match only"
|
|
||||||
MatcH = "i should be in MatcH only"
|
|
||||||
once = "just once"
|
|
||||||
[nEst.eD]
|
|
||||||
nEstedString = "another string"
|
|
||||||
`
|
|
||||||
|
|
||||||
type InsensitiveEd struct {
|
|
||||||
NestedString string
|
|
||||||
}
|
|
||||||
|
|
||||||
type InsensitiveNest struct {
|
|
||||||
Ed InsensitiveEd
|
|
||||||
}
|
|
||||||
|
|
||||||
type Insensitive struct {
|
|
||||||
TopString string
|
|
||||||
TopInt int
|
|
||||||
TopFloat float64
|
|
||||||
TopBool bool
|
|
||||||
TopDate time.Time
|
|
||||||
TopArray []string
|
|
||||||
Match string
|
|
||||||
MatcH string
|
|
||||||
Once string
|
|
||||||
OncE string
|
|
||||||
Nest InsensitiveNest
|
|
||||||
}
|
|
||||||
|
|
||||||
tme, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5])
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
expected := Insensitive{
|
|
||||||
TopString: "string",
|
|
||||||
TopInt: 1,
|
|
||||||
TopFloat: 1.1,
|
|
||||||
TopBool: true,
|
|
||||||
TopDate: tme,
|
|
||||||
TopArray: []string{"array"},
|
|
||||||
MatcH: "i should be in MatcH only",
|
|
||||||
Match: "i should be in Match only",
|
|
||||||
Once: "just once",
|
|
||||||
OncE: "",
|
|
||||||
Nest: InsensitiveNest{
|
|
||||||
Ed: InsensitiveEd{NestedString: "another string"},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
var got Insensitive
|
|
||||||
if _, err := Decode(caseToml, &got); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if !reflect.DeepEqual(expected, got) {
|
|
||||||
t.Fatalf("\n%#v\n!=\n%#v\n", expected, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPointers(t *testing.T) {
|
|
||||||
type Object struct {
|
|
||||||
Type string
|
|
||||||
Description string
|
|
||||||
}
|
|
||||||
|
|
||||||
type Dict struct {
|
|
||||||
NamedObject map[string]*Object
|
|
||||||
BaseObject *Object
|
|
||||||
Strptr *string
|
|
||||||
Strptrs []*string
|
|
||||||
}
|
|
||||||
s1, s2, s3 := "blah", "abc", "def"
|
|
||||||
expected := &Dict{
|
|
||||||
Strptr: &s1,
|
|
||||||
Strptrs: []*string{&s2, &s3},
|
|
||||||
NamedObject: map[string]*Object{
|
|
||||||
"foo": {"FOO", "fooooo!!!"},
|
|
||||||
"bar": {"BAR", "ba-ba-ba-ba-barrrr!!!"},
|
|
||||||
},
|
|
||||||
BaseObject: &Object{"BASE", "da base"},
|
|
||||||
}
|
|
||||||
|
|
||||||
ex1 := `
|
|
||||||
Strptr = "blah"
|
|
||||||
Strptrs = ["abc", "def"]
|
|
||||||
|
|
||||||
[NamedObject.foo]
|
|
||||||
Type = "FOO"
|
|
||||||
Description = "fooooo!!!"
|
|
||||||
|
|
||||||
[NamedObject.bar]
|
|
||||||
Type = "BAR"
|
|
||||||
Description = "ba-ba-ba-ba-barrrr!!!"
|
|
||||||
|
|
||||||
[BaseObject]
|
|
||||||
Type = "BASE"
|
|
||||||
Description = "da base"
|
|
||||||
`
|
|
||||||
dict := new(Dict)
|
|
||||||
_, err := Decode(ex1, dict)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Decode error: %v", err)
|
|
||||||
}
|
|
||||||
if !reflect.DeepEqual(expected, dict) {
|
|
||||||
t.Fatalf("\n%#v\n!=\n%#v\n", expected, dict)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type sphere struct {
|
|
||||||
Center [3]float64
|
|
||||||
Radius float64
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDecodeSimpleArray(t *testing.T) {
|
|
||||||
var s1 sphere
|
|
||||||
if _, err := Decode(`center = [0.0, 1.5, 0.0]`, &s1); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDecodeArrayWrongSize(t *testing.T) {
|
|
||||||
var s1 sphere
|
|
||||||
if _, err := Decode(`center = [0.1, 2.3]`, &s1); err == nil {
|
|
||||||
t.Fatal("Expected array type mismatch error")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDecodeLargeIntoSmallInt(t *testing.T) {
|
|
||||||
type table struct {
|
|
||||||
Value int8
|
|
||||||
}
|
|
||||||
var tab table
|
|
||||||
if _, err := Decode(`value = 500`, &tab); err == nil {
|
|
||||||
t.Fatal("Expected integer out-of-bounds error.")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDecodeSizedInts(t *testing.T) {
|
|
||||||
type table struct {
|
|
||||||
U8 uint8
|
|
||||||
U16 uint16
|
|
||||||
U32 uint32
|
|
||||||
U64 uint64
|
|
||||||
U uint
|
|
||||||
I8 int8
|
|
||||||
I16 int16
|
|
||||||
I32 int32
|
|
||||||
I64 int64
|
|
||||||
I int
|
|
||||||
}
|
|
||||||
answer := table{1, 1, 1, 1, 1, -1, -1, -1, -1, -1}
|
|
||||||
toml := `
|
|
||||||
u8 = 1
|
|
||||||
u16 = 1
|
|
||||||
u32 = 1
|
|
||||||
u64 = 1
|
|
||||||
u = 1
|
|
||||||
i8 = -1
|
|
||||||
i16 = -1
|
|
||||||
i32 = -1
|
|
||||||
i64 = -1
|
|
||||||
i = -1
|
|
||||||
`
|
|
||||||
var tab table
|
|
||||||
if _, err := Decode(toml, &tab); err != nil {
|
|
||||||
t.Fatal(err.Error())
|
|
||||||
}
|
|
||||||
if answer != tab {
|
|
||||||
t.Fatalf("Expected %#v but got %#v", answer, tab)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleMetaData_PrimitiveDecode() {
|
|
||||||
var md MetaData
|
|
||||||
var err error
|
|
||||||
|
|
||||||
var tomlBlob = `
|
|
||||||
ranking = ["Springsteen", "J Geils"]
|
|
||||||
|
|
||||||
[bands.Springsteen]
|
|
||||||
started = 1973
|
|
||||||
albums = ["Greetings", "WIESS", "Born to Run", "Darkness"]
|
|
||||||
|
|
||||||
[bands.J Geils]
|
|
||||||
started = 1970
|
|
||||||
albums = ["The J. Geils Band", "Full House", "Blow Your Face Out"]
|
|
||||||
`
|
|
||||||
|
|
||||||
type band struct {
|
|
||||||
Started int
|
|
||||||
Albums []string
|
|
||||||
}
|
|
||||||
type classics struct {
|
|
||||||
Ranking []string
|
|
||||||
Bands map[string]Primitive
|
|
||||||
}
|
|
||||||
|
|
||||||
// Do the initial decode. Reflection is delayed on Primitive values.
|
|
||||||
var music classics
|
|
||||||
if md, err = Decode(tomlBlob, &music); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MetaData still includes information on Primitive values.
|
|
||||||
fmt.Printf("Is `bands.Springsteen` defined? %v\n",
|
|
||||||
md.IsDefined("bands", "Springsteen"))
|
|
||||||
|
|
||||||
// Decode primitive data into Go values.
|
|
||||||
for _, artist := range music.Ranking {
|
|
||||||
// A band is a primitive value, so we need to decode it to get a
|
|
||||||
// real `band` value.
|
|
||||||
primValue := music.Bands[artist]
|
|
||||||
|
|
||||||
var aBand band
|
|
||||||
if err = md.PrimitiveDecode(primValue, &aBand); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
fmt.Printf("%s started in %d.\n", artist, aBand.Started)
|
|
||||||
}
|
|
||||||
// Check to see if there were any fields left undecoded.
|
|
||||||
// Note that this won't be empty before decoding the Primitive value!
|
|
||||||
fmt.Printf("Undecoded: %q\n", md.Undecoded())
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// Is `bands.Springsteen` defined? true
|
|
||||||
// Springsteen started in 1973.
|
|
||||||
// J Geils started in 1970.
|
|
||||||
// Undecoded: []
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleDecode() {
|
|
||||||
var tomlBlob = `
|
|
||||||
# Some comments.
|
|
||||||
[alpha]
|
|
||||||
ip = "10.0.0.1"
|
|
||||||
|
|
||||||
[alpha.config]
|
|
||||||
Ports = [ 8001, 8002 ]
|
|
||||||
Location = "Toronto"
|
|
||||||
Created = 1987-07-05T05:45:00Z
|
|
||||||
|
|
||||||
[beta]
|
|
||||||
ip = "10.0.0.2"
|
|
||||||
|
|
||||||
[beta.config]
|
|
||||||
Ports = [ 9001, 9002 ]
|
|
||||||
Location = "New Jersey"
|
|
||||||
Created = 1887-01-05T05:55:00Z
|
|
||||||
`
|
|
||||||
|
|
||||||
type serverConfig struct {
|
|
||||||
Ports []int
|
|
||||||
Location string
|
|
||||||
Created time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
type server struct {
|
|
||||||
IP string `toml:"ip"`
|
|
||||||
Config serverConfig `toml:"config"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type servers map[string]server
|
|
||||||
|
|
||||||
var config servers
|
|
||||||
if _, err := Decode(tomlBlob, &config); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, name := range []string{"alpha", "beta"} {
|
|
||||||
s := config[name]
|
|
||||||
fmt.Printf("Server: %s (ip: %s) in %s created on %s\n",
|
|
||||||
name, s.IP, s.Config.Location,
|
|
||||||
s.Config.Created.Format("2006-01-02"))
|
|
||||||
fmt.Printf("Ports: %v\n", s.Config.Ports)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// Server: alpha (ip: 10.0.0.1) in Toronto created on 1987-07-05
|
|
||||||
// Ports: [8001 8002]
|
|
||||||
// Server: beta (ip: 10.0.0.2) in New Jersey created on 1887-01-05
|
|
||||||
// Ports: [9001 9002]
|
|
||||||
}
|
|
||||||
|
|
||||||
type duration struct {
|
|
||||||
time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *duration) UnmarshalText(text []byte) error {
|
|
||||||
var err error
|
|
||||||
d.Duration, err = time.ParseDuration(string(text))
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Example Unmarshaler shows how to decode TOML strings into your own
|
|
||||||
// custom data type.
|
|
||||||
func Example_unmarshaler() {
|
|
||||||
blob := `
|
|
||||||
[[song]]
|
|
||||||
name = "Thunder Road"
|
|
||||||
duration = "4m49s"
|
|
||||||
|
|
||||||
[[song]]
|
|
||||||
name = "Stairway to Heaven"
|
|
||||||
duration = "8m03s"
|
|
||||||
`
|
|
||||||
type song struct {
|
|
||||||
Name string
|
|
||||||
Duration duration
|
|
||||||
}
|
|
||||||
type songs struct {
|
|
||||||
Song []song
|
|
||||||
}
|
|
||||||
var favorites songs
|
|
||||||
if _, err := Decode(blob, &favorites); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Code to implement the TextUnmarshaler interface for `duration`:
|
|
||||||
//
|
|
||||||
// type duration struct {
|
|
||||||
// time.Duration
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// func (d *duration) UnmarshalText(text []byte) error {
|
|
||||||
// var err error
|
|
||||||
// d.Duration, err = time.ParseDuration(string(text))
|
|
||||||
// return err
|
|
||||||
// }
|
|
||||||
|
|
||||||
for _, s := range favorites.Song {
|
|
||||||
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
|
|
||||||
}
|
|
||||||
// Output:
|
|
||||||
// Thunder Road (4m49s)
|
|
||||||
// Stairway to Heaven (8m3s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Example StrictDecoding shows how to detect whether there are keys in the
|
|
||||||
// TOML document that weren't decoded into the value given. This is useful
|
|
||||||
// for returning an error to the user if they've included extraneous fields
|
|
||||||
// in their configuration.
|
|
||||||
func Example_strictDecoding() {
|
|
||||||
var blob = `
|
|
||||||
key1 = "value1"
|
|
||||||
key2 = "value2"
|
|
||||||
key3 = "value3"
|
|
||||||
`
|
|
||||||
type config struct {
|
|
||||||
Key1 string
|
|
||||||
Key3 string
|
|
||||||
}
|
|
||||||
|
|
||||||
var conf config
|
|
||||||
md, err := Decode(blob, &conf)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
fmt.Printf("Undecoded keys: %q\n", md.Undecoded())
|
|
||||||
// Output:
|
|
||||||
// Undecoded keys: ["key2"]
|
|
||||||
}
|
|
27
vendor/github.com/BurntSushi/toml/doc.go
generated
vendored
27
vendor/github.com/BurntSushi/toml/doc.go
generated
vendored
@ -1,27 +0,0 @@
|
|||||||
/*
|
|
||||||
Package toml provides facilities for decoding and encoding TOML configuration
|
|
||||||
files via reflection. There is also support for delaying decoding with
|
|
||||||
the Primitive type, and querying the set of keys in a TOML document with the
|
|
||||||
MetaData type.
|
|
||||||
|
|
||||||
The specification implemented: https://github.com/mojombo/toml
|
|
||||||
|
|
||||||
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
|
|
||||||
whether a file is a valid TOML document. It can also be used to print the
|
|
||||||
type of each key in a TOML document.
|
|
||||||
|
|
||||||
Testing
|
|
||||||
|
|
||||||
There are two important types of tests used for this package. The first is
|
|
||||||
contained inside '*_test.go' files and uses the standard Go unit testing
|
|
||||||
framework. These tests are primarily devoted to holistically testing the
|
|
||||||
decoder and encoder.
|
|
||||||
|
|
||||||
The second type of testing is used to verify the implementation's adherence
|
|
||||||
to the TOML specification. These tests have been factored into their own
|
|
||||||
project: https://github.com/BurntSushi/toml-test
|
|
||||||
|
|
||||||
The reason the tests are in a separate project is so that they can be used by
|
|
||||||
any implementation of TOML. Namely, it is language agnostic.
|
|
||||||
*/
|
|
||||||
package toml
|
|
515
vendor/github.com/BurntSushi/toml/encode.go
generated
vendored
515
vendor/github.com/BurntSushi/toml/encode.go
generated
vendored
@ -1,515 +0,0 @@
|
|||||||
package toml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type tomlEncodeError struct{ error }
|
|
||||||
|
|
||||||
var (
|
|
||||||
errArrayMixedElementTypes = errors.New(
|
|
||||||
"can't encode array with mixed element types")
|
|
||||||
errArrayNilElement = errors.New(
|
|
||||||
"can't encode array with nil element")
|
|
||||||
errNonString = errors.New(
|
|
||||||
"can't encode a map with non-string key type")
|
|
||||||
errAnonNonStruct = errors.New(
|
|
||||||
"can't encode an anonymous field that is not a struct")
|
|
||||||
errArrayNoTable = errors.New(
|
|
||||||
"TOML array element can't contain a table")
|
|
||||||
errNoKey = errors.New(
|
|
||||||
"top-level values must be a Go map or struct")
|
|
||||||
errAnything = errors.New("") // used in testing
|
|
||||||
)
|
|
||||||
|
|
||||||
var quotedReplacer = strings.NewReplacer(
|
|
||||||
"\t", "\\t",
|
|
||||||
"\n", "\\n",
|
|
||||||
"\r", "\\r",
|
|
||||||
"\"", "\\\"",
|
|
||||||
"\\", "\\\\",
|
|
||||||
)
|
|
||||||
|
|
||||||
// Encoder controls the encoding of Go values to a TOML document to some
|
|
||||||
// io.Writer.
|
|
||||||
//
|
|
||||||
// The indentation level can be controlled with the Indent field.
|
|
||||||
type Encoder struct {
|
|
||||||
// A single indentation level. By default it is two spaces.
|
|
||||||
Indent string
|
|
||||||
|
|
||||||
// hasWritten is whether we have written any output to w yet.
|
|
||||||
hasWritten bool
|
|
||||||
w *bufio.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
|
|
||||||
// given. By default, a single indentation level is 2 spaces.
|
|
||||||
func NewEncoder(w io.Writer) *Encoder {
|
|
||||||
return &Encoder{
|
|
||||||
w: bufio.NewWriter(w),
|
|
||||||
Indent: " ",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encode writes a TOML representation of the Go value to the underlying
|
|
||||||
// io.Writer. If the value given cannot be encoded to a valid TOML document,
|
|
||||||
// then an error is returned.
|
|
||||||
//
|
|
||||||
// The mapping between Go values and TOML values should be precisely the same
|
|
||||||
// as for the Decode* functions. Similarly, the TextMarshaler interface is
|
|
||||||
// supported by encoding the resulting bytes as strings. (If you want to write
|
|
||||||
// arbitrary binary data then you will need to use something like base64 since
|
|
||||||
// TOML does not have any binary types.)
|
|
||||||
//
|
|
||||||
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
|
|
||||||
// sub-hashes are encoded first.
|
|
||||||
//
|
|
||||||
// If a Go map is encoded, then its keys are sorted alphabetically for
|
|
||||||
// deterministic output. More control over this behavior may be provided if
|
|
||||||
// there is demand for it.
|
|
||||||
//
|
|
||||||
// Encoding Go values without a corresponding TOML representation---like map
|
|
||||||
// types with non-string keys---will cause an error to be returned. Similarly
|
|
||||||
// for mixed arrays/slices, arrays/slices with nil elements, embedded
|
|
||||||
// non-struct types and nested slices containing maps or structs.
|
|
||||||
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
|
|
||||||
// and so is []map[string][]string.)
|
|
||||||
func (enc *Encoder) Encode(v interface{}) error {
|
|
||||||
rv := eindirect(reflect.ValueOf(v))
|
|
||||||
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return enc.w.Flush()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
|
|
||||||
defer func() {
|
|
||||||
if r := recover(); r != nil {
|
|
||||||
if terr, ok := r.(tomlEncodeError); ok {
|
|
||||||
err = terr.error
|
|
||||||
return
|
|
||||||
}
|
|
||||||
panic(r)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
enc.encode(key, rv)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) encode(key Key, rv reflect.Value) {
|
|
||||||
// Special case. Time needs to be in ISO8601 format.
|
|
||||||
// Special case. If we can marshal the type to text, then we used that.
|
|
||||||
// Basically, this prevents the encoder for handling these types as
|
|
||||||
// generic structs (or whatever the underlying type of a TextMarshaler is).
|
|
||||||
switch rv.Interface().(type) {
|
|
||||||
case time.Time, TextMarshaler:
|
|
||||||
enc.keyEqElement(key, rv)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
k := rv.Kind()
|
|
||||||
switch k {
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
|
||||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
|
|
||||||
reflect.Uint64,
|
|
||||||
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
|
|
||||||
enc.keyEqElement(key, rv)
|
|
||||||
case reflect.Array, reflect.Slice:
|
|
||||||
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
|
|
||||||
enc.eArrayOfTables(key, rv)
|
|
||||||
} else {
|
|
||||||
enc.keyEqElement(key, rv)
|
|
||||||
}
|
|
||||||
case reflect.Interface:
|
|
||||||
if rv.IsNil() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
enc.encode(key, rv.Elem())
|
|
||||||
case reflect.Map:
|
|
||||||
if rv.IsNil() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
enc.eTable(key, rv)
|
|
||||||
case reflect.Ptr:
|
|
||||||
if rv.IsNil() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
enc.encode(key, rv.Elem())
|
|
||||||
case reflect.Struct:
|
|
||||||
enc.eTable(key, rv)
|
|
||||||
default:
|
|
||||||
panic(e("Unsupported type for key '%s': %s", key, k))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// eElement encodes any value that can be an array element (primitives and
|
|
||||||
// arrays).
|
|
||||||
func (enc *Encoder) eElement(rv reflect.Value) {
|
|
||||||
switch v := rv.Interface().(type) {
|
|
||||||
case time.Time:
|
|
||||||
// Special case time.Time as a primitive. Has to come before
|
|
||||||
// TextMarshaler below because time.Time implements
|
|
||||||
// encoding.TextMarshaler, but we need to always use UTC.
|
|
||||||
enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z"))
|
|
||||||
return
|
|
||||||
case TextMarshaler:
|
|
||||||
// Special case. Use text marshaler if it's available for this value.
|
|
||||||
if s, err := v.MarshalText(); err != nil {
|
|
||||||
encPanic(err)
|
|
||||||
} else {
|
|
||||||
enc.writeQuoted(string(s))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch rv.Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
enc.wf(strconv.FormatBool(rv.Bool()))
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
enc.wf(strconv.FormatInt(rv.Int(), 10))
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16,
|
|
||||||
reflect.Uint32, reflect.Uint64:
|
|
||||||
enc.wf(strconv.FormatUint(rv.Uint(), 10))
|
|
||||||
case reflect.Float32:
|
|
||||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
|
|
||||||
case reflect.Float64:
|
|
||||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
|
|
||||||
case reflect.Array, reflect.Slice:
|
|
||||||
enc.eArrayOrSliceElement(rv)
|
|
||||||
case reflect.Interface:
|
|
||||||
enc.eElement(rv.Elem())
|
|
||||||
case reflect.String:
|
|
||||||
enc.writeQuoted(rv.String())
|
|
||||||
default:
|
|
||||||
panic(e("Unexpected primitive type: %s", rv.Kind()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// By the TOML spec, all floats must have a decimal with at least one
|
|
||||||
// number on either side.
|
|
||||||
func floatAddDecimal(fstr string) string {
|
|
||||||
if !strings.Contains(fstr, ".") {
|
|
||||||
return fstr + ".0"
|
|
||||||
}
|
|
||||||
return fstr
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) writeQuoted(s string) {
|
|
||||||
enc.wf("\"%s\"", quotedReplacer.Replace(s))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
|
|
||||||
length := rv.Len()
|
|
||||||
enc.wf("[")
|
|
||||||
for i := 0; i < length; i++ {
|
|
||||||
elem := rv.Index(i)
|
|
||||||
enc.eElement(elem)
|
|
||||||
if i != length-1 {
|
|
||||||
enc.wf(", ")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
enc.wf("]")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
|
|
||||||
if len(key) == 0 {
|
|
||||||
encPanic(errNoKey)
|
|
||||||
}
|
|
||||||
panicIfInvalidKey(key, true)
|
|
||||||
for i := 0; i < rv.Len(); i++ {
|
|
||||||
trv := rv.Index(i)
|
|
||||||
if isNil(trv) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
enc.newline()
|
|
||||||
enc.wf("%s[[%s]]", enc.indentStr(key), key.String())
|
|
||||||
enc.newline()
|
|
||||||
enc.eMapOrStruct(key, trv)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
|
|
||||||
if len(key) == 1 {
|
|
||||||
// Output an extra new line between top-level tables.
|
|
||||||
// (The newline isn't written if nothing else has been written though.)
|
|
||||||
enc.newline()
|
|
||||||
}
|
|
||||||
if len(key) > 0 {
|
|
||||||
panicIfInvalidKey(key, true)
|
|
||||||
enc.wf("%s[%s]", enc.indentStr(key), key.String())
|
|
||||||
enc.newline()
|
|
||||||
}
|
|
||||||
enc.eMapOrStruct(key, rv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
|
|
||||||
switch rv := eindirect(rv); rv.Kind() {
|
|
||||||
case reflect.Map:
|
|
||||||
enc.eMap(key, rv)
|
|
||||||
case reflect.Struct:
|
|
||||||
enc.eStruct(key, rv)
|
|
||||||
default:
|
|
||||||
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
|
|
||||||
rt := rv.Type()
|
|
||||||
if rt.Key().Kind() != reflect.String {
|
|
||||||
encPanic(errNonString)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sort keys so that we have deterministic output. And write keys directly
|
|
||||||
// underneath this key first, before writing sub-structs or sub-maps.
|
|
||||||
var mapKeysDirect, mapKeysSub []string
|
|
||||||
for _, mapKey := range rv.MapKeys() {
|
|
||||||
k := mapKey.String()
|
|
||||||
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
|
|
||||||
mapKeysSub = append(mapKeysSub, k)
|
|
||||||
} else {
|
|
||||||
mapKeysDirect = append(mapKeysDirect, k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var writeMapKeys = func(mapKeys []string) {
|
|
||||||
sort.Strings(mapKeys)
|
|
||||||
for _, mapKey := range mapKeys {
|
|
||||||
mrv := rv.MapIndex(reflect.ValueOf(mapKey))
|
|
||||||
if isNil(mrv) {
|
|
||||||
// Don't write anything for nil fields.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
enc.encode(key.add(mapKey), mrv)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
writeMapKeys(mapKeysDirect)
|
|
||||||
writeMapKeys(mapKeysSub)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
|
|
||||||
// Write keys for fields directly under this key first, because if we write
|
|
||||||
// a field that creates a new table, then all keys under it will be in that
|
|
||||||
// table (not the one we're writing here).
|
|
||||||
rt := rv.Type()
|
|
||||||
var fieldsDirect, fieldsSub [][]int
|
|
||||||
var addFields func(rt reflect.Type, rv reflect.Value, start []int)
|
|
||||||
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
|
|
||||||
for i := 0; i < rt.NumField(); i++ {
|
|
||||||
f := rt.Field(i)
|
|
||||||
// skip unexporded fields
|
|
||||||
if f.PkgPath != "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
frv := rv.Field(i)
|
|
||||||
if f.Anonymous {
|
|
||||||
frv := eindirect(frv)
|
|
||||||
t := frv.Type()
|
|
||||||
if t.Kind() != reflect.Struct {
|
|
||||||
encPanic(errAnonNonStruct)
|
|
||||||
}
|
|
||||||
addFields(t, frv, f.Index)
|
|
||||||
} else if typeIsHash(tomlTypeOfGo(frv)) {
|
|
||||||
fieldsSub = append(fieldsSub, append(start, f.Index...))
|
|
||||||
} else {
|
|
||||||
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
addFields(rt, rv, nil)
|
|
||||||
|
|
||||||
var writeFields = func(fields [][]int) {
|
|
||||||
for _, fieldIndex := range fields {
|
|
||||||
sft := rt.FieldByIndex(fieldIndex)
|
|
||||||
sf := rv.FieldByIndex(fieldIndex)
|
|
||||||
if isNil(sf) {
|
|
||||||
// Don't write anything for nil fields.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
keyName := sft.Tag.Get("toml")
|
|
||||||
if keyName == "-" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if keyName == "" {
|
|
||||||
keyName = sft.Name
|
|
||||||
}
|
|
||||||
enc.encode(key.add(keyName), sf)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
writeFields(fieldsDirect)
|
|
||||||
writeFields(fieldsSub)
|
|
||||||
}
|
|
||||||
|
|
||||||
// tomlTypeName returns the TOML type name of the Go value's type. It is used to
|
|
||||||
// determine whether the types of array elements are mixed (which is forbidden).
|
|
||||||
// If the Go value is nil, then it is illegal for it to be an array element, and
|
|
||||||
// valueIsNil is returned as true.
|
|
||||||
|
|
||||||
// Returns the TOML type of a Go value. The type may be `nil`, which means
|
|
||||||
// no concrete TOML type could be found.
|
|
||||||
func tomlTypeOfGo(rv reflect.Value) tomlType {
|
|
||||||
if isNil(rv) || !rv.IsValid() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
switch rv.Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
return tomlBool
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
|
||||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
|
|
||||||
reflect.Uint64:
|
|
||||||
return tomlInteger
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return tomlFloat
|
|
||||||
case reflect.Array, reflect.Slice:
|
|
||||||
if typeEqual(tomlHash, tomlArrayType(rv)) {
|
|
||||||
return tomlArrayHash
|
|
||||||
} else {
|
|
||||||
return tomlArray
|
|
||||||
}
|
|
||||||
case reflect.Ptr, reflect.Interface:
|
|
||||||
return tomlTypeOfGo(rv.Elem())
|
|
||||||
case reflect.String:
|
|
||||||
return tomlString
|
|
||||||
case reflect.Map:
|
|
||||||
return tomlHash
|
|
||||||
case reflect.Struct:
|
|
||||||
switch rv.Interface().(type) {
|
|
||||||
case time.Time:
|
|
||||||
return tomlDatetime
|
|
||||||
case TextMarshaler:
|
|
||||||
return tomlString
|
|
||||||
default:
|
|
||||||
return tomlHash
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
panic("unexpected reflect.Kind: " + rv.Kind().String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// tomlArrayType returns the element type of a TOML array. The type returned
|
|
||||||
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
|
|
||||||
// slize). This function may also panic if it finds a type that cannot be
|
|
||||||
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
|
|
||||||
// nested arrays of tables).
|
|
||||||
func tomlArrayType(rv reflect.Value) tomlType {
|
|
||||||
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
firstType := tomlTypeOfGo(rv.Index(0))
|
|
||||||
if firstType == nil {
|
|
||||||
encPanic(errArrayNilElement)
|
|
||||||
}
|
|
||||||
|
|
||||||
rvlen := rv.Len()
|
|
||||||
for i := 1; i < rvlen; i++ {
|
|
||||||
elem := rv.Index(i)
|
|
||||||
switch elemType := tomlTypeOfGo(elem); {
|
|
||||||
case elemType == nil:
|
|
||||||
encPanic(errArrayNilElement)
|
|
||||||
case !typeEqual(firstType, elemType):
|
|
||||||
encPanic(errArrayMixedElementTypes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// If we have a nested array, then we must make sure that the nested
|
|
||||||
// array contains ONLY primitives.
|
|
||||||
// This checks arbitrarily nested arrays.
|
|
||||||
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
|
|
||||||
nest := tomlArrayType(eindirect(rv.Index(0)))
|
|
||||||
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
|
|
||||||
encPanic(errArrayNoTable)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return firstType
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) newline() {
|
|
||||||
if enc.hasWritten {
|
|
||||||
enc.wf("\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
|
|
||||||
if len(key) == 0 {
|
|
||||||
encPanic(errNoKey)
|
|
||||||
}
|
|
||||||
panicIfInvalidKey(key, false)
|
|
||||||
enc.wf("%s%s = ", enc.indentStr(key), key[len(key)-1])
|
|
||||||
enc.eElement(val)
|
|
||||||
enc.newline()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) wf(format string, v ...interface{}) {
|
|
||||||
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
|
|
||||||
encPanic(err)
|
|
||||||
}
|
|
||||||
enc.hasWritten = true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) indentStr(key Key) string {
|
|
||||||
return strings.Repeat(enc.Indent, len(key)-1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func encPanic(err error) {
|
|
||||||
panic(tomlEncodeError{err})
|
|
||||||
}
|
|
||||||
|
|
||||||
func eindirect(v reflect.Value) reflect.Value {
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Ptr, reflect.Interface:
|
|
||||||
return eindirect(v.Elem())
|
|
||||||
default:
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func isNil(rv reflect.Value) bool {
|
|
||||||
switch rv.Kind() {
|
|
||||||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
|
||||||
return rv.IsNil()
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func panicIfInvalidKey(key Key, hash bool) {
|
|
||||||
if hash {
|
|
||||||
for _, k := range key {
|
|
||||||
if !isValidTableName(k) {
|
|
||||||
encPanic(e("Key '%s' is not a valid table name. Table names "+
|
|
||||||
"cannot contain '[', ']' or '.'.", key.String()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if !isValidKeyName(key[len(key)-1]) {
|
|
||||||
encPanic(e("Key '%s' is not a name. Key names "+
|
|
||||||
"cannot contain whitespace.", key.String()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func isValidTableName(s string) bool {
|
|
||||||
if len(s) == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for _, r := range s {
|
|
||||||
if r == '[' || r == ']' || r == '.' {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func isValidKeyName(s string) bool {
|
|
||||||
if len(s) == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
506
vendor/github.com/BurntSushi/toml/encode_test.go
generated
vendored
506
vendor/github.com/BurntSushi/toml/encode_test.go
generated
vendored
@ -1,506 +0,0 @@
|
|||||||
package toml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"net"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestEncodeRoundTrip(t *testing.T) {
|
|
||||||
type Config struct {
|
|
||||||
Age int
|
|
||||||
Cats []string
|
|
||||||
Pi float64
|
|
||||||
Perfection []int
|
|
||||||
DOB time.Time
|
|
||||||
Ipaddress net.IP
|
|
||||||
}
|
|
||||||
|
|
||||||
var inputs = Config{
|
|
||||||
13,
|
|
||||||
[]string{"one", "two", "three"},
|
|
||||||
3.145,
|
|
||||||
[]int{11, 2, 3, 4},
|
|
||||||
time.Now(),
|
|
||||||
net.ParseIP("192.168.59.254"),
|
|
||||||
}
|
|
||||||
|
|
||||||
var firstBuffer bytes.Buffer
|
|
||||||
e := NewEncoder(&firstBuffer)
|
|
||||||
err := e.Encode(inputs)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
var outputs Config
|
|
||||||
if _, err := Decode(firstBuffer.String(), &outputs); err != nil {
|
|
||||||
log.Printf("Could not decode:\n-----\n%s\n-----\n",
|
|
||||||
firstBuffer.String())
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// could test each value individually, but I'm lazy
|
|
||||||
var secondBuffer bytes.Buffer
|
|
||||||
e2 := NewEncoder(&secondBuffer)
|
|
||||||
err = e2.Encode(outputs)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if firstBuffer.String() != secondBuffer.String() {
|
|
||||||
t.Error(
|
|
||||||
firstBuffer.String(),
|
|
||||||
"\n\n is not identical to\n\n",
|
|
||||||
secondBuffer.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// XXX(burntsushi)
|
|
||||||
// I think these tests probably should be removed. They are good, but they
|
|
||||||
// ought to be obsolete by toml-test.
|
|
||||||
func TestEncode(t *testing.T) {
|
|
||||||
type Embedded struct {
|
|
||||||
Int int `toml:"_int"`
|
|
||||||
}
|
|
||||||
type NonStruct int
|
|
||||||
|
|
||||||
date := time.Date(2014, 5, 11, 20, 30, 40, 0, time.FixedZone("IST", 3600))
|
|
||||||
dateStr := "2014-05-11T19:30:40Z"
|
|
||||||
|
|
||||||
tests := map[string]struct {
|
|
||||||
input interface{}
|
|
||||||
wantOutput string
|
|
||||||
wantError error
|
|
||||||
}{
|
|
||||||
"bool field": {
|
|
||||||
input: struct {
|
|
||||||
BoolTrue bool
|
|
||||||
BoolFalse bool
|
|
||||||
}{true, false},
|
|
||||||
wantOutput: "BoolTrue = true\nBoolFalse = false\n",
|
|
||||||
},
|
|
||||||
"int fields": {
|
|
||||||
input: struct {
|
|
||||||
Int int
|
|
||||||
Int8 int8
|
|
||||||
Int16 int16
|
|
||||||
Int32 int32
|
|
||||||
Int64 int64
|
|
||||||
}{1, 2, 3, 4, 5},
|
|
||||||
wantOutput: "Int = 1\nInt8 = 2\nInt16 = 3\nInt32 = 4\nInt64 = 5\n",
|
|
||||||
},
|
|
||||||
"uint fields": {
|
|
||||||
input: struct {
|
|
||||||
Uint uint
|
|
||||||
Uint8 uint8
|
|
||||||
Uint16 uint16
|
|
||||||
Uint32 uint32
|
|
||||||
Uint64 uint64
|
|
||||||
}{1, 2, 3, 4, 5},
|
|
||||||
wantOutput: "Uint = 1\nUint8 = 2\nUint16 = 3\nUint32 = 4" +
|
|
||||||
"\nUint64 = 5\n",
|
|
||||||
},
|
|
||||||
"float fields": {
|
|
||||||
input: struct {
|
|
||||||
Float32 float32
|
|
||||||
Float64 float64
|
|
||||||
}{1.5, 2.5},
|
|
||||||
wantOutput: "Float32 = 1.5\nFloat64 = 2.5\n",
|
|
||||||
},
|
|
||||||
"string field": {
|
|
||||||
input: struct{ String string }{"foo"},
|
|
||||||
wantOutput: "String = \"foo\"\n",
|
|
||||||
},
|
|
||||||
"string field and unexported field": {
|
|
||||||
input: struct {
|
|
||||||
String string
|
|
||||||
unexported int
|
|
||||||
}{"foo", 0},
|
|
||||||
wantOutput: "String = \"foo\"\n",
|
|
||||||
},
|
|
||||||
"datetime field in UTC": {
|
|
||||||
input: struct{ Date time.Time }{date},
|
|
||||||
wantOutput: fmt.Sprintf("Date = %s\n", dateStr),
|
|
||||||
},
|
|
||||||
"datetime field as primitive": {
|
|
||||||
// Using a map here to fail if isStructOrMap() returns true for
|
|
||||||
// time.Time.
|
|
||||||
input: map[string]interface{}{
|
|
||||||
"Date": date,
|
|
||||||
"Int": 1,
|
|
||||||
},
|
|
||||||
wantOutput: fmt.Sprintf("Date = %s\nInt = 1\n", dateStr),
|
|
||||||
},
|
|
||||||
"array fields": {
|
|
||||||
input: struct {
|
|
||||||
IntArray0 [0]int
|
|
||||||
IntArray3 [3]int
|
|
||||||
}{[0]int{}, [3]int{1, 2, 3}},
|
|
||||||
wantOutput: "IntArray0 = []\nIntArray3 = [1, 2, 3]\n",
|
|
||||||
},
|
|
||||||
"slice fields": {
|
|
||||||
input: struct{ IntSliceNil, IntSlice0, IntSlice3 []int }{
|
|
||||||
nil, []int{}, []int{1, 2, 3},
|
|
||||||
},
|
|
||||||
wantOutput: "IntSlice0 = []\nIntSlice3 = [1, 2, 3]\n",
|
|
||||||
},
|
|
||||||
"datetime slices": {
|
|
||||||
input: struct{ DatetimeSlice []time.Time }{
|
|
||||||
[]time.Time{date, date},
|
|
||||||
},
|
|
||||||
wantOutput: fmt.Sprintf("DatetimeSlice = [%s, %s]\n",
|
|
||||||
dateStr, dateStr),
|
|
||||||
},
|
|
||||||
"nested arrays and slices": {
|
|
||||||
input: struct {
|
|
||||||
SliceOfArrays [][2]int
|
|
||||||
ArrayOfSlices [2][]int
|
|
||||||
SliceOfArraysOfSlices [][2][]int
|
|
||||||
ArrayOfSlicesOfArrays [2][][2]int
|
|
||||||
SliceOfMixedArrays [][2]interface{}
|
|
||||||
ArrayOfMixedSlices [2][]interface{}
|
|
||||||
}{
|
|
||||||
[][2]int{{1, 2}, {3, 4}},
|
|
||||||
[2][]int{{1, 2}, {3, 4}},
|
|
||||||
[][2][]int{
|
|
||||||
{
|
|
||||||
{1, 2}, {3, 4},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
{5, 6}, {7, 8},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
[2][][2]int{
|
|
||||||
{
|
|
||||||
{1, 2}, {3, 4},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
{5, 6}, {7, 8},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
[][2]interface{}{
|
|
||||||
{1, 2}, {"a", "b"},
|
|
||||||
},
|
|
||||||
[2][]interface{}{
|
|
||||||
{1, 2}, {"a", "b"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
wantOutput: `SliceOfArrays = [[1, 2], [3, 4]]
|
|
||||||
ArrayOfSlices = [[1, 2], [3, 4]]
|
|
||||||
SliceOfArraysOfSlices = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
|
|
||||||
ArrayOfSlicesOfArrays = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
|
|
||||||
SliceOfMixedArrays = [[1, 2], ["a", "b"]]
|
|
||||||
ArrayOfMixedSlices = [[1, 2], ["a", "b"]]
|
|
||||||
`,
|
|
||||||
},
|
|
||||||
"empty slice": {
|
|
||||||
input: struct{ Empty []interface{} }{[]interface{}{}},
|
|
||||||
wantOutput: "Empty = []\n",
|
|
||||||
},
|
|
||||||
"(error) slice with element type mismatch (string and integer)": {
|
|
||||||
input: struct{ Mixed []interface{} }{[]interface{}{1, "a"}},
|
|
||||||
wantError: errArrayMixedElementTypes,
|
|
||||||
},
|
|
||||||
"(error) slice with element type mismatch (integer and float)": {
|
|
||||||
input: struct{ Mixed []interface{} }{[]interface{}{1, 2.5}},
|
|
||||||
wantError: errArrayMixedElementTypes,
|
|
||||||
},
|
|
||||||
"slice with elems of differing Go types, same TOML types": {
|
|
||||||
input: struct {
|
|
||||||
MixedInts []interface{}
|
|
||||||
MixedFloats []interface{}
|
|
||||||
}{
|
|
||||||
[]interface{}{
|
|
||||||
int(1), int8(2), int16(3), int32(4), int64(5),
|
|
||||||
uint(1), uint8(2), uint16(3), uint32(4), uint64(5),
|
|
||||||
},
|
|
||||||
[]interface{}{float32(1.5), float64(2.5)},
|
|
||||||
},
|
|
||||||
wantOutput: "MixedInts = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n" +
|
|
||||||
"MixedFloats = [1.5, 2.5]\n",
|
|
||||||
},
|
|
||||||
"(error) slice w/ element type mismatch (one is nested array)": {
|
|
||||||
input: struct{ Mixed []interface{} }{
|
|
||||||
[]interface{}{1, []interface{}{2}},
|
|
||||||
},
|
|
||||||
wantError: errArrayMixedElementTypes,
|
|
||||||
},
|
|
||||||
"(error) slice with 1 nil element": {
|
|
||||||
input: struct{ NilElement1 []interface{} }{[]interface{}{nil}},
|
|
||||||
wantError: errArrayNilElement,
|
|
||||||
},
|
|
||||||
"(error) slice with 1 nil element (and other non-nil elements)": {
|
|
||||||
input: struct{ NilElement []interface{} }{
|
|
||||||
[]interface{}{1, nil},
|
|
||||||
},
|
|
||||||
wantError: errArrayNilElement,
|
|
||||||
},
|
|
||||||
"simple map": {
|
|
||||||
input: map[string]int{"a": 1, "b": 2},
|
|
||||||
wantOutput: "a = 1\nb = 2\n",
|
|
||||||
},
|
|
||||||
"map with interface{} value type": {
|
|
||||||
input: map[string]interface{}{"a": 1, "b": "c"},
|
|
||||||
wantOutput: "a = 1\nb = \"c\"\n",
|
|
||||||
},
|
|
||||||
"map with interface{} value type, some of which are structs": {
|
|
||||||
input: map[string]interface{}{
|
|
||||||
"a": struct{ Int int }{2},
|
|
||||||
"b": 1,
|
|
||||||
},
|
|
||||||
wantOutput: "b = 1\n\n[a]\n Int = 2\n",
|
|
||||||
},
|
|
||||||
"nested map": {
|
|
||||||
input: map[string]map[string]int{
|
|
||||||
"a": {"b": 1},
|
|
||||||
"c": {"d": 2},
|
|
||||||
},
|
|
||||||
wantOutput: "[a]\n b = 1\n\n[c]\n d = 2\n",
|
|
||||||
},
|
|
||||||
"nested struct": {
|
|
||||||
input: struct{ Struct struct{ Int int } }{
|
|
||||||
struct{ Int int }{1},
|
|
||||||
},
|
|
||||||
wantOutput: "[Struct]\n Int = 1\n",
|
|
||||||
},
|
|
||||||
"nested struct and non-struct field": {
|
|
||||||
input: struct {
|
|
||||||
Struct struct{ Int int }
|
|
||||||
Bool bool
|
|
||||||
}{struct{ Int int }{1}, true},
|
|
||||||
wantOutput: "Bool = true\n\n[Struct]\n Int = 1\n",
|
|
||||||
},
|
|
||||||
"2 nested structs": {
|
|
||||||
input: struct{ Struct1, Struct2 struct{ Int int } }{
|
|
||||||
struct{ Int int }{1}, struct{ Int int }{2},
|
|
||||||
},
|
|
||||||
wantOutput: "[Struct1]\n Int = 1\n\n[Struct2]\n Int = 2\n",
|
|
||||||
},
|
|
||||||
"deeply nested structs": {
|
|
||||||
input: struct {
|
|
||||||
Struct1, Struct2 struct{ Struct3 *struct{ Int int } }
|
|
||||||
}{
|
|
||||||
struct{ Struct3 *struct{ Int int } }{&struct{ Int int }{1}},
|
|
||||||
struct{ Struct3 *struct{ Int int } }{nil},
|
|
||||||
},
|
|
||||||
wantOutput: "[Struct1]\n [Struct1.Struct3]\n Int = 1" +
|
|
||||||
"\n\n[Struct2]\n",
|
|
||||||
},
|
|
||||||
"nested struct with nil struct elem": {
|
|
||||||
input: struct {
|
|
||||||
Struct struct{ Inner *struct{ Int int } }
|
|
||||||
}{
|
|
||||||
struct{ Inner *struct{ Int int } }{nil},
|
|
||||||
},
|
|
||||||
wantOutput: "[Struct]\n",
|
|
||||||
},
|
|
||||||
"nested struct with no fields": {
|
|
||||||
input: struct {
|
|
||||||
Struct struct{ Inner struct{} }
|
|
||||||
}{
|
|
||||||
struct{ Inner struct{} }{struct{}{}},
|
|
||||||
},
|
|
||||||
wantOutput: "[Struct]\n [Struct.Inner]\n",
|
|
||||||
},
|
|
||||||
"struct with tags": {
|
|
||||||
input: struct {
|
|
||||||
Struct struct {
|
|
||||||
Int int `toml:"_int"`
|
|
||||||
} `toml:"_struct"`
|
|
||||||
Bool bool `toml:"_bool"`
|
|
||||||
}{
|
|
||||||
struct {
|
|
||||||
Int int `toml:"_int"`
|
|
||||||
}{1}, true,
|
|
||||||
},
|
|
||||||
wantOutput: "_bool = true\n\n[_struct]\n _int = 1\n",
|
|
||||||
},
|
|
||||||
"embedded struct": {
|
|
||||||
input: struct{ Embedded }{Embedded{1}},
|
|
||||||
wantOutput: "_int = 1\n",
|
|
||||||
},
|
|
||||||
"embedded *struct": {
|
|
||||||
input: struct{ *Embedded }{&Embedded{1}},
|
|
||||||
wantOutput: "_int = 1\n",
|
|
||||||
},
|
|
||||||
"nested embedded struct": {
|
|
||||||
input: struct {
|
|
||||||
Struct struct{ Embedded } `toml:"_struct"`
|
|
||||||
}{struct{ Embedded }{Embedded{1}}},
|
|
||||||
wantOutput: "[_struct]\n _int = 1\n",
|
|
||||||
},
|
|
||||||
"nested embedded *struct": {
|
|
||||||
input: struct {
|
|
||||||
Struct struct{ *Embedded } `toml:"_struct"`
|
|
||||||
}{struct{ *Embedded }{&Embedded{1}}},
|
|
||||||
wantOutput: "[_struct]\n _int = 1\n",
|
|
||||||
},
|
|
||||||
"array of tables": {
|
|
||||||
input: struct {
|
|
||||||
Structs []*struct{ Int int } `toml:"struct"`
|
|
||||||
}{
|
|
||||||
[]*struct{ Int int }{{1}, {3}},
|
|
||||||
},
|
|
||||||
wantOutput: "[[struct]]\n Int = 1\n\n[[struct]]\n Int = 3\n",
|
|
||||||
},
|
|
||||||
"array of tables order": {
|
|
||||||
input: map[string]interface{}{
|
|
||||||
"map": map[string]interface{}{
|
|
||||||
"zero": 5,
|
|
||||||
"arr": []map[string]int{
|
|
||||||
map[string]int{
|
|
||||||
"friend": 5,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
wantOutput: "[map]\n zero = 5\n\n [[map.arr]]\n friend = 5\n",
|
|
||||||
},
|
|
||||||
"(error) top-level slice": {
|
|
||||||
input: []struct{ Int int }{{1}, {2}, {3}},
|
|
||||||
wantError: errNoKey,
|
|
||||||
},
|
|
||||||
"(error) slice of slice": {
|
|
||||||
input: struct {
|
|
||||||
Slices [][]struct{ Int int }
|
|
||||||
}{
|
|
||||||
[][]struct{ Int int }{{{1}}, {{2}}, {{3}}},
|
|
||||||
},
|
|
||||||
wantError: errArrayNoTable,
|
|
||||||
},
|
|
||||||
"(error) map no string key": {
|
|
||||||
input: map[int]string{1: ""},
|
|
||||||
wantError: errNonString,
|
|
||||||
},
|
|
||||||
"(error) anonymous non-struct": {
|
|
||||||
input: struct{ NonStruct }{5},
|
|
||||||
wantError: errAnonNonStruct,
|
|
||||||
},
|
|
||||||
"(error) empty key name": {
|
|
||||||
input: map[string]int{"": 1},
|
|
||||||
wantError: errAnything,
|
|
||||||
},
|
|
||||||
"(error) empty map name": {
|
|
||||||
input: map[string]interface{}{
|
|
||||||
"": map[string]int{"v": 1},
|
|
||||||
},
|
|
||||||
wantError: errAnything,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for label, test := range tests {
|
|
||||||
encodeExpected(t, label, test.input, test.wantOutput, test.wantError)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestEncodeNestedTableArrays(t *testing.T) {
|
|
||||||
type song struct {
|
|
||||||
Name string `toml:"name"`
|
|
||||||
}
|
|
||||||
type album struct {
|
|
||||||
Name string `toml:"name"`
|
|
||||||
Songs []song `toml:"songs"`
|
|
||||||
}
|
|
||||||
type springsteen struct {
|
|
||||||
Albums []album `toml:"albums"`
|
|
||||||
}
|
|
||||||
value := springsteen{
|
|
||||||
[]album{
|
|
||||||
{"Born to Run",
|
|
||||||
[]song{{"Jungleland"}, {"Meeting Across the River"}}},
|
|
||||||
{"Born in the USA",
|
|
||||||
[]song{{"Glory Days"}, {"Dancing in the Dark"}}},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
expected := `[[albums]]
|
|
||||||
name = "Born to Run"
|
|
||||||
|
|
||||||
[[albums.songs]]
|
|
||||||
name = "Jungleland"
|
|
||||||
|
|
||||||
[[albums.songs]]
|
|
||||||
name = "Meeting Across the River"
|
|
||||||
|
|
||||||
[[albums]]
|
|
||||||
name = "Born in the USA"
|
|
||||||
|
|
||||||
[[albums.songs]]
|
|
||||||
name = "Glory Days"
|
|
||||||
|
|
||||||
[[albums.songs]]
|
|
||||||
name = "Dancing in the Dark"
|
|
||||||
`
|
|
||||||
encodeExpected(t, "nested table arrays", value, expected, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestEncodeArrayHashWithNormalHashOrder(t *testing.T) {
|
|
||||||
type Alpha struct {
|
|
||||||
V int
|
|
||||||
}
|
|
||||||
type Beta struct {
|
|
||||||
V int
|
|
||||||
}
|
|
||||||
type Conf struct {
|
|
||||||
V int
|
|
||||||
A Alpha
|
|
||||||
B []Beta
|
|
||||||
}
|
|
||||||
|
|
||||||
val := Conf{
|
|
||||||
V: 1,
|
|
||||||
A: Alpha{2},
|
|
||||||
B: []Beta{{3}},
|
|
||||||
}
|
|
||||||
expected := "V = 1\n\n[A]\n V = 2\n\n[[B]]\n V = 3\n"
|
|
||||||
encodeExpected(t, "array hash with normal hash order", val, expected, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeExpected(
|
|
||||||
t *testing.T, label string, val interface{}, wantStr string, wantErr error,
|
|
||||||
) {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
enc := NewEncoder(&buf)
|
|
||||||
err := enc.Encode(val)
|
|
||||||
if err != wantErr {
|
|
||||||
if wantErr != nil {
|
|
||||||
if wantErr == errAnything && err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t.Errorf("%s: want Encode error %v, got %v", label, wantErr, err)
|
|
||||||
} else {
|
|
||||||
t.Errorf("%s: Encode failed: %s", label, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if got := buf.String(); wantStr != got {
|
|
||||||
t.Errorf("%s: want\n-----\n%q\n-----\nbut got\n-----\n%q\n-----\n",
|
|
||||||
label, wantStr, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleEncoder_Encode() {
|
|
||||||
date, _ := time.Parse(time.RFC822, "14 Mar 10 18:00 UTC")
|
|
||||||
var config = map[string]interface{}{
|
|
||||||
"date": date,
|
|
||||||
"counts": []int{1, 1, 2, 3, 5, 8},
|
|
||||||
"hash": map[string]string{
|
|
||||||
"key1": "val1",
|
|
||||||
"key2": "val2",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
if err := NewEncoder(buf).Encode(config); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
fmt.Println(buf.String())
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// counts = [1, 1, 2, 3, 5, 8]
|
|
||||||
// date = 2010-03-14T18:00:00Z
|
|
||||||
//
|
|
||||||
// [hash]
|
|
||||||
// key1 = "val1"
|
|
||||||
// key2 = "val2"
|
|
||||||
}
|
|
19
vendor/github.com/BurntSushi/toml/encoding_types.go
generated
vendored
19
vendor/github.com/BurntSushi/toml/encoding_types.go
generated
vendored
@ -1,19 +0,0 @@
|
|||||||
// +build go1.2
|
|
||||||
|
|
||||||
package toml
|
|
||||||
|
|
||||||
// In order to support Go 1.1, we define our own TextMarshaler and
|
|
||||||
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
|
|
||||||
// standard library interfaces.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
|
|
||||||
// so that Go 1.1 can be supported.
|
|
||||||
type TextMarshaler encoding.TextMarshaler
|
|
||||||
|
|
||||||
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined here
|
|
||||||
// so that Go 1.1 can be supported.
|
|
||||||
type TextUnmarshaler encoding.TextUnmarshaler
|
|
18
vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
generated
vendored
18
vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
generated
vendored
@ -1,18 +0,0 @@
|
|||||||
// +build !go1.2
|
|
||||||
|
|
||||||
package toml
|
|
||||||
|
|
||||||
// These interfaces were introduced in Go 1.2, so we add them manually when
|
|
||||||
// compiling for Go 1.1.
|
|
||||||
|
|
||||||
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
|
|
||||||
// so that Go 1.1 can be supported.
|
|
||||||
type TextMarshaler interface {
|
|
||||||
MarshalText() (text []byte, err error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined here
|
|
||||||
// so that Go 1.1 can be supported.
|
|
||||||
type TextUnmarshaler interface {
|
|
||||||
UnmarshalText(text []byte) error
|
|
||||||
}
|
|
734
vendor/github.com/BurntSushi/toml/lex.go
generated
vendored
734
vendor/github.com/BurntSushi/toml/lex.go
generated
vendored
@ -1,734 +0,0 @@
|
|||||||
package toml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
type itemType int
|
|
||||||
|
|
||||||
const (
|
|
||||||
itemError itemType = iota
|
|
||||||
itemNIL // used in the parser to indicate no type
|
|
||||||
itemEOF
|
|
||||||
itemText
|
|
||||||
itemString
|
|
||||||
itemBool
|
|
||||||
itemInteger
|
|
||||||
itemFloat
|
|
||||||
itemDatetime
|
|
||||||
itemArray // the start of an array
|
|
||||||
itemArrayEnd
|
|
||||||
itemTableStart
|
|
||||||
itemTableEnd
|
|
||||||
itemArrayTableStart
|
|
||||||
itemArrayTableEnd
|
|
||||||
itemKeyStart
|
|
||||||
itemCommentStart
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
eof = 0
|
|
||||||
tableStart = '['
|
|
||||||
tableEnd = ']'
|
|
||||||
arrayTableStart = '['
|
|
||||||
arrayTableEnd = ']'
|
|
||||||
tableSep = '.'
|
|
||||||
keySep = '='
|
|
||||||
arrayStart = '['
|
|
||||||
arrayEnd = ']'
|
|
||||||
arrayValTerm = ','
|
|
||||||
commentStart = '#'
|
|
||||||
stringStart = '"'
|
|
||||||
stringEnd = '"'
|
|
||||||
)
|
|
||||||
|
|
||||||
type stateFn func(lx *lexer) stateFn
|
|
||||||
|
|
||||||
type lexer struct {
|
|
||||||
input string
|
|
||||||
start int
|
|
||||||
pos int
|
|
||||||
width int
|
|
||||||
line int
|
|
||||||
state stateFn
|
|
||||||
items chan item
|
|
||||||
|
|
||||||
// A stack of state functions used to maintain context.
|
|
||||||
// The idea is to reuse parts of the state machine in various places.
|
|
||||||
// For example, values can appear at the top level or within arbitrarily
|
|
||||||
// nested arrays. The last state on the stack is used after a value has
|
|
||||||
// been lexed. Similarly for comments.
|
|
||||||
stack []stateFn
|
|
||||||
}
|
|
||||||
|
|
||||||
type item struct {
|
|
||||||
typ itemType
|
|
||||||
val string
|
|
||||||
line int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lx *lexer) nextItem() item {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case item := <-lx.items:
|
|
||||||
return item
|
|
||||||
default:
|
|
||||||
lx.state = lx.state(lx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func lex(input string) *lexer {
|
|
||||||
lx := &lexer{
|
|
||||||
input: input + "\n",
|
|
||||||
state: lexTop,
|
|
||||||
line: 1,
|
|
||||||
items: make(chan item, 10),
|
|
||||||
stack: make([]stateFn, 0, 10),
|
|
||||||
}
|
|
||||||
return lx
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lx *lexer) push(state stateFn) {
|
|
||||||
lx.stack = append(lx.stack, state)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lx *lexer) pop() stateFn {
|
|
||||||
if len(lx.stack) == 0 {
|
|
||||||
return lx.errorf("BUG in lexer: no states to pop.")
|
|
||||||
}
|
|
||||||
last := lx.stack[len(lx.stack)-1]
|
|
||||||
lx.stack = lx.stack[0 : len(lx.stack)-1]
|
|
||||||
return last
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lx *lexer) current() string {
|
|
||||||
return lx.input[lx.start:lx.pos]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lx *lexer) emit(typ itemType) {
|
|
||||||
lx.items <- item{typ, lx.current(), lx.line}
|
|
||||||
lx.start = lx.pos
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lx *lexer) emitTrim(typ itemType) {
|
|
||||||
lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
|
|
||||||
lx.start = lx.pos
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lx *lexer) next() (r rune) {
|
|
||||||
if lx.pos >= len(lx.input) {
|
|
||||||
lx.width = 0
|
|
||||||
return eof
|
|
||||||
}
|
|
||||||
|
|
||||||
if lx.input[lx.pos] == '\n' {
|
|
||||||
lx.line++
|
|
||||||
}
|
|
||||||
r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:])
|
|
||||||
lx.pos += lx.width
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
// ignore skips over the pending input before this point.
|
|
||||||
func (lx *lexer) ignore() {
|
|
||||||
lx.start = lx.pos
|
|
||||||
}
|
|
||||||
|
|
||||||
// backup steps back one rune. Can be called only once per call of next.
|
|
||||||
func (lx *lexer) backup() {
|
|
||||||
lx.pos -= lx.width
|
|
||||||
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
|
|
||||||
lx.line--
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// accept consumes the next rune if it's equal to `valid`.
|
|
||||||
func (lx *lexer) accept(valid rune) bool {
|
|
||||||
if lx.next() == valid {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
lx.backup()
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// peek returns but does not consume the next rune in the input.
|
|
||||||
func (lx *lexer) peek() rune {
|
|
||||||
r := lx.next()
|
|
||||||
lx.backup()
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
// errorf stops all lexing by emitting an error and returning `nil`.
// Note that any value that is a character is escaped if it's a special
// character (new lines, tabs, etc.).
// Returning nil terminates the state-function loop driving the lexer.
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
	lx.items <- item{
		itemError,
		fmt.Sprintf(format, values...),
		lx.line,
	}
	return nil
}
|
|
||||||
|
|
||||||
// lexTop consumes elements at the top level of TOML data.
// Valid top-level elements are comments, table headers, and keys; anything
// else (other than insignificant whitespace/EOF) is handed to the key lexer.
func lexTop(lx *lexer) stateFn {
	r := lx.next()
	if isWhitespace(r) || isNL(r) {
		return lexSkip(lx, lexTop)
	}

	switch r {
	case commentStart:
		lx.push(lexTop)
		return lexCommentStart
	case tableStart:
		return lexTableStart
	case eof:
		// EOF mid-item (pos has advanced past start) is an error.
		if lx.pos > lx.start {
			return lx.errorf("Unexpected EOF.")
		}
		lx.emit(itemEOF)
		return nil
	}

	// At this point, the only valid item can be a key, so we back up
	// and let the key lexer do the rest.
	lx.backup()
	lx.push(lexTopEnd)
	return lexKeyStart
}
|
|
||||||
|
|
||||||
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop
// upon a new line. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == commentStart:
		// a comment will read to a new line for us.
		lx.push(lexTop)
		return lexCommentStart
	case isWhitespace(r):
		return lexTopEnd
	case isNL(r):
		lx.ignore()
		return lexTop
	case r == eof:
		// Let lexTop emit the final itemEOF.
		lx.ignore()
		return lexTop
	}
	return lx.errorf("Expected a top-level item to end with a new line, "+
		"comment or EOF, but got %q instead.", r)
}
|
|
||||||
|
|
||||||
// lexTableStart lexes the beginning of a table. Namely, it makes sure that
// it starts with a character other than '.' and ']'.
// It assumes that '[' has already been consumed.
// It also handles the case that this is an item in an array of tables.
// e.g., '[[name]]'.
func lexTableStart(lx *lexer) stateFn {
	// A second '[' marks an array-of-tables header.
	if lx.peek() == arrayTableStart {
		lx.next()
		lx.emit(itemArrayTableStart)
		lx.push(lexArrayTableEnd)
	} else {
		lx.emit(itemTableStart)
		lx.push(lexTableEnd)
	}
	return lexTableNameStart
}
|
|
||||||
|
|
||||||
// lexTableEnd emits the end of a '[name]' table header and resumes
// top-level lexing.
func lexTableEnd(lx *lexer) stateFn {
	lx.emit(itemTableEnd)
	return lexTopEnd
}
|
|
||||||
|
|
||||||
// lexArrayTableEnd consumes the second ']' of a '[[name]]' header, emitting
// an error if it is missing.
func lexArrayTableEnd(lx *lexer) stateFn {
	if r := lx.next(); r != arrayTableEnd {
		return lx.errorf("Expected end of table array name delimiter %q, "+
			"but got %q instead.", arrayTableEnd, r)
	}
	lx.emit(itemArrayTableEnd)
	return lexTopEnd
}
|
|
||||||
|
|
||||||
// lexTableNameStart rejects an empty table-name component (']' or '.'
// immediately after '[' or '.'), then defers to lexTableName.
func lexTableNameStart(lx *lexer) stateFn {
	switch lx.next() {
	case tableEnd, eof:
		return lx.errorf("Unexpected end of table. (Tables cannot " +
			"be empty.)")
	case tableSep:
		return lx.errorf("Unexpected table separator. (Tables cannot " +
			"be empty.)")
	}
	return lexTableName
}
|
|
||||||
|
|
||||||
// lexTableName lexes the name of a table. It assumes that at least one
// valid character for the table has already been read.
// Each dot-separated component is emitted as its own itemText.
func lexTableName(lx *lexer) stateFn {
	switch lx.peek() {
	case eof:
		return lx.errorf("Unexpected end of table name %q.", lx.current())
	case tableStart:
		return lx.errorf("Table names cannot contain %q or %q.",
			tableStart, tableEnd)
	case tableEnd:
		lx.emit(itemText)
		lx.next()
		return lx.pop()
	case tableSep:
		// End of one component; skip the '.' and lex the next one.
		lx.emit(itemText)
		lx.next()
		lx.ignore()
		return lexTableNameStart
	}
	lx.next()
	return lexTableName
}
|
|
||||||
|
|
||||||
// lexKeyStart consumes a key name up until the first non-whitespace character.
// lexKeyStart will ignore whitespace.
// An '=' with no key before it is an error.
func lexKeyStart(lx *lexer) stateFn {
	r := lx.peek()
	switch {
	case r == keySep:
		return lx.errorf("Unexpected key separator %q.", keySep)
	case isWhitespace(r) || isNL(r):
		lx.next()
		return lexSkip(lx, lexKeyStart)
	}

	lx.ignore()
	lx.emit(itemKeyStart)
	lx.next()
	return lexKey
}
|
|
||||||
|
|
||||||
// lexKey consumes the text of a key. Assumes that the first character (which
// is not whitespace) has already been consumed.
func lexKey(lx *lexer) stateFn {
	r := lx.peek()

	// Keys cannot contain a '#' character.
	if r == commentStart {
		return lx.errorf("Key cannot contain a '#' character.")
	}

	// XXX: Possible divergence from spec?
	// "Keys start with the first non-whitespace character and end with the
	// last non-whitespace character before the equals sign."
	// Note here that whitespace is either a tab or a space.
	// But we'll call it quits if we see a new line too.
	if isNL(r) {
		lx.emitTrim(itemText)
		return lexKeyEnd
	}

	// Let's also call it quits if we see an equals sign.
	if r == keySep {
		lx.emitTrim(itemText)
		return lexKeyEnd
	}

	lx.next()
	return lexKey
}
|
|
||||||
|
|
||||||
// lexKeyEnd consumes the end of a key (up to the key separator).
// Assumes that any whitespace after a key has been consumed.
func lexKeyEnd(lx *lexer) stateFn {
	r := lx.next()
	if r == keySep {
		return lexSkip(lx, lexValue)
	}
	return lx.errorf("Expected key separator %q, but got %q instead.",
		keySep, r)
}
|
|
||||||
|
|
||||||
// lexValue starts the consumption of a value anywhere a value is expected.
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the next is popped and returned.
func lexValue(lx *lexer) stateFn {
	// We allow whitespace to precede a value, but NOT new lines.
	// In array syntax, the array states are responsible for ignoring new lines.
	r := lx.next()
	if isWhitespace(r) {
		return lexSkip(lx, lexValue)
	}

	// Dispatch on the first character of the value.
	switch {
	case r == arrayStart:
		lx.ignore()
		lx.emit(itemArray)
		return lexArrayValue
	case r == stringStart:
		lx.ignore() // ignore the '"'
		return lexString
	case r == 't':
		return lexTrue
	case r == 'f':
		return lexFalse
	case r == '-':
		return lexNumberStart
	case isDigit(r):
		lx.backup() // avoid an extra state and use the same as above
		return lexNumberOrDateStart
	case r == '.': // special error case, be kind to users
		return lx.errorf("Floats must start with a digit, not '.'.")
	}
	return lx.errorf("Expected value but found %q instead.", r)
}
|
|
||||||
|
|
||||||
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and new lines are ignored.
func lexArrayValue(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r) || isNL(r):
		return lexSkip(lx, lexArrayValue)
	case r == commentStart:
		lx.push(lexArrayValue)
		return lexCommentStart
	case r == arrayValTerm:
		// A ',' with no value before it, e.g. "[,".
		return lx.errorf("Unexpected array value terminator %q.",
			arrayValTerm)
	case r == arrayEnd:
		return lexArrayEnd
	}

	// Anything else begins a value; lexValue pops lexArrayValueEnd when done.
	lx.backup()
	lx.push(lexArrayValueEnd)
	return lexValue
}
|
|
||||||
|
|
||||||
// lexArrayValueEnd consumes the cruft between values of an array. Namely,
// it ignores whitespace and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r) || isNL(r):
		return lexSkip(lx, lexArrayValueEnd)
	case r == commentStart:
		lx.push(lexArrayValueEnd)
		return lexCommentStart
	case r == arrayValTerm:
		lx.ignore()
		return lexArrayValue // move on to the next value
	case r == arrayEnd:
		return lexArrayEnd
	}
	return lx.errorf("Expected an array value terminator %q or an array "+
		"terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r)
}
|
|
||||||
|
|
||||||
// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has
// just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
	lx.ignore()
	lx.emit(itemArrayEnd)
	return lx.pop()
}
|
|
||||||
|
|
||||||
// lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isNL(r):
		return lx.errorf("Strings cannot contain new lines.")
	case r == '\\':
		return lexStringEscape
	case r == stringEnd:
		// Emit the contents without the closing quote, then skip it.
		lx.backup()
		lx.emit(itemString)
		lx.next()
		lx.ignore()
		return lx.pop()
	}
	return lexString
}
|
|
||||||
|
|
||||||
// lexStringEscape consumes an escaped character. It assumes that the preceding
|
|
||||||
// '\\' has already been consumed.
|
|
||||||
func lexStringEscape(lx *lexer) stateFn {
|
|
||||||
r := lx.next()
|
|
||||||
switch r {
|
|
||||||
case 'b':
|
|
||||||
fallthrough
|
|
||||||
case 't':
|
|
||||||
fallthrough
|
|
||||||
case 'n':
|
|
||||||
fallthrough
|
|
||||||
case 'f':
|
|
||||||
fallthrough
|
|
||||||
case 'r':
|
|
||||||
fallthrough
|
|
||||||
case '"':
|
|
||||||
fallthrough
|
|
||||||
case '/':
|
|
||||||
fallthrough
|
|
||||||
case '\\':
|
|
||||||
return lexString
|
|
||||||
case 'u':
|
|
||||||
return lexStringUnicode
|
|
||||||
}
|
|
||||||
return lx.errorf("Invalid escape character %q. Only the following "+
|
|
||||||
"escape characters are allowed: "+
|
|
||||||
"\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, and \\uXXXX.", r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexStringBinary consumes two hexadecimal digits following '\x'. It assumes
|
|
||||||
// that the '\x' has already been consumed.
|
|
||||||
func lexStringUnicode(lx *lexer) stateFn {
|
|
||||||
var r rune
|
|
||||||
|
|
||||||
for i := 0; i < 4; i++ {
|
|
||||||
r = lx.next()
|
|
||||||
if !isHexadecimal(r) {
|
|
||||||
return lx.errorf("Expected four hexadecimal digits after '\\x', "+
|
|
||||||
"but got '%s' instead.", lx.current())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return lexString
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexNumberOrDateStart consumes either a (positive) integer, float or datetime.
// It assumes that NO negative sign has been consumed.
func lexNumberOrDateStart(lx *lexer) stateFn {
	r := lx.next()
	if !isDigit(r) {
		if r == '.' {
			return lx.errorf("Floats must start with a digit, not '.'.")
		} else {
			return lx.errorf("Expected a digit but got %q.", r)
		}
	}
	return lexNumberOrDate
}
|
|
||||||
|
|
||||||
// lexNumberOrDate consumes either a (positive) integer, float or datetime.
func lexNumberOrDate(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == '-':
		// A '-' after exactly four digits ("YYYY-") means a datetime.
		if lx.pos-lx.start != 5 {
			return lx.errorf("All ISO8601 dates must be in full Zulu form.")
		}
		return lexDateAfterYear
	case isDigit(r):
		return lexNumberOrDate
	case r == '.':
		return lexFloatStart
	}

	// Any other rune ends the integer.
	lx.backup()
	lx.emit(itemInteger)
	return lx.pop()
}
|
|
||||||
|
|
||||||
// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format.
// It assumes that "YYYY-" has already been consumed.
func lexDateAfterYear(lx *lexer) stateFn {
	// The remaining fixed template: "MM-DDTHH:MM:SSZ".
	formats := []rune{
		// digits are '0'.
		// everything else is direct equality.
		'0', '0', '-', '0', '0',
		'T',
		'0', '0', ':', '0', '0', ':', '0', '0',
		'Z',
	}
	for _, f := range formats {
		r := lx.next()
		if f == '0' {
			if !isDigit(r) {
				return lx.errorf("Expected digit in ISO8601 datetime, "+
					"but found %q instead.", r)
			}
		} else if f != r {
			return lx.errorf("Expected %q in ISO8601 datetime, "+
				"but found %q instead.", f, r)
		}
	}
	lx.emit(itemDatetime)
	return lx.pop()
}
|
|
||||||
|
|
||||||
// lexNumberStart consumes either an integer or a float. It assumes that a
// negative sign has already been read, but that *no* digits have been consumed.
// lexNumberStart will move to the appropriate integer or float states.
func lexNumberStart(lx *lexer) stateFn {
	// we MUST see a digit. Even floats have to start with a digit.
	r := lx.next()
	if !isDigit(r) {
		if r == '.' {
			return lx.errorf("Floats must start with a digit, not '.'.")
		} else {
			return lx.errorf("Expected a digit but got %q.", r)
		}
	}
	return lexNumber
}
|
|
||||||
|
|
||||||
// lexNumber consumes an integer or a float after seeing the first digit.
func lexNumber(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isDigit(r):
		return lexNumber
	case r == '.':
		return lexFloatStart
	}

	// Any other rune ends the integer.
	lx.backup()
	lx.emit(itemInteger)
	return lx.pop()
}
|
|
||||||
|
|
||||||
// lexFloatStart starts the consumption of digits of a float after a '.'.
// Namely, at least one digit is required.
func lexFloatStart(lx *lexer) stateFn {
	r := lx.next()
	if !isDigit(r) {
		return lx.errorf("Floats must have a digit after the '.', but got "+
			"%q instead.", r)
	}
	return lexFloat
}
|
|
||||||
|
|
||||||
// lexFloat consumes the digits of a float after a '.'.
// Assumes that one digit has been consumed after a '.' already.
func lexFloat(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexFloat
	}

	// First non-digit ends the float.
	lx.backup()
	lx.emit(itemFloat)
	return lx.pop()
}
|
|
||||||
|
|
||||||
// lexConst consumes the s[1:] in s. It assumes that s[0] has already been
// consumed. It returns nil on success and an error state on mismatch.
func lexConst(lx *lexer, s string) stateFn {
	for i := range s[1:] {
		if r := lx.next(); r != rune(s[i+1]) {
			return lx.errorf("Expected %q, but found %q instead.", s[:i+1],
				s[:i]+string(r))
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
// lexTrue consumes the "rue" in "true". It assumes that 't' has already
// been consumed.
func lexTrue(lx *lexer) stateFn {
	if fn := lexConst(lx, "true"); fn != nil {
		return fn
	}
	lx.emit(itemBool)
	return lx.pop()
}
|
|
||||||
|
|
||||||
// lexFalse consumes the "alse" in "false". It assumes that 'f' has already
// been consumed.
func lexFalse(lx *lexer) stateFn {
	if fn := lexConst(lx, "false"); fn != nil {
		return fn
	}
	lx.emit(itemBool)
	return lx.pop()
}
|
|
||||||
|
|
||||||
// lexCommentStart begins the lexing of a comment. It will emit
// itemCommentStart and consume no characters, passing control to lexComment.
func lexCommentStart(lx *lexer) stateFn {
	lx.ignore()
	lx.emit(itemCommentStart)
	return lexComment
}
|
|
||||||
|
|
||||||
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
// It will consume *up to* the first new line character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
	r := lx.peek()
	if isNL(r) || r == eof {
		lx.emit(itemText)
		return lx.pop()
	}
	lx.next()
	return lexComment
}
|
|
||||||
|
|
||||||
// lexSkip ignores all slurped input and moves on to the next state.
// It returns a state function so callers can `return lexSkip(lx, next)`.
func lexSkip(lx *lexer, nextState stateFn) stateFn {
	return func(lx *lexer) stateFn {
		lx.ignore()
		return nextState
	}
}
|
|
||||||
|
|
||||||
// isWhitespace reports whether r is a whitespace character according to
// the spec (a space or a tab; newlines are handled by isNL).
func isWhitespace(r rune) bool {
	switch r {
	case ' ', '\t':
		return true
	}
	return false
}
|
|
||||||
|
|
||||||
// isNL reports whether r is a newline character (LF or CR).
func isNL(r rune) bool {
	switch r {
	case '\n', '\r':
		return true
	default:
		return false
	}
}
|
|
||||||
|
|
||||||
// isDigit reports whether r is an ASCII decimal digit.
func isDigit(r rune) bool {
	return '0' <= r && r <= '9'
}
|
|
||||||
|
|
||||||
// isHexadecimal reports whether r is an ASCII hexadecimal digit
// (0-9, a-f, or A-F).
func isHexadecimal(r rune) bool {
	switch {
	case '0' <= r && r <= '9':
		return true
	case 'a' <= r && r <= 'f':
		return true
	case 'A' <= r && r <= 'F':
		return true
	}
	return false
}
|
|
||||||
|
|
||||||
func (itype itemType) String() string {
|
|
||||||
switch itype {
|
|
||||||
case itemError:
|
|
||||||
return "Error"
|
|
||||||
case itemNIL:
|
|
||||||
return "NIL"
|
|
||||||
case itemEOF:
|
|
||||||
return "EOF"
|
|
||||||
case itemText:
|
|
||||||
return "Text"
|
|
||||||
case itemString:
|
|
||||||
return "String"
|
|
||||||
case itemBool:
|
|
||||||
return "Bool"
|
|
||||||
case itemInteger:
|
|
||||||
return "Integer"
|
|
||||||
case itemFloat:
|
|
||||||
return "Float"
|
|
||||||
case itemDatetime:
|
|
||||||
return "DateTime"
|
|
||||||
case itemTableStart:
|
|
||||||
return "TableStart"
|
|
||||||
case itemTableEnd:
|
|
||||||
return "TableEnd"
|
|
||||||
case itemKeyStart:
|
|
||||||
return "KeyStart"
|
|
||||||
case itemArray:
|
|
||||||
return "Array"
|
|
||||||
case itemArrayEnd:
|
|
||||||
return "ArrayEnd"
|
|
||||||
case itemCommentStart:
|
|
||||||
return "CommentStart"
|
|
||||||
}
|
|
||||||
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// String renders an item as "(Type, value)" for debugging output.
func (item item) String() string {
	return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
}
|
|
417
vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
417
vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
@ -1,417 +0,0 @@
|
|||||||
package toml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
// parser builds the TOML mapping and type table from the item stream
// produced by the lexer.
type parser struct {
	// The decoded document as nested maps.
	mapping map[string]interface{}
	// The TOML type recorded for each dotted key (see setType).
	types map[string]tomlType
	lx    *lexer

	// A list of keys in the order that they appear in the TOML data.
	ordered []Key

	// the full key for the current hash in scope
	context Key

	// the base key name for everything except hashes
	currentKey string

	// rough approximation of line number
	approxLine int

	// A map of 'key.group.names' to whether they were created implicitly.
	implicits map[string]bool
}
|
|
||||||
|
|
||||||
// parseError is a string-backed error type used to signal parse failures
// via panic; parse recovers it and returns it as an ordinary error.
type parseError string

// Error implements the error interface.
func (pe parseError) Error() string {
	return string(pe)
}
|
|
||||||
|
|
||||||
// parse lexes and parses data as a complete TOML document. Parse failures
// are raised internally as parseError panics (via panicf) and converted
// back into an ordinary error here; any other panic is re-raised.
func parse(data string) (p *parser, err error) {
	defer func() {
		if r := recover(); r != nil {
			var ok bool
			// Assigning to the named return turns the panic into an error.
			if err, ok = r.(parseError); ok {
				return
			}
			panic(r)
		}
	}()

	p = &parser{
		mapping:   make(map[string]interface{}),
		types:     make(map[string]tomlType),
		lx:        lex(data),
		ordered:   make([]Key, 0),
		implicits: make(map[string]bool),
	}
	// Drive the item stream until EOF.
	for {
		item := p.next()
		if item.typ == itemEOF {
			break
		}
		p.topLevel(item)
	}

	return p, nil
}
|
|
||||||
|
|
||||||
// panicf aborts parsing with a parseError carrying the approximate line
// and current key; parse's deferred recover converts it to an error.
func (p *parser) panicf(format string, v ...interface{}) {
	msg := fmt.Sprintf("Near line %d, key '%s': %s",
		p.approxLine, p.current(), fmt.Sprintf(format, v...))
	panic(parseError(msg))
}
|
|
||||||
|
|
||||||
// next pulls the next item from the lexer, converting lexer errors into
// parse panics.
func (p *parser) next() item {
	it := p.lx.nextItem()
	if it.typ == itemError {
		p.panicf("Near line %d: %s", it.line, it.val)
	}
	return it
}
|
|
||||||
|
|
||||||
// bug terminates the process for conditions that indicate a defect in the
// parser itself (as opposed to bad input, which uses panicf).
func (p *parser) bug(format string, v ...interface{}) {
	log.Fatalf("BUG: %s\n\n", fmt.Sprintf(format, v...))
}
|
|
||||||
|
|
||||||
func (p *parser) expect(typ itemType) item {
|
|
||||||
it := p.next()
|
|
||||||
p.assertEqual(typ, it.typ)
|
|
||||||
return it
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) assertEqual(expected, got itemType) {
|
|
||||||
if expected != got {
|
|
||||||
p.bug("Expected '%s' but got '%s'.", expected, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) topLevel(item item) {
|
|
||||||
switch item.typ {
|
|
||||||
case itemCommentStart:
|
|
||||||
p.approxLine = item.line
|
|
||||||
p.expect(itemText)
|
|
||||||
case itemTableStart:
|
|
||||||
kg := p.expect(itemText)
|
|
||||||
p.approxLine = kg.line
|
|
||||||
|
|
||||||
key := make(Key, 0)
|
|
||||||
for ; kg.typ == itemText; kg = p.next() {
|
|
||||||
key = append(key, kg.val)
|
|
||||||
}
|
|
||||||
p.assertEqual(itemTableEnd, kg.typ)
|
|
||||||
|
|
||||||
p.establishContext(key, false)
|
|
||||||
p.setType("", tomlHash)
|
|
||||||
p.ordered = append(p.ordered, key)
|
|
||||||
case itemArrayTableStart:
|
|
||||||
kg := p.expect(itemText)
|
|
||||||
p.approxLine = kg.line
|
|
||||||
|
|
||||||
key := make(Key, 0)
|
|
||||||
for ; kg.typ == itemText; kg = p.next() {
|
|
||||||
key = append(key, kg.val)
|
|
||||||
}
|
|
||||||
p.assertEqual(itemArrayTableEnd, kg.typ)
|
|
||||||
|
|
||||||
p.establishContext(key, true)
|
|
||||||
p.setType("", tomlArrayHash)
|
|
||||||
p.ordered = append(p.ordered, key)
|
|
||||||
case itemKeyStart:
|
|
||||||
kname := p.expect(itemText)
|
|
||||||
p.currentKey = kname.val
|
|
||||||
p.approxLine = kname.line
|
|
||||||
|
|
||||||
val, typ := p.value(p.next())
|
|
||||||
p.setValue(p.currentKey, val)
|
|
||||||
p.setType(p.currentKey, typ)
|
|
||||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
|
||||||
|
|
||||||
p.currentKey = ""
|
|
||||||
default:
|
|
||||||
p.bug("Unexpected type at top level: %s", item.typ)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface. It returns both the value and its TOML type.
func (p *parser) value(it item) (interface{}, tomlType) {
	switch it.typ {
	case itemString:
		return p.replaceUnicode(replaceEscapes(it.val)), p.typeOfPrimitive(it)
	case itemBool:
		switch it.val {
		case "true":
			return true, p.typeOfPrimitive(it)
		case "false":
			return false, p.typeOfPrimitive(it)
		}
		p.bug("Expected boolean value, but got '%s'.", it.val)
	case itemInteger:
		num, err := strconv.ParseInt(it.val, 10, 64)
		if err != nil {
			// See comment below for floats describing why we make a
			// distinction between a bug and a user error.
			if e, ok := err.(*strconv.NumError); ok &&
				e.Err == strconv.ErrRange {

				p.panicf("Integer '%s' is out of the range of 64-bit "+
					"signed integers.", it.val)
			} else {
				p.bug("Expected integer value, but got '%s'.", it.val)
			}
		}
		return num, p.typeOfPrimitive(it)
	case itemFloat:
		num, err := strconv.ParseFloat(it.val, 64)
		if err != nil {
			// Distinguish float values. Normally, it'd be a bug if the lexer
			// provides an invalid float, but it's possible that the float is
			// out of range of valid values (which the lexer cannot determine).
			// So mark the former as a bug but the latter as a legitimate user
			// error.
			//
			// This is also true for integers.
			if e, ok := err.(*strconv.NumError); ok &&
				e.Err == strconv.ErrRange {

				p.panicf("Float '%s' is out of the range of 64-bit "+
					"IEEE-754 floating-point numbers.", it.val)
			} else {
				p.bug("Expected float value, but got '%s'.", it.val)
			}
		}
		return num, p.typeOfPrimitive(it)
	case itemDatetime:
		// The lexer guarantees full Zulu form (see lexDateAfterYear).
		t, err := time.Parse("2006-01-02T15:04:05Z", it.val)
		if err != nil {
			p.bug("Expected Zulu formatted DateTime, but got '%s'.", it.val)
		}
		return t, p.typeOfPrimitive(it)
	case itemArray:
		array := make([]interface{}, 0)
		types := make([]tomlType, 0)

		// Recursively read values until the closing bracket, skipping
		// comments embedded in the array.
		for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
			if it.typ == itemCommentStart {
				p.expect(itemText)
				continue
			}

			val, typ := p.value(it)
			array = append(array, val)
			types = append(types, typ)
		}
		return array, p.typeOfArray(types)
	}
	p.bug("Unexpected value type: %s", it.typ)
	panic("unreachable")
}
|
|
||||||
|
|
||||||
// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) establishContext(key Key, array bool) {
	var ok bool

	// Always start at the top level and drill down for our context.
	hashContext := p.mapping
	keyContext := make(Key, 0)

	// We only need implicit hashes for key[0:-1]
	for _, k := range key[0 : len(key)-1] {
		_, ok = hashContext[k]
		keyContext = append(keyContext, k)

		// No key? Make an implicit hash and move on.
		if !ok {
			p.addImplicit(keyContext)
			hashContext[k] = make(map[string]interface{})
		}

		// If the hash context is actually an array of tables, then set
		// the hash context to the last element in that array.
		//
		// Otherwise, it better be a table, since this MUST be a key group (by
		// virtue of it not being the last element in a key).
		switch t := hashContext[k].(type) {
		case []map[string]interface{}:
			hashContext = t[len(t)-1]
		case map[string]interface{}:
			hashContext = t
		default:
			p.panicf("Key '%s' was already created as a hash.", keyContext)
		}
	}

	p.context = keyContext
	if array {
		// If this is the first element for this array, then allocate a new
		// list of tables for it.
		k := key[len(key)-1]
		if _, ok := hashContext[k]; !ok {
			hashContext[k] = make([]map[string]interface{}, 0, 5)
		}

		// Add a new table. But make sure the key hasn't already been used
		// for something else.
		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
			hashContext[k] = append(hash, make(map[string]interface{}))
		} else {
			p.panicf("Key '%s' was already created and cannot be used as "+
				"an array.", keyContext)
		}
	} else {
		p.setValue(key[len(key)-1], make(map[string]interface{}))
	}
	// The new table itself becomes part of the context for subsequent keys.
	p.context = append(p.context, key[len(key)-1])
}
|
|
||||||
|
|
||||||
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, account for
// implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
	var tmpHash interface{}
	var ok bool

	// Walk from the root down to the hash named by the current context.
	hash := p.mapping
	keyContext := make(Key, 0)
	for _, k := range p.context {
		keyContext = append(keyContext, k)
		if tmpHash, ok = hash[k]; !ok {
			p.bug("Context for key '%s' has not been established.", keyContext)
		}
		switch t := tmpHash.(type) {
		case []map[string]interface{}:
			// The context is a table of hashes. Pick the most recent table
			// defined as the current hash.
			hash = t[len(t)-1]
		case map[string]interface{}:
			hash = t
		default:
			p.bug("Expected hash to have type 'map[string]interface{}', but "+
				"it has '%T' instead.", tmpHash)
		}
	}
	keyContext = append(keyContext, key)

	if _, ok := hash[key]; ok {
		// Typically, if the given key has already been set, then we have
		// to raise an error since duplicate keys are disallowed. However,
		// it's possible that a key was previously defined implicitly. In this
		// case, it is allowed to be redefined concretely. (See the
		// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
		//
		// But we have to make sure to stop marking it as an implicit. (So that
		// another redefinition provokes an error.)
		//
		// Note that since it has already been defined (as a hash), we don't
		// want to overwrite it. So our business is done.
		if p.isImplicit(keyContext) {
			p.removeImplicit(keyContext)
			return
		}

		// Otherwise, we have a concrete key trying to override a previous
		// key, which is *always* wrong.
		p.panicf("Key '%s' has already been defined.", keyContext)
	}
	hash[key] = value
}
|
|
||||||
|
|
||||||
// setType sets the type of a particular value at a given key.
|
|
||||||
// It should be called immediately AFTER setValue.
|
|
||||||
//
|
|
||||||
// Note that if `key` is empty, then the type given will be applied to the
|
|
||||||
// current context (which is either a table or an array of tables).
|
|
||||||
func (p *parser) setType(key string, typ tomlType) {
|
|
||||||
keyContext := make(Key, 0, len(p.context)+1)
|
|
||||||
for _, k := range p.context {
|
|
||||||
keyContext = append(keyContext, k)
|
|
||||||
}
|
|
||||||
if len(key) > 0 { // allow type setting for hashes
|
|
||||||
keyContext = append(keyContext, key)
|
|
||||||
}
|
|
||||||
p.types[keyContext.String()] = typ
|
|
||||||
}
|
|
||||||
|
|
||||||
// addImplicit sets the given Key as having been created implicitly.
|
|
||||||
func (p *parser) addImplicit(key Key) {
|
|
||||||
p.implicits[key.String()] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// removeImplicit stops tagging the given key as having been implicitly created.
|
|
||||||
func (p *parser) removeImplicit(key Key) {
|
|
||||||
p.implicits[key.String()] = false
|
|
||||||
}
|
|
||||||
|
|
||||||
// isImplicit returns true if the key group pointed to by the key was created
|
|
||||||
// implicitly.
|
|
||||||
func (p *parser) isImplicit(key Key) bool {
|
|
||||||
return p.implicits[key.String()]
|
|
||||||
}
|
|
||||||
|
|
||||||
// current returns the full key name of the current context.
|
|
||||||
func (p *parser) current() string {
|
|
||||||
if len(p.currentKey) == 0 {
|
|
||||||
return p.context.String()
|
|
||||||
}
|
|
||||||
if len(p.context) == 0 {
|
|
||||||
return p.currentKey
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s.%s", p.context, p.currentKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
func replaceEscapes(s string) string {
|
|
||||||
return strings.NewReplacer(
|
|
||||||
"\\b", "\u0008",
|
|
||||||
"\\t", "\u0009",
|
|
||||||
"\\n", "\u000A",
|
|
||||||
"\\f", "\u000C",
|
|
||||||
"\\r", "\u000D",
|
|
||||||
"\\\"", "\u0022",
|
|
||||||
"\\/", "\u002F",
|
|
||||||
"\\\\", "\u005C",
|
|
||||||
).Replace(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) replaceUnicode(s string) string {
|
|
||||||
indexEsc := func() int {
|
|
||||||
return strings.Index(s, "\\u")
|
|
||||||
}
|
|
||||||
for i := indexEsc(); i != -1; i = indexEsc() {
|
|
||||||
asciiBytes := s[i+2 : i+6]
|
|
||||||
s = strings.Replace(s, s[i:i+6], p.asciiEscapeToUnicode(asciiBytes), -1)
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) asciiEscapeToUnicode(s string) string {
|
|
||||||
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
|
|
||||||
if err != nil {
|
|
||||||
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
|
|
||||||
"lexer claims it's OK: %s", s, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// BUG(burntsushi)
|
|
||||||
// I honestly don't understand how this works. I can't seem
|
|
||||||
// to find a way to make this fail. I figured this would fail on invalid
|
|
||||||
// UTF-8 characters like U+DCFF, but it doesn't.
|
|
||||||
r := string(rune(hex))
|
|
||||||
if !utf8.ValidString(r) {
|
|
||||||
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
|
|
||||||
}
|
|
||||||
return string(r)
|
|
||||||
}
|
|
1
vendor/github.com/BurntSushi/toml/session.vim
generated
vendored
1
vendor/github.com/BurntSushi/toml/session.vim
generated
vendored
@ -1 +0,0 @@
|
|||||||
au BufWritePost *.go silent!make tags > /dev/null 2>&1
|
|
85
vendor/github.com/BurntSushi/toml/type_check.go
generated
vendored
85
vendor/github.com/BurntSushi/toml/type_check.go
generated
vendored
@ -1,85 +0,0 @@
|
|||||||
package toml
|
|
||||||
|
|
||||||
// tomlType represents any Go type that corresponds to a TOML type.
|
|
||||||
// While the first draft of the TOML spec has a simplistic type system that
|
|
||||||
// probably doesn't need this level of sophistication, we seem to be militating
|
|
||||||
// toward adding real composite types.
|
|
||||||
type tomlType interface {
|
|
||||||
typeString() string
|
|
||||||
}
|
|
||||||
|
|
||||||
// typeEqual accepts any two types and returns true if they are equal.
|
|
||||||
func typeEqual(t1, t2 tomlType) bool {
|
|
||||||
if t1 == nil || t2 == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return t1.typeString() == t2.typeString()
|
|
||||||
}
|
|
||||||
|
|
||||||
func typeIsHash(t tomlType) bool {
|
|
||||||
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
type tomlBaseType string
|
|
||||||
|
|
||||||
func (btype tomlBaseType) typeString() string {
|
|
||||||
return string(btype)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (btype tomlBaseType) String() string {
|
|
||||||
return btype.typeString()
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
tomlInteger tomlBaseType = "Integer"
|
|
||||||
tomlFloat tomlBaseType = "Float"
|
|
||||||
tomlDatetime tomlBaseType = "Datetime"
|
|
||||||
tomlString tomlBaseType = "String"
|
|
||||||
tomlBool tomlBaseType = "Bool"
|
|
||||||
tomlArray tomlBaseType = "Array"
|
|
||||||
tomlHash tomlBaseType = "Hash"
|
|
||||||
tomlArrayHash tomlBaseType = "ArrayHash"
|
|
||||||
)
|
|
||||||
|
|
||||||
// typeOfPrimitive returns a tomlType of any primitive value in TOML.
|
|
||||||
// Primitive values are: Integer, Float, Datetime, String and Bool.
|
|
||||||
//
|
|
||||||
// Passing a lexer item other than the following will cause a BUG message
|
|
||||||
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
|
|
||||||
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
|
|
||||||
switch lexItem.typ {
|
|
||||||
case itemInteger:
|
|
||||||
return tomlInteger
|
|
||||||
case itemFloat:
|
|
||||||
return tomlFloat
|
|
||||||
case itemDatetime:
|
|
||||||
return tomlDatetime
|
|
||||||
case itemString:
|
|
||||||
return tomlString
|
|
||||||
case itemBool:
|
|
||||||
return tomlBool
|
|
||||||
}
|
|
||||||
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
// typeOfArray returns a tomlType for an array given a list of types of its
|
|
||||||
// values.
|
|
||||||
//
|
|
||||||
// In the current spec, if an array is homogeneous, then its type is always
|
|
||||||
// "Array". If the array is not homogeneous, an error is generated.
|
|
||||||
func (p *parser) typeOfArray(types []tomlType) tomlType {
|
|
||||||
// Empty arrays are cool.
|
|
||||||
if len(types) == 0 {
|
|
||||||
return tomlArray
|
|
||||||
}
|
|
||||||
|
|
||||||
theType := types[0]
|
|
||||||
for _, t := range types[1:] {
|
|
||||||
if !typeEqual(theType, t) {
|
|
||||||
p.panicf("Array contains values of type '%s' and '%s', but arrays "+
|
|
||||||
"must be homogeneous.", theType, t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return tomlArray
|
|
||||||
}
|
|
241
vendor/github.com/BurntSushi/toml/type_fields.go
generated
vendored
241
vendor/github.com/BurntSushi/toml/type_fields.go
generated
vendored
@ -1,241 +0,0 @@
|
|||||||
package toml
|
|
||||||
|
|
||||||
// Struct field handling is adapted from code in encoding/json:
|
|
||||||
//
|
|
||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the Go distribution.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A field represents a single field found in a struct.
|
|
||||||
type field struct {
|
|
||||||
name string // the name of the field (`toml` tag included)
|
|
||||||
tag bool // whether field has a `toml` tag
|
|
||||||
index []int // represents the depth of an anonymous field
|
|
||||||
typ reflect.Type // the type of the field
|
|
||||||
}
|
|
||||||
|
|
||||||
// byName sorts field by name, breaking ties with depth,
|
|
||||||
// then breaking ties with "name came from toml tag", then
|
|
||||||
// breaking ties with index sequence.
|
|
||||||
type byName []field
|
|
||||||
|
|
||||||
func (x byName) Len() int { return len(x) }
|
|
||||||
|
|
||||||
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
|
||||||
|
|
||||||
func (x byName) Less(i, j int) bool {
|
|
||||||
if x[i].name != x[j].name {
|
|
||||||
return x[i].name < x[j].name
|
|
||||||
}
|
|
||||||
if len(x[i].index) != len(x[j].index) {
|
|
||||||
return len(x[i].index) < len(x[j].index)
|
|
||||||
}
|
|
||||||
if x[i].tag != x[j].tag {
|
|
||||||
return x[i].tag
|
|
||||||
}
|
|
||||||
return byIndex(x).Less(i, j)
|
|
||||||
}
|
|
||||||
|
|
||||||
// byIndex sorts field by index sequence.
|
|
||||||
type byIndex []field
|
|
||||||
|
|
||||||
func (x byIndex) Len() int { return len(x) }
|
|
||||||
|
|
||||||
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
|
||||||
|
|
||||||
func (x byIndex) Less(i, j int) bool {
|
|
||||||
for k, xik := range x[i].index {
|
|
||||||
if k >= len(x[j].index) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if xik != x[j].index[k] {
|
|
||||||
return xik < x[j].index[k]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return len(x[i].index) < len(x[j].index)
|
|
||||||
}
|
|
||||||
|
|
||||||
// typeFields returns a list of fields that TOML should recognize for the given
|
|
||||||
// type. The algorithm is breadth-first search over the set of structs to
|
|
||||||
// include - the top struct and then any reachable anonymous structs.
|
|
||||||
func typeFields(t reflect.Type) []field {
|
|
||||||
// Anonymous fields to explore at the current level and the next.
|
|
||||||
current := []field{}
|
|
||||||
next := []field{{typ: t}}
|
|
||||||
|
|
||||||
// Count of queued names for current level and the next.
|
|
||||||
count := map[reflect.Type]int{}
|
|
||||||
nextCount := map[reflect.Type]int{}
|
|
||||||
|
|
||||||
// Types already visited at an earlier level.
|
|
||||||
visited := map[reflect.Type]bool{}
|
|
||||||
|
|
||||||
// Fields found.
|
|
||||||
var fields []field
|
|
||||||
|
|
||||||
for len(next) > 0 {
|
|
||||||
current, next = next, current[:0]
|
|
||||||
count, nextCount = nextCount, map[reflect.Type]int{}
|
|
||||||
|
|
||||||
for _, f := range current {
|
|
||||||
if visited[f.typ] {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
visited[f.typ] = true
|
|
||||||
|
|
||||||
// Scan f.typ for fields to include.
|
|
||||||
for i := 0; i < f.typ.NumField(); i++ {
|
|
||||||
sf := f.typ.Field(i)
|
|
||||||
if sf.PkgPath != "" { // unexported
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
name := sf.Tag.Get("toml")
|
|
||||||
if name == "-" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
index := make([]int, len(f.index)+1)
|
|
||||||
copy(index, f.index)
|
|
||||||
index[len(f.index)] = i
|
|
||||||
|
|
||||||
ft := sf.Type
|
|
||||||
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
|
|
||||||
// Follow pointer.
|
|
||||||
ft = ft.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Record found field and index sequence.
|
|
||||||
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
|
|
||||||
tagged := name != ""
|
|
||||||
if name == "" {
|
|
||||||
name = sf.Name
|
|
||||||
}
|
|
||||||
fields = append(fields, field{name, tagged, index, ft})
|
|
||||||
if count[f.typ] > 1 {
|
|
||||||
// If there were multiple instances, add a second,
|
|
||||||
// so that the annihilation code will see a duplicate.
|
|
||||||
// It only cares about the distinction between 1 or 2,
|
|
||||||
// so don't bother generating any more copies.
|
|
||||||
fields = append(fields, fields[len(fields)-1])
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Record new anonymous struct to explore in next round.
|
|
||||||
nextCount[ft]++
|
|
||||||
if nextCount[ft] == 1 {
|
|
||||||
f := field{name: ft.Name(), index: index, typ: ft}
|
|
||||||
next = append(next, f)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Sort(byName(fields))
|
|
||||||
|
|
||||||
// Delete all fields that are hidden by the Go rules for embedded fields,
|
|
||||||
// except that fields with TOML tags are promoted.
|
|
||||||
|
|
||||||
// The fields are sorted in primary order of name, secondary order
|
|
||||||
// of field index length. Loop over names; for each name, delete
|
|
||||||
// hidden fields by choosing the one dominant field that survives.
|
|
||||||
out := fields[:0]
|
|
||||||
for advance, i := 0, 0; i < len(fields); i += advance {
|
|
||||||
// One iteration per name.
|
|
||||||
// Find the sequence of fields with the name of this first field.
|
|
||||||
fi := fields[i]
|
|
||||||
name := fi.name
|
|
||||||
for advance = 1; i+advance < len(fields); advance++ {
|
|
||||||
fj := fields[i+advance]
|
|
||||||
if fj.name != name {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if advance == 1 { // Only one field with this name
|
|
||||||
out = append(out, fi)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
dominant, ok := dominantField(fields[i : i+advance])
|
|
||||||
if ok {
|
|
||||||
out = append(out, dominant)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fields = out
|
|
||||||
sort.Sort(byIndex(fields))
|
|
||||||
|
|
||||||
return fields
|
|
||||||
}
|
|
||||||
|
|
||||||
// dominantField looks through the fields, all of which are known to
|
|
||||||
// have the same name, to find the single field that dominates the
|
|
||||||
// others using Go's embedding rules, modified by the presence of
|
|
||||||
// TOML tags. If there are multiple top-level fields, the boolean
|
|
||||||
// will be false: This condition is an error in Go and we skip all
|
|
||||||
// the fields.
|
|
||||||
func dominantField(fields []field) (field, bool) {
|
|
||||||
// The fields are sorted in increasing index-length order. The winner
|
|
||||||
// must therefore be one with the shortest index length. Drop all
|
|
||||||
// longer entries, which is easy: just truncate the slice.
|
|
||||||
length := len(fields[0].index)
|
|
||||||
tagged := -1 // Index of first tagged field.
|
|
||||||
for i, f := range fields {
|
|
||||||
if len(f.index) > length {
|
|
||||||
fields = fields[:i]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if f.tag {
|
|
||||||
if tagged >= 0 {
|
|
||||||
// Multiple tagged fields at the same level: conflict.
|
|
||||||
// Return no field.
|
|
||||||
return field{}, false
|
|
||||||
}
|
|
||||||
tagged = i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if tagged >= 0 {
|
|
||||||
return fields[tagged], true
|
|
||||||
}
|
|
||||||
// All remaining fields have the same length. If there's more than one,
|
|
||||||
// we have a conflict (two fields named "X" at the same level) and we
|
|
||||||
// return no field.
|
|
||||||
if len(fields) > 1 {
|
|
||||||
return field{}, false
|
|
||||||
}
|
|
||||||
return fields[0], true
|
|
||||||
}
|
|
||||||
|
|
||||||
var fieldCache struct {
|
|
||||||
sync.RWMutex
|
|
||||||
m map[reflect.Type][]field
|
|
||||||
}
|
|
||||||
|
|
||||||
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
|
|
||||||
func cachedTypeFields(t reflect.Type) []field {
|
|
||||||
fieldCache.RLock()
|
|
||||||
f := fieldCache.m[t]
|
|
||||||
fieldCache.RUnlock()
|
|
||||||
if f != nil {
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compute fields without lock.
|
|
||||||
// Might duplicate effort but won't hold other computations back.
|
|
||||||
f = typeFields(t)
|
|
||||||
if f == nil {
|
|
||||||
f = []field{}
|
|
||||||
}
|
|
||||||
|
|
||||||
fieldCache.Lock()
|
|
||||||
if fieldCache.m == nil {
|
|
||||||
fieldCache.m = map[reflect.Type][]field{}
|
|
||||||
}
|
|
||||||
fieldCache.m[t] = f
|
|
||||||
fieldCache.Unlock()
|
|
||||||
return f
|
|
||||||
}
|
|
6
vendor/github.com/go-ini/ini/.gitignore
generated
vendored
Normal file
6
vendor/github.com/go-ini/ini/.gitignore
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
testdata/conf_out.ini
|
||||||
|
ini.sublime-project
|
||||||
|
ini.sublime-workspace
|
||||||
|
testdata/conf_reflect.ini
|
||||||
|
.idea
|
||||||
|
/.vscode
|
191
vendor/github.com/go-ini/ini/LICENSE
generated
vendored
Normal file
191
vendor/github.com/go-ini/ini/LICENSE
generated
vendored
Normal file
@ -0,0 +1,191 @@
|
|||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction, and
|
||||||
|
distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||||
|
owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||||
|
that control, are controlled by, or are under common control with that entity.
|
||||||
|
For the purposes of this definition, "control" means (i) the power, direct or
|
||||||
|
indirect, to cause the direction or management of such entity, whether by
|
||||||
|
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||||
|
permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications, including
|
||||||
|
but not limited to software source code, documentation source, and configuration
|
||||||
|
files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical transformation or
|
||||||
|
translation of a Source form, including but not limited to compiled object code,
|
||||||
|
generated documentation, and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||||
|
available under the License, as indicated by a copyright notice that is included
|
||||||
|
in or attached to the work (an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||||
|
is based on (or derived from) the Work and for which the editorial revisions,
|
||||||
|
annotations, elaborations, or other modifications represent, as a whole, an
|
||||||
|
original work of authorship. For the purposes of this License, Derivative Works
|
||||||
|
shall not include works that remain separable from, or merely link (or bind by
|
||||||
|
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including the original version
|
||||||
|
of the Work and any modifications or additions to that Work or Derivative Works
|
||||||
|
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||||
|
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||||
|
on behalf of the copyright owner. For the purposes of this definition,
|
||||||
|
"submitted" means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems, and
|
||||||
|
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||||
|
the purpose of discussing and improving the Work, but excluding communication
|
||||||
|
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||||
|
owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||||
|
of whom a Contribution has been received by Licensor and subsequently
|
||||||
|
incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License.
|
||||||
|
|
||||||
|
Subject to the terms and conditions of this License, each Contributor hereby
|
||||||
|
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||||
|
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||||
|
Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License.
|
||||||
|
|
||||||
|
Subject to the terms and conditions of this License, each Contributor hereby
|
||||||
|
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||||
|
irrevocable (except as stated in this section) patent license to make, have
|
||||||
|
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||||
|
such license applies only to those patent claims licensable by such Contributor
|
||||||
|
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||||
|
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||||
|
submitted. If You institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||||
|
Contribution incorporated within the Work constitutes direct or contributory
|
||||||
|
patent infringement, then any patent licenses granted to You under this License
|
||||||
|
for that Work shall terminate as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution.
|
||||||
|
|
||||||
|
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||||
|
in any medium, with or without modifications, and in Source or Object form,
|
||||||
|
provided that You meet the following conditions:
|
||||||
|
|
||||||
|
You must give any other recipients of the Work or Derivative Works a copy of
|
||||||
|
this License; and
|
||||||
|
You must cause any modified files to carry prominent notices stating that You
|
||||||
|
changed the files; and
|
||||||
|
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||||
|
all copyright, patent, trademark, and attribution notices from the Source form
|
||||||
|
of the Work, excluding those notices that do not pertain to any part of the
|
||||||
|
Derivative Works; and
|
||||||
|
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||||
|
Derivative Works that You distribute must include a readable copy of the
|
||||||
|
attribution notices contained within such NOTICE file, excluding those notices
|
||||||
|
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||||
|
following places: within a NOTICE text file distributed as part of the
|
||||||
|
Derivative Works; within the Source form or documentation, if provided along
|
||||||
|
with the Derivative Works; or, within a display generated by the Derivative
|
||||||
|
Works, if and wherever such third-party notices normally appear. The contents of
|
||||||
|
the NOTICE file are for informational purposes only and do not modify the
|
||||||
|
License. You may add Your own attribution notices within Derivative Works that
|
||||||
|
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||||
|
provided that such additional attribution notices cannot be construed as
|
||||||
|
modifying the License.
|
||||||
|
You may add Your own copyright statement to Your modifications and may provide
|
||||||
|
additional or different license terms and conditions for use, reproduction, or
|
||||||
|
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||||
|
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||||
|
with the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions.
|
||||||
|
|
||||||
|
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||||
|
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||||
|
conditions of this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||||
|
any separate license agreement you may have executed with Licensor regarding
|
||||||
|
such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks.
|
||||||
|
|
||||||
|
This License does not grant permission to use the trade names, trademarks,
|
||||||
|
service marks, or product names of the Licensor, except as required for
|
||||||
|
reasonable and customary use in describing the origin of the Work and
|
||||||
|
reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty.
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||||
|
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||||
|
including, without limitation, any warranties or conditions of TITLE,
|
||||||
|
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||||
|
solely responsible for determining the appropriateness of using or
|
||||||
|
redistributing the Work and assume any risks associated with Your exercise of
|
||||||
|
permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability.
|
||||||
|
|
||||||
|
In no event and under no legal theory, whether in tort (including negligence),
|
||||||
|
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||||
|
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special, incidental,
|
||||||
|
or consequential damages of any character arising as a result of this License or
|
||||||
|
out of the use or inability to use the Work (including but not limited to
|
||||||
|
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||||
|
any and all other commercial damages or losses), even if such Contributor has
|
||||||
|
been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability.
|
||||||
|
|
||||||
|
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||||
|
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||||
|
other liability obligations and/or rights consistent with this License. However,
|
||||||
|
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||||
|
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||||
|
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason of your
|
||||||
|
accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following boilerplate
|
||||||
|
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||||
|
identifying information. (Don't include the brackets!) The text should be
|
||||||
|
enclosed in the appropriate comment syntax for the file format. We also
|
||||||
|
recommend that a file or class name and description of purpose be included on
|
||||||
|
the same "printed page" as the copyright notice for easier identification within
|
||||||
|
third-party archives.
|
||||||
|
|
||||||
|
Copyright 2014 Unknwon
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
15
vendor/github.com/go-ini/ini/Makefile
generated
vendored
Normal file
15
vendor/github.com/go-ini/ini/Makefile
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
.PHONY: build test bench vet coverage
|
||||||
|
|
||||||
|
build: vet bench
|
||||||
|
|
||||||
|
test:
|
||||||
|
go test -v -cover -race
|
||||||
|
|
||||||
|
bench:
|
||||||
|
go test -v -cover -test.bench=. -test.benchmem
|
||||||
|
|
||||||
|
vet:
|
||||||
|
go vet
|
||||||
|
|
||||||
|
coverage:
|
||||||
|
go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out
|
43
vendor/github.com/go-ini/ini/README.md
generated
vendored
Normal file
43
vendor/github.com/go-ini/ini/README.md
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
# INI
|
||||||
|
|
||||||
|
[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/go-ini/ini/Go?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=workflow%3AGo)
|
||||||
|
[![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini)
|
||||||
|
[![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc)
|
||||||
|
[![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini)
|
||||||
|
|
||||||
|
![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
|
||||||
|
|
||||||
|
Package ini provides INI file read and write functionality in Go.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Load from multiple data sources(file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites.
|
||||||
|
- Read with recursion values.
|
||||||
|
- Read with parent-child sections.
|
||||||
|
- Read with auto-increment key names.
|
||||||
|
- Read with multiple-line values.
|
||||||
|
- Read with tons of helper methods.
|
||||||
|
- Read and convert values to Go types.
|
||||||
|
- Read and **WRITE** comments of sections and keys.
|
||||||
|
- Manipulate sections, keys and comments with ease.
|
||||||
|
- Keep sections and keys in order as you parse and save.
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
The minimum requirement of Go is **1.6**.
|
||||||
|
|
||||||
|
```sh
|
||||||
|
$ go get gopkg.in/ini.v1
|
||||||
|
```
|
||||||
|
|
||||||
|
Please add `-u` flag to update in the future.
|
||||||
|
|
||||||
|
## Getting Help
|
||||||
|
|
||||||
|
- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started)
|
||||||
|
- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
|
||||||
|
- 中国大陆镜像:https://ini.unknwon.cn
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
|
9
vendor/github.com/go-ini/ini/codecov.yml
generated
vendored
Normal file
9
vendor/github.com/go-ini/ini/codecov.yml
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
coverage:
|
||||||
|
range: "60...95"
|
||||||
|
status:
|
||||||
|
project:
|
||||||
|
default:
|
||||||
|
threshold: 1%
|
||||||
|
|
||||||
|
comment:
|
||||||
|
layout: 'diff, files'
|
76
vendor/github.com/go-ini/ini/data_source.go
generated
vendored
Normal file
76
vendor/github.com/go-ini/ini/data_source.go
generated
vendored
Normal file
@ -0,0 +1,76 @@
|
|||||||
|
// Copyright 2019 Unknwon
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||||
|
// not use this file except in compliance with the License. You may obtain
|
||||||
|
// a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
// License for the specific language governing permissions and limitations
|
||||||
|
// under the License.
|
||||||
|
|
||||||
|
package ini
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Compile-time checks that every source type satisfies dataSource.
var (
	_ dataSource = (*sourceFile)(nil)
	_ dataSource = (*sourceData)(nil)
	_ dataSource = (*sourceReadCloser)(nil)
)

// dataSource is an interface that returns object which can be read and closed.
type dataSource interface {
	ReadCloser() (io.ReadCloser, error)
}

// sourceFile represents an object that contains content on the local file system.
type sourceFile struct {
	name string
}

// ReadCloser opens the underlying file; the caller is responsible for closing it.
func (s sourceFile) ReadCloser() (io.ReadCloser, error) {
	return os.Open(s.name)
}

// sourceData represents an object that contains content in memory.
type sourceData struct {
	data []byte
}

// ReadCloser wraps the in-memory bytes in a no-op closer.
func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
	return ioutil.NopCloser(bytes.NewReader(s.data)), nil
}

// sourceReadCloser represents an input stream with Close method.
type sourceReadCloser struct {
	reader io.ReadCloser
}

// ReadCloser hands back the stream as-is; ownership of Close stays with the caller.
func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
	return s.reader, nil
}

// parseDataSource converts a caller-supplied value (file name string, []byte,
// io.ReadCloser or io.Reader) into a dataSource. It returns an error for any
// other type.
func parseDataSource(source interface{}) (dataSource, error) {
	switch s := source.(type) {
	case string:
		return sourceFile{s}, nil
	case []byte:
		return &sourceData{s}, nil
	// NOTE: io.ReadCloser must be matched before io.Reader, since every
	// ReadCloser also satisfies Reader.
	case io.ReadCloser:
		return &sourceReadCloser{s}, nil
	case io.Reader:
		return &sourceReadCloser{ioutil.NopCloser(s)}, nil
	default:
		return nil, fmt.Errorf("error parsing data source: unknown type %q", s)
	}
}
|
25
vendor/github.com/go-ini/ini/deprecated.go
generated
vendored
Normal file
25
vendor/github.com/go-ini/ini/deprecated.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
// Copyright 2019 Unknwon
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||||
|
// not use this file except in compliance with the License. You may obtain
|
||||||
|
// a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
// License for the specific language governing permissions and limitations
|
||||||
|
// under the License.
|
||||||
|
|
||||||
|
package ini
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Deprecated: Use "DefaultSection" instead.
|
||||||
|
DEFAULT_SECTION = DefaultSection
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
|
||||||
|
AllCapsUnderscore = SnackCase
|
||||||
|
)
|
34
vendor/github.com/go-ini/ini/error.go
generated
vendored
Normal file
34
vendor/github.com/go-ini/ini/error.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
// Copyright 2016 Unknwon
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||||
|
// not use this file except in compliance with the License. You may obtain
|
||||||
|
// a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
// License for the specific language governing permissions and limitations
|
||||||
|
// under the License.
|
||||||
|
|
||||||
|
package ini
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrDelimiterNotFound indicates the error type of no delimiter is found which there should be one.
type ErrDelimiterNotFound struct {
	Line string
}

// Error implements the error interface.
func (err ErrDelimiterNotFound) Error() string {
	return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
}

// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound.
func IsErrDelimiterNotFound(err error) bool {
	switch err.(type) {
	case ErrDelimiterNotFound:
		return true
	default:
		return false
	}
}
|
517
vendor/github.com/go-ini/ini/file.go
generated
vendored
Normal file
517
vendor/github.com/go-ini/ini/file.go
generated
vendored
Normal file
@ -0,0 +1,517 @@
|
|||||||
|
// Copyright 2017 Unknwon
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||||
|
// not use this file except in compliance with the License. You may obtain
|
||||||
|
// a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
// License for the specific language governing permissions and limitations
|
||||||
|
// under the License.
|
||||||
|
|
||||||
|
package ini
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// File represents a combination of one or more INI files in memory.
|
||||||
|
type File struct {
|
||||||
|
options LoadOptions
|
||||||
|
dataSources []dataSource
|
||||||
|
|
||||||
|
// Should make things safe, but sometimes doesn't matter.
|
||||||
|
BlockMode bool
|
||||||
|
lock sync.RWMutex
|
||||||
|
|
||||||
|
// To keep data in order.
|
||||||
|
sectionList []string
|
||||||
|
// To keep track of the index of a section with same name.
|
||||||
|
	// This meta list is only used when non-unique section names are allowed.
|
||||||
|
sectionIndexes []int
|
||||||
|
|
||||||
|
// Actual data is stored here.
|
||||||
|
sections map[string][]*Section
|
||||||
|
|
||||||
|
NameMapper
|
||||||
|
ValueMapper
|
||||||
|
}
|
||||||
|
|
||||||
|
// newFile initializes File object with given data sources.
|
||||||
|
func newFile(dataSources []dataSource, opts LoadOptions) *File {
|
||||||
|
if len(opts.KeyValueDelimiters) == 0 {
|
||||||
|
opts.KeyValueDelimiters = "=:"
|
||||||
|
}
|
||||||
|
if len(opts.KeyValueDelimiterOnWrite) == 0 {
|
||||||
|
opts.KeyValueDelimiterOnWrite = "="
|
||||||
|
}
|
||||||
|
if len(opts.ChildSectionDelimiter) == 0 {
|
||||||
|
opts.ChildSectionDelimiter = "."
|
||||||
|
}
|
||||||
|
|
||||||
|
return &File{
|
||||||
|
BlockMode: true,
|
||||||
|
dataSources: dataSources,
|
||||||
|
sections: make(map[string][]*Section),
|
||||||
|
options: opts,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Empty returns an empty file object.
|
||||||
|
func Empty(opts ...LoadOptions) *File {
|
||||||
|
var opt LoadOptions
|
||||||
|
if len(opts) > 0 {
|
||||||
|
opt = opts[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ignore error here, we are sure our data is good.
|
||||||
|
f, _ := LoadSources(opt, []byte(""))
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSection creates a new section.
|
||||||
|
func (f *File) NewSection(name string) (*Section, error) {
|
||||||
|
if len(name) == 0 {
|
||||||
|
return nil, errors.New("empty section name")
|
||||||
|
}
|
||||||
|
|
||||||
|
if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection {
|
||||||
|
name = strings.ToLower(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.BlockMode {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) {
|
||||||
|
return f.sections[name][0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
f.sectionList = append(f.sectionList, name)
|
||||||
|
|
||||||
|
// NOTE: Append to indexes must happen before appending to sections,
|
||||||
|
// otherwise index will have off-by-one problem.
|
||||||
|
f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name]))
|
||||||
|
|
||||||
|
sec := newSection(f, name)
|
||||||
|
f.sections[name] = append(f.sections[name], sec)
|
||||||
|
|
||||||
|
return sec, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRawSection creates a new section with an unparseable body.
|
||||||
|
func (f *File) NewRawSection(name, body string) (*Section, error) {
|
||||||
|
section, err := f.NewSection(name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
section.isRawSection = true
|
||||||
|
section.rawBody = body
|
||||||
|
return section, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSections creates a list of sections.
|
||||||
|
func (f *File) NewSections(names ...string) (err error) {
|
||||||
|
for _, name := range names {
|
||||||
|
if _, err = f.NewSection(name); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSection returns section by given name.
|
||||||
|
func (f *File) GetSection(name string) (*Section, error) {
|
||||||
|
secs, err := f.SectionsByName(name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return secs[0], err
|
||||||
|
}
|
||||||
|
|
||||||
|
// SectionsByName returns all sections with given name.
|
||||||
|
func (f *File) SectionsByName(name string) ([]*Section, error) {
|
||||||
|
if len(name) == 0 {
|
||||||
|
name = DefaultSection
|
||||||
|
}
|
||||||
|
if f.options.Insensitive || f.options.InsensitiveSections {
|
||||||
|
name = strings.ToLower(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.BlockMode {
|
||||||
|
f.lock.RLock()
|
||||||
|
defer f.lock.RUnlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
secs := f.sections[name]
|
||||||
|
if len(secs) == 0 {
|
||||||
|
return nil, fmt.Errorf("section %q does not exist", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
return secs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Section assumes named section exists and returns a zero-value when not.
|
||||||
|
func (f *File) Section(name string) *Section {
|
||||||
|
sec, err := f.GetSection(name)
|
||||||
|
if err != nil {
|
||||||
|
// Note: It's OK here because the only possible error is empty section name,
|
||||||
|
// but if it's empty, this piece of code won't be executed.
|
||||||
|
sec, _ = f.NewSection(name)
|
||||||
|
return sec
|
||||||
|
}
|
||||||
|
return sec
|
||||||
|
}
|
||||||
|
|
||||||
|
// SectionWithIndex assumes named section exists and returns a new section when not.
|
||||||
|
func (f *File) SectionWithIndex(name string, index int) *Section {
|
||||||
|
secs, err := f.SectionsByName(name)
|
||||||
|
if err != nil || len(secs) <= index {
|
||||||
|
// NOTE: It's OK here because the only possible error is empty section name,
|
||||||
|
// but if it's empty, this piece of code won't be executed.
|
||||||
|
newSec, _ := f.NewSection(name)
|
||||||
|
return newSec
|
||||||
|
}
|
||||||
|
|
||||||
|
return secs[index]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sections returns a list of Section stored in the current instance.
|
||||||
|
func (f *File) Sections() []*Section {
|
||||||
|
if f.BlockMode {
|
||||||
|
f.lock.RLock()
|
||||||
|
defer f.lock.RUnlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
sections := make([]*Section, len(f.sectionList))
|
||||||
|
for i, name := range f.sectionList {
|
||||||
|
sections[i] = f.sections[name][f.sectionIndexes[i]]
|
||||||
|
}
|
||||||
|
return sections
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChildSections returns a list of child sections of given section name.
|
||||||
|
func (f *File) ChildSections(name string) []*Section {
|
||||||
|
return f.Section(name).ChildSections()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SectionStrings returns list of section names.
|
||||||
|
func (f *File) SectionStrings() []string {
|
||||||
|
list := make([]string, len(f.sectionList))
|
||||||
|
copy(list, f.sectionList)
|
||||||
|
return list
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteSection deletes a section or all sections with given name.
|
||||||
|
func (f *File) DeleteSection(name string) {
|
||||||
|
secs, err := f.SectionsByName(name)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < len(secs); i++ {
|
||||||
|
// For non-unique sections, it is always needed to remove the first one so
|
||||||
|
// in the next iteration, the subsequent section continue having index 0.
|
||||||
|
// Ignoring the error as index 0 never returns an error.
|
||||||
|
_ = f.DeleteSectionWithIndex(name, 0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteSectionWithIndex deletes a section with given name and index.
|
||||||
|
func (f *File) DeleteSectionWithIndex(name string, index int) error {
|
||||||
|
if !f.options.AllowNonUniqueSections && index != 0 {
|
||||||
|
return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(name) == 0 {
|
||||||
|
name = DefaultSection
|
||||||
|
}
|
||||||
|
if f.options.Insensitive || f.options.InsensitiveSections {
|
||||||
|
name = strings.ToLower(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.BlockMode {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count occurrences of the sections
|
||||||
|
occurrences := 0
|
||||||
|
|
||||||
|
sectionListCopy := make([]string, len(f.sectionList))
|
||||||
|
copy(sectionListCopy, f.sectionList)
|
||||||
|
|
||||||
|
for i, s := range sectionListCopy {
|
||||||
|
if s != name {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if occurrences == index {
|
||||||
|
if len(f.sections[name]) <= 1 {
|
||||||
|
delete(f.sections, name) // The last one in the map
|
||||||
|
} else {
|
||||||
|
f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fix section lists
|
||||||
|
f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
|
||||||
|
f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...)
|
||||||
|
|
||||||
|
} else if occurrences > index {
|
||||||
|
// Fix the indices of all following sections with this name.
|
||||||
|
f.sectionIndexes[i-1]--
|
||||||
|
}
|
||||||
|
|
||||||
|
occurrences++
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *File) reload(s dataSource) error {
|
||||||
|
r, err := s.ReadCloser()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer r.Close()
|
||||||
|
|
||||||
|
return f.parse(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reload reloads and parses all data sources.
|
||||||
|
func (f *File) Reload() (err error) {
|
||||||
|
for _, s := range f.dataSources {
|
||||||
|
if err = f.reload(s); err != nil {
|
||||||
|
// In loose mode, we create an empty default section for nonexistent files.
|
||||||
|
if os.IsNotExist(err) && f.options.Loose {
|
||||||
|
_ = f.parse(bytes.NewBuffer(nil))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if f.options.ShortCircuit {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Append appends one or more data sources and reloads automatically.
|
||||||
|
func (f *File) Append(source interface{}, others ...interface{}) error {
|
||||||
|
ds, err := parseDataSource(source)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
f.dataSources = append(f.dataSources, ds)
|
||||||
|
for _, s := range others {
|
||||||
|
ds, err = parseDataSource(s)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
f.dataSources = append(f.dataSources, ds)
|
||||||
|
}
|
||||||
|
return f.Reload()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
|
||||||
|
equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight
|
||||||
|
|
||||||
|
if PrettyFormat || PrettyEqual {
|
||||||
|
equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use buffer to make sure target is safe until finish encoding.
|
||||||
|
buf := bytes.NewBuffer(nil)
|
||||||
|
for i, sname := range f.sectionList {
|
||||||
|
sec := f.SectionWithIndex(sname, f.sectionIndexes[i])
|
||||||
|
if len(sec.Comment) > 0 {
|
||||||
|
// Support multiline comments
|
||||||
|
lines := strings.Split(sec.Comment, LineBreak)
|
||||||
|
for i := range lines {
|
||||||
|
if lines[i][0] != '#' && lines[i][0] != ';' {
|
||||||
|
lines[i] = "; " + lines[i]
|
||||||
|
} else {
|
||||||
|
lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) {
|
||||||
|
if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Write nothing if default section is empty
|
||||||
|
if len(sec.keyList) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if sec.isRawSection {
|
||||||
|
if _, err := buf.WriteString(sec.rawBody); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if PrettySection {
|
||||||
|
// Put a line between sections
|
||||||
|
if _, err := buf.WriteString(LineBreak); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count and generate alignment length and buffer spaces using the
|
||||||
|
// longest key. Keys may be modified if they contain certain characters so
|
||||||
|
// we need to take that into account in our calculation.
|
||||||
|
alignLength := 0
|
||||||
|
if PrettyFormat {
|
||||||
|
for _, kname := range sec.keyList {
|
||||||
|
keyLength := len(kname)
|
||||||
|
// First case will surround key by ` and second by """
|
||||||
|
if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) {
|
||||||
|
keyLength += 2
|
||||||
|
} else if strings.Contains(kname, "`") {
|
||||||
|
keyLength += 6
|
||||||
|
}
|
||||||
|
|
||||||
|
if keyLength > alignLength {
|
||||||
|
alignLength = keyLength
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
alignSpaces := bytes.Repeat([]byte(" "), alignLength)
|
||||||
|
|
||||||
|
KeyList:
|
||||||
|
for _, kname := range sec.keyList {
|
||||||
|
key := sec.Key(kname)
|
||||||
|
if len(key.Comment) > 0 {
|
||||||
|
if len(indent) > 0 && sname != DefaultSection {
|
||||||
|
buf.WriteString(indent)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Support multiline comments
|
||||||
|
lines := strings.Split(key.Comment, LineBreak)
|
||||||
|
for i := range lines {
|
||||||
|
if lines[i][0] != '#' && lines[i][0] != ';' {
|
||||||
|
lines[i] = "; " + strings.TrimSpace(lines[i])
|
||||||
|
} else {
|
||||||
|
lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(indent) > 0 && sname != DefaultSection {
|
||||||
|
buf.WriteString(indent)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case key.isAutoIncrement:
|
||||||
|
kname = "-"
|
||||||
|
case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters):
|
||||||
|
kname = "`" + kname + "`"
|
||||||
|
case strings.Contains(kname, "`"):
|
||||||
|
kname = `"""` + kname + `"""`
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, val := range key.ValueWithShadows() {
|
||||||
|
if _, err := buf.WriteString(kname); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if key.isBooleanType {
|
||||||
|
if kname != sec.keyList[len(sec.keyList)-1] {
|
||||||
|
buf.WriteString(LineBreak)
|
||||||
|
}
|
||||||
|
continue KeyList
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write out alignment spaces before "=" sign
|
||||||
|
if PrettyFormat {
|
||||||
|
buf.Write(alignSpaces[:alignLength-len(kname)])
|
||||||
|
}
|
||||||
|
|
||||||
|
// In case key value contains "\n", "`", "\"", "#" or ";"
|
||||||
|
if strings.ContainsAny(val, "\n`") {
|
||||||
|
val = `"""` + val + `"""`
|
||||||
|
} else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
|
||||||
|
val = "`" + val + "`"
|
||||||
|
} else if len(strings.TrimSpace(val)) != len(val) {
|
||||||
|
val = `"` + val + `"`
|
||||||
|
}
|
||||||
|
if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, val := range key.nestedValues {
|
||||||
|
if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if PrettySection {
|
||||||
|
// Put a line between sections
|
||||||
|
if _, err := buf.WriteString(LineBreak); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteToIndent writes content into io.Writer with given indention.
|
||||||
|
// If PrettyFormat has been set to be true,
|
||||||
|
// it will align "=" sign with spaces under each section.
|
||||||
|
func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
|
||||||
|
buf, err := f.writeToBuffer(indent)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return buf.WriteTo(w)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteTo writes file content into io.Writer.
|
||||||
|
func (f *File) WriteTo(w io.Writer) (int64, error) {
|
||||||
|
return f.WriteToIndent(w, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveToIndent writes content to file system with given value indention.
|
||||||
|
func (f *File) SaveToIndent(filename, indent string) error {
|
||||||
|
// Note: Because we are truncating with os.Create,
|
||||||
|
// so it's safer to save to a temporary file location and rename after done.
|
||||||
|
buf, err := f.writeToBuffer(indent)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ioutil.WriteFile(filename, buf.Bytes(), 0666)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveTo writes content to file system.
|
||||||
|
func (f *File) SaveTo(filename string) error {
|
||||||
|
return f.SaveToIndent(filename, "")
|
||||||
|
}
|
24
vendor/github.com/go-ini/ini/helper.go
generated
vendored
Normal file
24
vendor/github.com/go-ini/ini/helper.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
// Copyright 2019 Unknwon
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||||
|
// not use this file except in compliance with the License. You may obtain
|
||||||
|
// a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
// License for the specific language governing permissions and limitations
|
||||||
|
// under the License.
|
||||||
|
|
||||||
|
package ini
|
||||||
|
|
||||||
|
// inSlice reports whether str is present in the string slice s.
func inSlice(str string, s []string) bool {
	for i := range s {
		if s[i] == str {
			return true
		}
	}
	return false
}
|
176
vendor/github.com/go-ini/ini/ini.go
generated
vendored
Normal file
176
vendor/github.com/go-ini/ini/ini.go
generated
vendored
Normal file
@ -0,0 +1,176 @@
|
|||||||
|
// +build go1.6
|
||||||
|
|
||||||
|
// Copyright 2014 Unknwon
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||||
|
// not use this file except in compliance with the License. You may obtain
|
||||||
|
// a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
// License for the specific language governing permissions and limitations
|
||||||
|
// under the License.
|
||||||
|
|
||||||
|
// Package ini provides INI file read and write functionality in Go.
|
||||||
|
package ini
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// DefaultSection is the name of default section. You can use this constant or the string literal.
|
||||||
|
// In most of cases, an empty string is all you need to access the section.
|
||||||
|
DefaultSection = "DEFAULT"
|
||||||
|
|
||||||
|
	// Maximum allowed depth when recursively substituting variable names.
|
||||||
|
depthValues = 99
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// LineBreak is the delimiter to determine or compose a new line.
|
||||||
|
// This variable will be changed to "\r\n" automatically on Windows at package init time.
|
||||||
|
LineBreak = "\n"
|
||||||
|
|
||||||
|
// Variable regexp pattern: %(variable)s
|
||||||
|
varPattern = regexp.MustCompile(`%\(([^)]+)\)s`)
|
||||||
|
|
||||||
|
// DefaultHeader explicitly writes default section header.
|
||||||
|
DefaultHeader = false
|
||||||
|
|
||||||
|
// PrettySection indicates whether to put a line between sections.
|
||||||
|
PrettySection = true
|
||||||
|
// PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output
|
||||||
|
// or reduce all possible spaces for compact format.
|
||||||
|
PrettyFormat = true
|
||||||
|
// PrettyEqual places spaces around "=" sign even when PrettyFormat is false.
|
||||||
|
PrettyEqual = false
|
||||||
|
// DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled.
|
||||||
|
DefaultFormatLeft = ""
|
||||||
|
// DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled.
|
||||||
|
DefaultFormatRight = ""
|
||||||
|
)
|
||||||
|
|
||||||
|
var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
if runtime.GOOS == "windows" && !inTest {
|
||||||
|
LineBreak = "\r\n"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadOptions contains all customized options used for load data source(s).
|
||||||
|
type LoadOptions struct {
|
||||||
|
// Loose indicates whether the parser should ignore nonexistent files or return error.
|
||||||
|
Loose bool
|
||||||
|
// Insensitive indicates whether the parser forces all section and key names to lowercase.
|
||||||
|
Insensitive bool
|
||||||
|
// InsensitiveSections indicates whether the parser forces all section to lowercase.
|
||||||
|
InsensitiveSections bool
|
||||||
|
// InsensitiveKeys indicates whether the parser forces all key names to lowercase.
|
||||||
|
InsensitiveKeys bool
|
||||||
|
// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
|
||||||
|
IgnoreContinuation bool
|
||||||
|
// IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
|
||||||
|
IgnoreInlineComment bool
|
||||||
|
// SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
|
||||||
|
SkipUnrecognizableLines bool
|
||||||
|
// ShortCircuit indicates whether to ignore other configuration sources after loaded the first available configuration source.
|
||||||
|
ShortCircuit bool
|
||||||
|
// AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
|
||||||
|
// This type of keys are mostly used in my.cnf.
|
||||||
|
AllowBooleanKeys bool
|
||||||
|
// AllowShadows indicates whether to keep track of keys with same name under same section.
|
||||||
|
AllowShadows bool
|
||||||
|
// AllowNestedValues indicates whether to allow AWS-like nested values.
|
||||||
|
// Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values
|
||||||
|
AllowNestedValues bool
|
||||||
|
// AllowPythonMultilineValues indicates whether to allow Python-like multi-line values.
|
||||||
|
// Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure
|
||||||
|
// Relevant quote: Values can also span multiple lines, as long as they are indented deeper
|
||||||
|
// than the first line of the value.
|
||||||
|
AllowPythonMultilineValues bool
|
||||||
|
// SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value.
|
||||||
|
// Docs: https://docs.python.org/2/library/configparser.html
|
||||||
|
// Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names.
|
||||||
|
// In the latter case, they need to be preceded by a whitespace character to be recognized as a comment.
|
||||||
|
SpaceBeforeInlineComment bool
|
||||||
|
// UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format
|
||||||
|
// when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
|
||||||
|
UnescapeValueDoubleQuotes bool
|
||||||
|
// UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format
|
||||||
|
// when value is NOT surrounded by any quotes.
|
||||||
|
	// Note: UNSTABLE, behavior might change to only unescape inside double quotes but may not be necessary at all.
|
||||||
|
UnescapeValueCommentSymbols bool
|
||||||
|
// UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise
|
||||||
|
// conform to key/value pairs. Specify the names of those blocks here.
|
||||||
|
UnparseableSections []string
|
||||||
|
// KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:".
|
||||||
|
KeyValueDelimiters string
|
||||||
|
	// KeyValueDelimiterOnWrite is the delimiter that is used to separate key and value output. By default, it is "=".
|
||||||
|
KeyValueDelimiterOnWrite string
|
||||||
|
// ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".".
|
||||||
|
ChildSectionDelimiter string
|
||||||
|
// PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes).
|
||||||
|
PreserveSurroundedQuote bool
|
||||||
|
// DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values).
|
||||||
|
DebugFunc DebugFunc
|
||||||
|
// ReaderBufferSize is the buffer size of the reader in bytes.
|
||||||
|
ReaderBufferSize int
|
||||||
|
// AllowNonUniqueSections indicates whether to allow sections with the same name multiple times.
|
||||||
|
AllowNonUniqueSections bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// DebugFunc is the type of function called to log parse events.
// It receives a single human-readable message per event.
type DebugFunc func(message string)
// LoadSources allows caller to apply customized options for loading from data source(s).
|
||||||
|
func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
|
||||||
|
sources := make([]dataSource, len(others)+1)
|
||||||
|
sources[0], err = parseDataSource(source)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for i := range others {
|
||||||
|
sources[i+1], err = parseDataSource(others[i])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
f := newFile(sources, opts)
|
||||||
|
if err = f.Reload(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return f, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load loads and parses from INI data sources.
// Arguments can be mixed of file name with string type, or raw data in []byte.
// It will return error if list contains nonexistent files.
func Load(source interface{}, others ...interface{}) (*File, error) {
	return LoadSources(LoadOptions{}, source, others...)
}

// LooseLoad has exactly same functionality as Load function
// except it ignores nonexistent files instead of returning error.
func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
	return LoadSources(LoadOptions{Loose: true}, source, others...)
}

// InsensitiveLoad has exactly same functionality as Load function
// except it forces all section and key names to be lowercased.
func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
	return LoadSources(LoadOptions{Insensitive: true}, source, others...)
}

// ShadowLoad has exactly same functionality as Load function
// except it allows having shadow keys (multiple values for the same key name).
func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
	return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
}
829
vendor/github.com/go-ini/ini/key.go
generated
vendored
Normal file
829
vendor/github.com/go-ini/ini/key.go
generated
vendored
Normal file
@ -0,0 +1,829 @@
|
|||||||
|
// Copyright 2014 Unknwon
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||||
|
// not use this file except in compliance with the License. You may obtain
|
||||||
|
// a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
// License for the specific language governing permissions and limitations
|
||||||
|
// under the License.
|
||||||
|
|
||||||
|
package ini
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Key represents a key under a section.
type Key struct {
	s       *Section // owning section; gives access to File-level options and ValueMapper
	Comment string   // comment text attached to this key
	name    string   // key name as it appears in the section
	value   string   // raw (untransformed) value

	// Auto-increment and boolean-shorthand keys cannot carry shadows or
	// nested values (enforced in addShadow / addNestedValue).
	isAutoIncrement bool
	isBooleanType   bool

	isShadow bool   // true when this key is itself a shadow of another key
	shadows  []*Key // additional values for the same key name (requires AllowShadows)

	nestedValues []string // raw nested lines recorded under this key (requires AllowNestedValues)
}

// newKey simply return a key object with given values.
func newKey(s *Section, name, val string) *Key {
	return &Key{
		s:     s,
		name:  name,
		value: val,
	}
}
// addShadow records val as an additional (shadow) value of this key.
// Shadow keys themselves, auto-increment keys and boolean keys cannot carry
// shadows. Values identical to the key's own value or to an existing shadow
// are silently dropped.
func (k *Key) addShadow(val string) error {
	if k.isShadow {
		return errors.New("cannot add shadow to another shadow key")
	} else if k.isAutoIncrement || k.isBooleanType {
		return errors.New("cannot add shadow to auto-increment or boolean key")
	}

	// Deduplicate shadows based on their values.
	if k.value == val {
		return nil
	}
	for i := range k.shadows {
		if k.shadows[i].value == val {
			return nil
		}
	}

	shadow := newKey(k.s, k.name, val)
	shadow.isShadow = true
	k.shadows = append(k.shadows, shadow)
	return nil
}

// AddShadow adds a new shadow key to itself.
// It returns an error unless the file was loaded with AllowShadows enabled.
func (k *Key) AddShadow(val string) error {
	if !k.s.f.options.AllowShadows {
		return errors.New("shadow key is not allowed")
	}
	return k.addShadow(val)
}
// addNestedValue appends a raw nested line to the key.
// Auto-increment and boolean keys cannot carry nested values.
func (k *Key) addNestedValue(val string) error {
	if k.isAutoIncrement || k.isBooleanType {
		return errors.New("cannot add nested value to auto-increment or boolean key")
	}

	k.nestedValues = append(k.nestedValues, val)
	return nil
}

// AddNestedValue adds a nested value to the key.
// It returns an error unless the file was loaded with AllowNestedValues enabled.
func (k *Key) AddNestedValue(val string) error {
	if !k.s.f.options.AllowNestedValues {
		return errors.New("nested value is not allowed")
	}
	return k.addNestedValue(val)
}

// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
type ValueMapper func(string) string
// Name returns name of key.
func (k *Key) Name() string {
	return k.name
}

// Value returns raw value of key for performance purpose.
// No ValueMapper or %(name)s expansion is applied; see String for that.
func (k *Key) Value() string {
	return k.value
}

// ValueWithShadows returns raw values of key and its shadows if any.
// The key's own value is always the first element.
func (k *Key) ValueWithShadows() []string {
	if len(k.shadows) == 0 {
		return []string{k.value}
	}
	vals := make([]string, len(k.shadows)+1)
	vals[0] = k.value
	for i := range k.shadows {
		vals[i+1] = k.shadows[i].value
	}
	return vals
}

// NestedValues returns nested values stored in the key.
// It is possible returned value is nil if no nested values stored in the key.
func (k *Key) NestedValues() []string {
	return k.nestedValues
}
// transformValue takes a raw value and transforms to its final string.
// It first applies the file's ValueMapper (if set), then resolves Python-style
// recursive references of the form %(name)s: each referenced key is looked up
// in the same section and, if absent (or if it is the key itself), in the
// default section. Resolution is capped at depthValues rounds to bound work
// on circular references.
func (k *Key) transformValue(val string) string {
	if k.s.f.ValueMapper != nil {
		val = k.s.f.ValueMapper(val)
	}

	// Fail-fast if no indicate char found for recursive value
	if !strings.Contains(val, "%") {
		return val
	}
	for i := 0; i < depthValues; i++ {
		vr := varPattern.FindString(val)
		if len(vr) == 0 {
			break
		}

		// Take off leading '%(' and trailing ')s'.
		noption := vr[2 : len(vr)-2]

		// Search in the same section.
		// If not found or found the key itself, then search again in default section.
		nk, err := k.s.GetKey(noption)
		if err != nil || k == nk {
			nk, _ = k.s.f.Section("").GetKey(noption)
			if nk == nil {
				// Stop when no results found in the default section,
				// and returns the value as-is.
				break
			}
		}

		// Substitute by new value and take off leading '%(' and trailing ')s'.
		val = strings.Replace(val, vr, nk.value, -1)
	}
	return val
}

// String returns string representation of value.
func (k *Key) String() string {
	return k.transformValue(k.value)
}

// Validate accepts a validate function which can
// return modified result as key value.
func (k *Key) Validate(fn func(string) string) string {
	return fn(k.String())
}
// parseBool returns the boolean value represented by the string.
//
// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
// Any other value returns an error.
func parseBool(str string) (value bool, err error) {
	truthy := [...]string{"1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On"}
	falsy := [...]string{"0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off"}

	for _, candidate := range truthy {
		if str == candidate {
			return true, nil
		}
	}
	for _, candidate := range falsy {
		if str == candidate {
			return false, nil
		}
	}
	return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
}
// Bool returns bool type value.
func (k *Key) Bool() (bool, error) {
	return parseBool(k.String())
}

// Float64 returns float64 type value.
func (k *Key) Float64() (float64, error) {
	return strconv.ParseFloat(k.String(), 64)
}

// Int returns int type value.
// Base 0 is used, so "0x.." and "0.." prefixes parse as hex and octal.
func (k *Key) Int() (int, error) {
	v, err := strconv.ParseInt(k.String(), 0, 64)
	return int(v), err
}

// Int64 returns int64 type value.
func (k *Key) Int64() (int64, error) {
	return strconv.ParseInt(k.String(), 0, 64)
}

// Uint returns uint type value.
func (k *Key) Uint() (uint, error) {
	u, e := strconv.ParseUint(k.String(), 0, 64)
	return uint(u), e
}

// Uint64 returns uint64 type value.
func (k *Key) Uint64() (uint64, error) {
	return strconv.ParseUint(k.String(), 0, 64)
}

// Duration returns time.Duration type value.
func (k *Key) Duration() (time.Duration, error) {
	return time.ParseDuration(k.String())
}

// TimeFormat parses with given format and returns time.Time type value.
func (k *Key) TimeFormat(format string) (time.Time, error) {
	return time.Parse(format, k.String())
}

// Time parses with RFC3339 format and returns time.Time type value.
func (k *Key) Time() (time.Time, error) {
	return k.TimeFormat(time.RFC3339)
}
// MustString returns default value if key value is empty.
// NOTE: when the default is used, it is also stored back as the key's value.
func (k *Key) MustString(defaultVal string) string {
	val := k.String()
	if len(val) == 0 {
		k.value = defaultVal
		return defaultVal
	}
	return val
}

// MustBool always returns value without error,
// it returns false if error occurs.
// When a default is given and parsing fails, the default is stored back into the key.
func (k *Key) MustBool(defaultVal ...bool) bool {
	val, err := k.Bool()
	if len(defaultVal) > 0 && err != nil {
		k.value = strconv.FormatBool(defaultVal[0])
		return defaultVal[0]
	}
	return val
}

// MustFloat64 always returns value without error,
// it returns 0.0 if error occurs.
// When a default is given and parsing fails, the default is stored back into the key.
func (k *Key) MustFloat64(defaultVal ...float64) float64 {
	val, err := k.Float64()
	if len(defaultVal) > 0 && err != nil {
		k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
		return defaultVal[0]
	}
	return val
}

// MustInt always returns value without error,
// it returns 0 if error occurs.
// When a default is given and parsing fails, the default is stored back into the key.
func (k *Key) MustInt(defaultVal ...int) int {
	val, err := k.Int()
	if len(defaultVal) > 0 && err != nil {
		k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
		return defaultVal[0]
	}
	return val
}

// MustInt64 always returns value without error,
// it returns 0 if error occurs.
// When a default is given and parsing fails, the default is stored back into the key.
func (k *Key) MustInt64(defaultVal ...int64) int64 {
	val, err := k.Int64()
	if len(defaultVal) > 0 && err != nil {
		k.value = strconv.FormatInt(defaultVal[0], 10)
		return defaultVal[0]
	}
	return val
}

// MustUint always returns value without error,
// it returns 0 if error occurs.
// When a default is given and parsing fails, the default is stored back into the key.
func (k *Key) MustUint(defaultVal ...uint) uint {
	val, err := k.Uint()
	if len(defaultVal) > 0 && err != nil {
		k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
		return defaultVal[0]
	}
	return val
}

// MustUint64 always returns value without error,
// it returns 0 if error occurs.
// When a default is given and parsing fails, the default is stored back into the key.
func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
	val, err := k.Uint64()
	if len(defaultVal) > 0 && err != nil {
		k.value = strconv.FormatUint(defaultVal[0], 10)
		return defaultVal[0]
	}
	return val
}

// MustDuration always returns value without error,
// it returns zero value if error occurs.
// When a default is given and parsing fails, the default is stored back into the key.
func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
	val, err := k.Duration()
	if len(defaultVal) > 0 && err != nil {
		k.value = defaultVal[0].String()
		return defaultVal[0]
	}
	return val
}

// MustTimeFormat always parses with given format and returns value without error,
// it returns zero value if error occurs.
// When a default is given and parsing fails, the default is stored back into the key.
func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
	val, err := k.TimeFormat(format)
	if len(defaultVal) > 0 && err != nil {
		k.value = defaultVal[0].Format(format)
		return defaultVal[0]
	}
	return val
}

// MustTime always parses with RFC3339 format and returns value without error,
// it returns zero value if error occurs.
func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
	return k.MustTimeFormat(time.RFC3339, defaultVal...)
}
// In always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) In(defaultVal string, candidates []string) string {
	val := k.String()
	for _, cand := range candidates {
		if val == cand {
			return val
		}
	}
	return defaultVal
}

// InFloat64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
	val := k.MustFloat64()
	for _, cand := range candidates {
		if val == cand {
			return val
		}
	}
	return defaultVal
}

// InInt always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InInt(defaultVal int, candidates []int) int {
	val := k.MustInt()
	for _, cand := range candidates {
		if val == cand {
			return val
		}
	}
	return defaultVal
}

// InInt64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
	val := k.MustInt64()
	for _, cand := range candidates {
		if val == cand {
			return val
		}
	}
	return defaultVal
}

// InUint always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
	val := k.MustUint()
	for _, cand := range candidates {
		if val == cand {
			return val
		}
	}
	return defaultVal
}

// InUint64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
	val := k.MustUint64()
	for _, cand := range candidates {
		if val == cand {
			return val
		}
	}
	return defaultVal
}

// InTimeFormat always parses with given format and returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
	val := k.MustTimeFormat(format)
	for _, cand := range candidates {
		if val == cand {
			return val
		}
	}
	return defaultVal
}

// InTime always parses with RFC3339 format and returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
	return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
}
// RangeFloat64 checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
	val := k.MustFloat64()
	if val < min || val > max {
		return defaultVal
	}
	return val
}

// RangeInt checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeInt(defaultVal, min, max int) int {
	val := k.MustInt()
	if val < min || val > max {
		return defaultVal
	}
	return val
}

// RangeInt64 checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
	val := k.MustInt64()
	if val < min || val > max {
		return defaultVal
	}
	return val
}

// RangeTimeFormat checks if value with given format is in given range inclusively,
// and returns default value if it's not.
// Comparison is done on Unix timestamps (second precision).
func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
	val := k.MustTimeFormat(format)
	if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
		return defaultVal
	}
	return val
}

// RangeTime checks if value with RFC3339 format is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
	return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
}
// Strings returns list of string divided by given delimiter.
// A backslash escapes the delimiter (the delimiter is then kept literally),
// and "\\" produces a single backslash. Each resulting element is trimmed
// of surrounding whitespace. An empty value yields an empty (non-nil) slice.
func (k *Key) Strings(delim string) []string {
	str := k.String()
	if len(str) == 0 {
		return []string{}
	}

	runes := []rune(str)
	vals := make([]string, 0, 2)
	var buf bytes.Buffer
	escape := false // true when the previous rune was an unconsumed backslash
	idx := 0
	for {
		if escape {
			escape = false
			// Keep the backslash unless it escaped another backslash or the delimiter.
			if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) {
				buf.WriteRune('\\')
			}
			buf.WriteRune(runes[idx])
		} else {
			if runes[idx] == '\\' {
				escape = true
			} else if strings.HasPrefix(string(runes[idx:]), delim) {
				// Unescaped delimiter: close the current element.
				idx += len(delim) - 1
				vals = append(vals, strings.TrimSpace(buf.String()))
				buf.Reset()
			} else {
				buf.WriteRune(runes[idx])
			}
		}
		idx++
		if idx == len(runes) {
			break
		}
	}

	// Flush the trailing element, if any.
	if buf.Len() > 0 {
		vals = append(vals, strings.TrimSpace(buf.String()))
	}

	return vals
}
// StringsWithShadows returns list of string divided by given delimiter.
|
||||||
|
// Shadows will also be appended if any.
|
||||||
|
func (k *Key) StringsWithShadows(delim string) []string {
|
||||||
|
vals := k.ValueWithShadows()
|
||||||
|
results := make([]string, 0, len(vals)*2)
|
||||||
|
for i := range vals {
|
||||||
|
if len(vals) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
results = append(results, strings.Split(vals[i], delim)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range results {
|
||||||
|
results[i] = k.transformValue(strings.TrimSpace(results[i]))
|
||||||
|
}
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Float64s(delim string) []float64 {
	vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
	return vals
}

// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Ints(delim string) []int {
	vals, _ := k.parseInts(k.Strings(delim), true, false)
	return vals
}

// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Int64s(delim string) []int64 {
	vals, _ := k.parseInt64s(k.Strings(delim), true, false)
	return vals
}

// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uints(delim string) []uint {
	vals, _ := k.parseUints(k.Strings(delim), true, false)
	return vals
}

// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uint64s(delim string) []uint64 {
	vals, _ := k.parseUint64s(k.Strings(delim), true, false)
	return vals
}

// Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Bools(delim string) []bool {
	vals, _ := k.parseBools(k.Strings(delim), true, false)
	return vals
}

// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
func (k *Key) TimesFormat(format, delim string) []time.Time {
	vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
	return vals
}

// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
func (k *Key) Times(delim string) []time.Time {
	return k.TimesFormat(time.RFC3339, delim)
}
// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
// it will not be included to result list.
func (k *Key) ValidFloat64s(delim string) []float64 {
	vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
	return vals
}

// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
// not be included to result list.
func (k *Key) ValidInts(delim string) []int {
	vals, _ := k.parseInts(k.Strings(delim), false, false)
	return vals
}

// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
// then it will not be included to result list.
func (k *Key) ValidInt64s(delim string) []int64 {
	vals, _ := k.parseInt64s(k.Strings(delim), false, false)
	return vals
}

// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
// then it will not be included to result list.
func (k *Key) ValidUints(delim string) []uint {
	vals, _ := k.parseUints(k.Strings(delim), false, false)
	return vals
}

// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
// integer, then it will not be included to result list.
func (k *Key) ValidUint64s(delim string) []uint64 {
	vals, _ := k.parseUint64s(k.Strings(delim), false, false)
	return vals
}

// ValidBools returns list of bool divided by given delimiter. If some value is not a valid bool,
// then it will not be included to result list.
func (k *Key) ValidBools(delim string) []bool {
	vals, _ := k.parseBools(k.Strings(delim), false, false)
	return vals
}

// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
	vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
	return vals
}

// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
func (k *Key) ValidTimes(delim string) []time.Time {
	return k.ValidTimesFormat(time.RFC3339, delim)
}
// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
	return k.parseFloat64s(k.Strings(delim), false, true)
}

// StrictInts returns list of int divided by given delimiter or error on first invalid input.
func (k *Key) StrictInts(delim string) ([]int, error) {
	return k.parseInts(k.Strings(delim), false, true)
}

// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictInt64s(delim string) ([]int64, error) {
	return k.parseInt64s(k.Strings(delim), false, true)
}

// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
func (k *Key) StrictUints(delim string) ([]uint, error) {
	return k.parseUints(k.Strings(delim), false, true)
}

// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
	return k.parseUint64s(k.Strings(delim), false, true)
}

// StrictBools returns list of bool divided by given delimiter or error on first invalid input.
func (k *Key) StrictBools(delim string) ([]bool, error) {
	return k.parseBools(k.Strings(delim), false, true)
}

// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
// or error on first invalid input.
func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
	return k.parseTimesFormat(format, k.Strings(delim), false, true)
}

// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
// or error on first invalid input.
func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
	return k.StrictTimesFormat(time.RFC3339, delim)
}
// parseBools transforms strings to bools.
// The addInvalid/returnOnInvalid flags are forwarded to doParse; callers use
// (true, false) to map invalid inputs to the zero value, (false, false) to
// drop them, and (false, true) to fail on the first invalid input.
func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) {
	vals := make([]bool, 0, len(strs))
	parser := func(str string) (interface{}, error) {
		val, err := parseBool(str)
		return val, err
	}
	rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
	if err == nil {
		for _, val := range rawVals {
			vals = append(vals, val.(bool))
		}
	}
	return vals, err
}

// parseFloat64s transforms strings to float64s.
// Flag semantics are the same as in parseBools.
func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
	vals := make([]float64, 0, len(strs))
	parser := func(str string) (interface{}, error) {
		val, err := strconv.ParseFloat(str, 64)
		return val, err
	}
	rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
	if err == nil {
		for _, val := range rawVals {
			vals = append(vals, val.(float64))
		}
	}
	return vals, err
}

// parseInts transforms strings to ints.
// Values are parsed as int64 (base 0) and narrowed to int afterwards.
// Flag semantics are the same as in parseBools.
func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
	vals := make([]int, 0, len(strs))
	parser := func(str string) (interface{}, error) {
		val, err := strconv.ParseInt(str, 0, 64)
		return val, err
	}
	rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
	if err == nil {
		for _, val := range rawVals {
			vals = append(vals, int(val.(int64)))
		}
	}
	return vals, err
}

// parseInt64s transforms strings to int64s.
// Flag semantics are the same as in parseBools.
func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
	vals := make([]int64, 0, len(strs))
	parser := func(str string) (interface{}, error) {
		val, err := strconv.ParseInt(str, 0, 64)
		return val, err
	}

	rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
	if err == nil {
		for _, val := range rawVals {
			vals = append(vals, val.(int64))
		}
	}
	return vals, err
}

// parseUints transforms strings to uints.
// Values are parsed as uint64 (base 0) and narrowed to uint afterwards.
// Flag semantics are the same as in parseBools.
func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
	vals := make([]uint, 0, len(strs))
	parser := func(str string) (interface{}, error) {
		val, err := strconv.ParseUint(str, 0, 64)
		return val, err
	}

	rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
	if err == nil {
		for _, val := range rawVals {
			vals = append(vals, uint(val.(uint64)))
		}
	}
	return vals, err
}

// parseUint64s transforms strings to uint64s.
// Flag semantics are the same as in parseBools.
func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
	vals := make([]uint64, 0, len(strs))
	parser := func(str string) (interface{}, error) {
		val, err := strconv.ParseUint(str, 0, 64)
		return val, err
	}
	rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
	if err == nil {
		for _, val := range rawVals {
			vals = append(vals, val.(uint64))
		}
	}
	return vals, err
}

// Parser is a function that parses a single string into a typed value.
type Parser func(str string) (interface{}, error)
|
||||||
|
|
||||||
|
|
||||||
|
// parseTimesFormat transforms strings to times in given format.
|
||||||
|
func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
|
||||||
|
vals := make([]time.Time, 0, len(strs))
|
||||||
|
parser := func(str string) (interface{}, error) {
|
||||||
|
val, err := time.Parse(format, str)
|
||||||
|
return val, err
|
||||||
|
}
|
||||||
|
rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
|
||||||
|
if err == nil {
|
||||||
|
for _, val := range rawVals {
|
||||||
|
vals = append(vals, val.(time.Time))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return vals, err
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// doParse transforms strings to different types
|
||||||
|
func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) {
|
||||||
|
vals := make([]interface{}, 0, len(strs))
|
||||||
|
for _, str := range strs {
|
||||||
|
val, err := parser(str)
|
||||||
|
if err != nil && returnOnInvalid {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err == nil || addInvalid {
|
||||||
|
vals = append(vals, val)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return vals, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetValue changes key value.
|
||||||
|
func (k *Key) SetValue(v string) {
|
||||||
|
if k.s.f.BlockMode {
|
||||||
|
k.s.f.lock.Lock()
|
||||||
|
defer k.s.f.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
k.value = v
|
||||||
|
k.s.keysHash[k.name] = v
|
||||||
|
}
|
535
vendor/github.com/go-ini/ini/parser.go
generated
vendored
Normal file
535
vendor/github.com/go-ini/ini/parser.go
generated
vendored
Normal file
@ -0,0 +1,535 @@
|
|||||||
|
// Copyright 2015 Unknwon
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||||
|
// not use this file except in compliance with the License. You may obtain
|
||||||
|
// a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
// License for the specific language governing permissions and limitations
|
||||||
|
// under the License.
|
||||||
|
|
||||||
|
package ini
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
)
|
||||||
|
|
||||||
|
const minReaderBufferSize = 4096
|
||||||
|
|
||||||
|
var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`)
|
||||||
|
|
||||||
|
type parserOptions struct {
|
||||||
|
IgnoreContinuation bool
|
||||||
|
IgnoreInlineComment bool
|
||||||
|
AllowPythonMultilineValues bool
|
||||||
|
SpaceBeforeInlineComment bool
|
||||||
|
UnescapeValueDoubleQuotes bool
|
||||||
|
UnescapeValueCommentSymbols bool
|
||||||
|
PreserveSurroundedQuote bool
|
||||||
|
DebugFunc DebugFunc
|
||||||
|
ReaderBufferSize int
|
||||||
|
}
|
||||||
|
|
||||||
|
type parser struct {
|
||||||
|
buf *bufio.Reader
|
||||||
|
options parserOptions
|
||||||
|
|
||||||
|
isEOF bool
|
||||||
|
count int
|
||||||
|
comment *bytes.Buffer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) debug(format string, args ...interface{}) {
|
||||||
|
if p.options.DebugFunc != nil {
|
||||||
|
p.options.DebugFunc(fmt.Sprintf(format, args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newParser(r io.Reader, opts parserOptions) *parser {
|
||||||
|
size := opts.ReaderBufferSize
|
||||||
|
if size < minReaderBufferSize {
|
||||||
|
size = minReaderBufferSize
|
||||||
|
}
|
||||||
|
|
||||||
|
return &parser{
|
||||||
|
buf: bufio.NewReaderSize(r, size),
|
||||||
|
options: opts,
|
||||||
|
count: 1,
|
||||||
|
comment: &bytes.Buffer{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format.
|
||||||
|
// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
|
||||||
|
func (p *parser) BOM() error {
|
||||||
|
mask, err := p.buf.Peek(2)
|
||||||
|
if err != nil && err != io.EOF {
|
||||||
|
return err
|
||||||
|
} else if len(mask) < 2 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case mask[0] == 254 && mask[1] == 255:
|
||||||
|
fallthrough
|
||||||
|
case mask[0] == 255 && mask[1] == 254:
|
||||||
|
_, err = p.buf.Read(mask)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case mask[0] == 239 && mask[1] == 187:
|
||||||
|
mask, err := p.buf.Peek(3)
|
||||||
|
if err != nil && err != io.EOF {
|
||||||
|
return err
|
||||||
|
} else if len(mask) < 3 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if mask[2] == 191 {
|
||||||
|
_, err = p.buf.Read(mask)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) readUntil(delim byte) ([]byte, error) {
|
||||||
|
data, err := p.buf.ReadBytes(delim)
|
||||||
|
if err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
p.isEOF = true
|
||||||
|
} else {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func cleanComment(in []byte) ([]byte, bool) {
|
||||||
|
i := bytes.IndexAny(in, "#;")
|
||||||
|
if i == -1 {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
return in[i:], true
|
||||||
|
}
|
||||||
|
|
||||||
|
func readKeyName(delimiters string, in []byte) (string, int, error) {
|
||||||
|
line := string(in)
|
||||||
|
|
||||||
|
// Check if key name surrounded by quotes.
|
||||||
|
var keyQuote string
|
||||||
|
if line[0] == '"' {
|
||||||
|
if len(line) > 6 && string(line[0:3]) == `"""` {
|
||||||
|
keyQuote = `"""`
|
||||||
|
} else {
|
||||||
|
keyQuote = `"`
|
||||||
|
}
|
||||||
|
} else if line[0] == '`' {
|
||||||
|
keyQuote = "`"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get out key name
|
||||||
|
var endIdx int
|
||||||
|
if len(keyQuote) > 0 {
|
||||||
|
startIdx := len(keyQuote)
|
||||||
|
// FIXME: fail case -> """"""name"""=value
|
||||||
|
pos := strings.Index(line[startIdx:], keyQuote)
|
||||||
|
if pos == -1 {
|
||||||
|
return "", -1, fmt.Errorf("missing closing key quote: %s", line)
|
||||||
|
}
|
||||||
|
pos += startIdx
|
||||||
|
|
||||||
|
// Find key-value delimiter
|
||||||
|
i := strings.IndexAny(line[pos+startIdx:], delimiters)
|
||||||
|
if i < 0 {
|
||||||
|
return "", -1, ErrDelimiterNotFound{line}
|
||||||
|
}
|
||||||
|
endIdx = pos + i
|
||||||
|
return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
endIdx = strings.IndexAny(line, delimiters)
|
||||||
|
if endIdx < 0 {
|
||||||
|
return "", -1, ErrDelimiterNotFound{line}
|
||||||
|
}
|
||||||
|
return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
|
||||||
|
for {
|
||||||
|
data, err := p.readUntil('\n')
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
next := string(data)
|
||||||
|
|
||||||
|
pos := strings.LastIndex(next, valQuote)
|
||||||
|
if pos > -1 {
|
||||||
|
val += next[:pos]
|
||||||
|
|
||||||
|
comment, has := cleanComment([]byte(next[pos:]))
|
||||||
|
if has {
|
||||||
|
p.comment.Write(bytes.TrimSpace(comment))
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
val += next
|
||||||
|
if p.isEOF {
|
||||||
|
return "", fmt.Errorf("missing closing key quote from %q to %q", line, next)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) readContinuationLines(val string) (string, error) {
|
||||||
|
for {
|
||||||
|
data, err := p.readUntil('\n')
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
next := strings.TrimSpace(string(data))
|
||||||
|
|
||||||
|
if len(next) == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
val += next
|
||||||
|
if val[len(val)-1] != '\\' {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
val = val[:len(val)-1]
|
||||||
|
}
|
||||||
|
return val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasSurroundedQuote check if and only if the first and last characters
|
||||||
|
// are quotes \" or \'.
|
||||||
|
// It returns false if any other parts also contain same kind of quotes.
|
||||||
|
func hasSurroundedQuote(in string, quote byte) bool {
|
||||||
|
return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote &&
|
||||||
|
strings.IndexByte(in[1:], quote) == len(in)-2
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) readValue(in []byte, bufferSize int) (string, error) {
|
||||||
|
|
||||||
|
line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
|
||||||
|
if len(line) == 0 {
|
||||||
|
if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' {
|
||||||
|
return p.readPythonMultilines(line, bufferSize)
|
||||||
|
}
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var valQuote string
|
||||||
|
if len(line) > 3 && string(line[0:3]) == `"""` {
|
||||||
|
valQuote = `"""`
|
||||||
|
} else if line[0] == '`' {
|
||||||
|
valQuote = "`"
|
||||||
|
} else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' {
|
||||||
|
valQuote = `"`
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(valQuote) > 0 {
|
||||||
|
startIdx := len(valQuote)
|
||||||
|
pos := strings.LastIndex(line[startIdx:], valQuote)
|
||||||
|
// Check for multi-line value
|
||||||
|
if pos == -1 {
|
||||||
|
return p.readMultilines(line, line[startIdx:], valQuote)
|
||||||
|
}
|
||||||
|
|
||||||
|
if p.options.UnescapeValueDoubleQuotes && valQuote == `"` {
|
||||||
|
return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil
|
||||||
|
}
|
||||||
|
return line[startIdx : pos+startIdx], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
lastChar := line[len(line)-1]
|
||||||
|
// Won't be able to reach here if value only contains whitespace
|
||||||
|
line = strings.TrimSpace(line)
|
||||||
|
trimmedLastChar := line[len(line)-1]
|
||||||
|
|
||||||
|
// Check continuation lines when desired
|
||||||
|
if !p.options.IgnoreContinuation && trimmedLastChar == '\\' {
|
||||||
|
return p.readContinuationLines(line[:len(line)-1])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if ignore inline comment
|
||||||
|
if !p.options.IgnoreInlineComment {
|
||||||
|
var i int
|
||||||
|
if p.options.SpaceBeforeInlineComment {
|
||||||
|
i = strings.Index(line, " #")
|
||||||
|
if i == -1 {
|
||||||
|
i = strings.Index(line, " ;")
|
||||||
|
}
|
||||||
|
|
||||||
|
} else {
|
||||||
|
i = strings.IndexAny(line, "#;")
|
||||||
|
}
|
||||||
|
|
||||||
|
if i > -1 {
|
||||||
|
p.comment.WriteString(line[i:])
|
||||||
|
line = strings.TrimSpace(line[:i])
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Trim single and double quotes
|
||||||
|
if (hasSurroundedQuote(line, '\'') ||
|
||||||
|
hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote {
|
||||||
|
line = line[1 : len(line)-1]
|
||||||
|
} else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols {
|
||||||
|
if strings.Contains(line, `\;`) {
|
||||||
|
line = strings.Replace(line, `\;`, ";", -1)
|
||||||
|
}
|
||||||
|
if strings.Contains(line, `\#`) {
|
||||||
|
line = strings.Replace(line, `\#`, "#", -1)
|
||||||
|
}
|
||||||
|
} else if p.options.AllowPythonMultilineValues && lastChar == '\n' {
|
||||||
|
return p.readPythonMultilines(line, bufferSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
return line, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) {
|
||||||
|
parserBufferPeekResult, _ := p.buf.Peek(bufferSize)
|
||||||
|
peekBuffer := bytes.NewBuffer(parserBufferPeekResult)
|
||||||
|
|
||||||
|
indentSize := 0
|
||||||
|
for {
|
||||||
|
peekData, peekErr := peekBuffer.ReadBytes('\n')
|
||||||
|
if peekErr != nil {
|
||||||
|
if peekErr == io.EOF {
|
||||||
|
p.debug("readPythonMultilines: io.EOF, peekData: %q, line: %q", string(peekData), line)
|
||||||
|
return line, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
p.debug("readPythonMultilines: failed to peek with error: %v", peekErr)
|
||||||
|
return "", peekErr
|
||||||
|
}
|
||||||
|
|
||||||
|
p.debug("readPythonMultilines: parsing %q", string(peekData))
|
||||||
|
|
||||||
|
peekMatches := pythonMultiline.FindStringSubmatch(string(peekData))
|
||||||
|
p.debug("readPythonMultilines: matched %d parts", len(peekMatches))
|
||||||
|
for n, v := range peekMatches {
|
||||||
|
p.debug(" %d: %q", n, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return if not a Python multiline value.
|
||||||
|
if len(peekMatches) != 3 {
|
||||||
|
p.debug("readPythonMultilines: end of value, got: %q", line)
|
||||||
|
return line, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine indent size and line prefix.
|
||||||
|
currentIndentSize := len(peekMatches[1])
|
||||||
|
if indentSize < 1 {
|
||||||
|
indentSize = currentIndentSize
|
||||||
|
p.debug("readPythonMultilines: indent size is %d", indentSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make sure each line is indented at least as far as first line.
|
||||||
|
if currentIndentSize < indentSize {
|
||||||
|
p.debug("readPythonMultilines: end of value, current indent: %d, expected indent: %d, line: %q", currentIndentSize, indentSize, line)
|
||||||
|
return line, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Advance the parser reader (buffer) in-sync with the peek buffer.
|
||||||
|
_, err := p.buf.Discard(len(peekData))
|
||||||
|
if err != nil {
|
||||||
|
p.debug("readPythonMultilines: failed to skip to the end, returning error")
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle indented empty line.
|
||||||
|
line += "\n" + peekMatches[1][indentSize:] + peekMatches[2]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parse parses data through an io.Reader.
|
||||||
|
func (f *File) parse(reader io.Reader) (err error) {
|
||||||
|
p := newParser(reader, parserOptions{
|
||||||
|
IgnoreContinuation: f.options.IgnoreContinuation,
|
||||||
|
IgnoreInlineComment: f.options.IgnoreInlineComment,
|
||||||
|
AllowPythonMultilineValues: f.options.AllowPythonMultilineValues,
|
||||||
|
SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment,
|
||||||
|
UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes,
|
||||||
|
UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols,
|
||||||
|
PreserveSurroundedQuote: f.options.PreserveSurroundedQuote,
|
||||||
|
DebugFunc: f.options.DebugFunc,
|
||||||
|
ReaderBufferSize: f.options.ReaderBufferSize,
|
||||||
|
})
|
||||||
|
if err = p.BOM(); err != nil {
|
||||||
|
return fmt.Errorf("BOM: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ignore error because default section name is never empty string.
|
||||||
|
name := DefaultSection
|
||||||
|
if f.options.Insensitive || f.options.InsensitiveSections {
|
||||||
|
name = strings.ToLower(DefaultSection)
|
||||||
|
}
|
||||||
|
section, _ := f.NewSection(name)
|
||||||
|
|
||||||
|
// This "last" is not strictly equivalent to "previous one" if current key is not the first nested key
|
||||||
|
var isLastValueEmpty bool
|
||||||
|
var lastRegularKey *Key
|
||||||
|
|
||||||
|
var line []byte
|
||||||
|
var inUnparseableSection bool
|
||||||
|
|
||||||
|
// NOTE: Iterate and increase `currentPeekSize` until
|
||||||
|
// the size of the parser buffer is found.
|
||||||
|
// TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`.
|
||||||
|
parserBufferSize := 0
|
||||||
|
// NOTE: Peek 4kb at a time.
|
||||||
|
currentPeekSize := minReaderBufferSize
|
||||||
|
|
||||||
|
if f.options.AllowPythonMultilineValues {
|
||||||
|
for {
|
||||||
|
peekBytes, _ := p.buf.Peek(currentPeekSize)
|
||||||
|
peekBytesLength := len(peekBytes)
|
||||||
|
|
||||||
|
if parserBufferSize >= peekBytesLength {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
currentPeekSize *= 2
|
||||||
|
parserBufferSize = peekBytesLength
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for !p.isEOF {
|
||||||
|
line, err = p.readUntil('\n')
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.options.AllowNestedValues &&
|
||||||
|
isLastValueEmpty && len(line) > 0 {
|
||||||
|
if line[0] == ' ' || line[0] == '\t' {
|
||||||
|
err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
line = bytes.TrimLeftFunc(line, unicode.IsSpace)
|
||||||
|
if len(line) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Comments
|
||||||
|
if line[0] == '#' || line[0] == ';' {
|
||||||
|
// Note: we do not care ending line break,
|
||||||
|
// it is needed for adding second line,
|
||||||
|
// so just clean it once at the end when set to value.
|
||||||
|
p.comment.Write(line)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Section
|
||||||
|
if line[0] == '[' {
|
||||||
|
// Read to the next ']' (TODO: support quoted strings)
|
||||||
|
closeIdx := bytes.LastIndexByte(line, ']')
|
||||||
|
if closeIdx == -1 {
|
||||||
|
return fmt.Errorf("unclosed section: %s", line)
|
||||||
|
}
|
||||||
|
|
||||||
|
name := string(line[1:closeIdx])
|
||||||
|
section, err = f.NewSection(name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
comment, has := cleanComment(line[closeIdx+1:])
|
||||||
|
if has {
|
||||||
|
p.comment.Write(comment)
|
||||||
|
}
|
||||||
|
|
||||||
|
section.Comment = strings.TrimSpace(p.comment.String())
|
||||||
|
|
||||||
|
// Reset auto-counter and comments
|
||||||
|
p.comment.Reset()
|
||||||
|
p.count = 1
|
||||||
|
|
||||||
|
inUnparseableSection = false
|
||||||
|
for i := range f.options.UnparseableSections {
|
||||||
|
if f.options.UnparseableSections[i] == name ||
|
||||||
|
((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) {
|
||||||
|
inUnparseableSection = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if inUnparseableSection {
|
||||||
|
section.isRawSection = true
|
||||||
|
section.rawBody += string(line)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line)
|
||||||
|
if err != nil {
|
||||||
|
// Treat as boolean key when desired, and whole line is key name.
|
||||||
|
if IsErrDelimiterNotFound(err) {
|
||||||
|
switch {
|
||||||
|
case f.options.AllowBooleanKeys:
|
||||||
|
kname, err := p.readValue(line, parserBufferSize)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
key, err := section.NewBooleanKey(kname)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
key.Comment = strings.TrimSpace(p.comment.String())
|
||||||
|
p.comment.Reset()
|
||||||
|
continue
|
||||||
|
|
||||||
|
case f.options.SkipUnrecognizableLines:
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Auto increment.
|
||||||
|
isAutoIncr := false
|
||||||
|
if kname == "-" {
|
||||||
|
isAutoIncr = true
|
||||||
|
kname = "#" + strconv.Itoa(p.count)
|
||||||
|
p.count++
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := p.readValue(line[offset:], parserBufferSize)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
isLastValueEmpty = len(value) == 0
|
||||||
|
|
||||||
|
key, err := section.NewKey(kname, value)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
key.isAutoIncrement = isAutoIncr
|
||||||
|
key.Comment = strings.TrimSpace(p.comment.String())
|
||||||
|
p.comment.Reset()
|
||||||
|
lastRegularKey = key
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
256
vendor/github.com/go-ini/ini/section.go
generated
vendored
Normal file
256
vendor/github.com/go-ini/ini/section.go
generated
vendored
Normal file
@ -0,0 +1,256 @@
|
|||||||
|
// Copyright 2014 Unknwon
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||||
|
// not use this file except in compliance with the License. You may obtain
|
||||||
|
// a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
// License for the specific language governing permissions and limitations
|
||||||
|
// under the License.
|
||||||
|
|
||||||
|
package ini
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Section represents a config section.
|
||||||
|
type Section struct {
|
||||||
|
f *File
|
||||||
|
Comment string
|
||||||
|
name string
|
||||||
|
keys map[string]*Key
|
||||||
|
keyList []string
|
||||||
|
keysHash map[string]string
|
||||||
|
|
||||||
|
isRawSection bool
|
||||||
|
rawBody string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newSection(f *File, name string) *Section {
|
||||||
|
return &Section{
|
||||||
|
f: f,
|
||||||
|
name: name,
|
||||||
|
keys: make(map[string]*Key),
|
||||||
|
keyList: make([]string, 0, 10),
|
||||||
|
keysHash: make(map[string]string),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name returns name of Section.
|
||||||
|
func (s *Section) Name() string {
|
||||||
|
return s.name
|
||||||
|
}
|
||||||
|
|
||||||
|
// Body returns rawBody of Section if the section was marked as unparseable.
|
||||||
|
// It still follows the other rules of the INI format surrounding leading/trailing whitespace.
|
||||||
|
func (s *Section) Body() string {
|
||||||
|
return strings.TrimSpace(s.rawBody)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBody updates body content only if section is raw.
|
||||||
|
func (s *Section) SetBody(body string) {
|
||||||
|
if !s.isRawSection {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.rawBody = body
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewKey creates a new key to given section.
|
||||||
|
func (s *Section) NewKey(name, val string) (*Key, error) {
|
||||||
|
if len(name) == 0 {
|
||||||
|
return nil, errors.New("error creating new key: empty key name")
|
||||||
|
} else if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
|
||||||
|
name = strings.ToLower(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if s.f.BlockMode {
|
||||||
|
s.f.lock.Lock()
|
||||||
|
defer s.f.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
if inSlice(name, s.keyList) {
|
||||||
|
if s.f.options.AllowShadows {
|
||||||
|
if err := s.keys[name].addShadow(val); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
s.keys[name].value = val
|
||||||
|
s.keysHash[name] = val
|
||||||
|
}
|
||||||
|
return s.keys[name], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
s.keyList = append(s.keyList, name)
|
||||||
|
s.keys[name] = newKey(s, name, val)
|
||||||
|
s.keysHash[name] = val
|
||||||
|
return s.keys[name], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBooleanKey creates a new boolean type key to given section.
|
||||||
|
func (s *Section) NewBooleanKey(name string) (*Key, error) {
|
||||||
|
key, err := s.NewKey(name, "true")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
key.isBooleanType = true
|
||||||
|
return key, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetKey returns key in section by given name.
|
||||||
|
func (s *Section) GetKey(name string) (*Key, error) {
|
||||||
|
if s.f.BlockMode {
|
||||||
|
s.f.lock.RLock()
|
||||||
|
}
|
||||||
|
if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
|
||||||
|
name = strings.ToLower(name)
|
||||||
|
}
|
||||||
|
key := s.keys[name]
|
||||||
|
if s.f.BlockMode {
|
||||||
|
s.f.lock.RUnlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
if key == nil {
|
||||||
|
// Check if it is a child-section.
|
||||||
|
sname := s.name
|
||||||
|
for {
|
||||||
|
if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
|
||||||
|
sname = sname[:i]
|
||||||
|
sec, err := s.f.GetSection(sname)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return sec.GetKey(name)
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name)
|
||||||
|
}
|
||||||
|
return key, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasKey returns true if section contains a key with given name.
|
||||||
|
func (s *Section) HasKey(name string) bool {
|
||||||
|
key, _ := s.GetKey(name)
|
||||||
|
return key != nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use "HasKey" instead.
|
||||||
|
func (s *Section) Haskey(name string) bool {
|
||||||
|
return s.HasKey(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasValue returns true if section contains given raw value.
|
||||||
|
func (s *Section) HasValue(value string) bool {
|
||||||
|
if s.f.BlockMode {
|
||||||
|
s.f.lock.RLock()
|
||||||
|
defer s.f.lock.RUnlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, k := range s.keys {
|
||||||
|
if value == k.value {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Key assumes named Key exists in section and returns a zero-value when not.
|
||||||
|
func (s *Section) Key(name string) *Key {
|
||||||
|
key, err := s.GetKey(name)
|
||||||
|
if err != nil {
|
||||||
|
// It's OK here because the only possible error is empty key name,
|
||||||
|
// but if it's empty, this piece of code won't be executed.
|
||||||
|
key, _ = s.NewKey(name, "")
|
||||||
|
return key
|
||||||
|
}
|
||||||
|
return key
|
||||||
|
}
|
||||||
|
|
||||||
|
// Keys returns list of keys of section.
|
||||||
|
func (s *Section) Keys() []*Key {
|
||||||
|
keys := make([]*Key, len(s.keyList))
|
||||||
|
for i := range s.keyList {
|
||||||
|
keys[i] = s.Key(s.keyList[i])
|
||||||
|
}
|
||||||
|
return keys
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParentKeys returns list of keys of parent section.
|
||||||
|
func (s *Section) ParentKeys() []*Key {
|
||||||
|
var parentKeys []*Key
|
||||||
|
sname := s.name
|
||||||
|
for {
|
||||||
|
if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
|
||||||
|
sname = sname[:i]
|
||||||
|
sec, err := s.f.GetSection(sname)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
parentKeys = append(parentKeys, sec.Keys()...)
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
return parentKeys
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyStrings returns list of key names of section.
|
||||||
|
func (s *Section) KeyStrings() []string {
|
||||||
|
list := make([]string, len(s.keyList))
|
||||||
|
copy(list, s.keyList)
|
||||||
|
return list
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeysHash returns keys hash consisting of names and values.
|
||||||
|
func (s *Section) KeysHash() map[string]string {
|
||||||
|
if s.f.BlockMode {
|
||||||
|
s.f.lock.RLock()
|
||||||
|
defer s.f.lock.RUnlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
hash := map[string]string{}
|
||||||
|
for key, value := range s.keysHash {
|
||||||
|
hash[key] = value
|
||||||
|
}
|
||||||
|
return hash
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteKey deletes a key from section.
|
||||||
|
func (s *Section) DeleteKey(name string) {
|
||||||
|
if s.f.BlockMode {
|
||||||
|
s.f.lock.Lock()
|
||||||
|
defer s.f.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, k := range s.keyList {
|
||||||
|
if k == name {
|
||||||
|
s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
|
||||||
|
delete(s.keys, name)
|
||||||
|
delete(s.keysHash, name)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChildSections returns a list of child sections of current section.
|
||||||
|
// For example, "[parent.child1]" and "[parent.child12]" are child sections
|
||||||
|
// of section "[parent]".
|
||||||
|
func (s *Section) ChildSections() []*Section {
|
||||||
|
prefix := s.name + s.f.options.ChildSectionDelimiter
|
||||||
|
children := make([]*Section, 0, 3)
|
||||||
|
for _, name := range s.f.sectionList {
|
||||||
|
if strings.HasPrefix(name, prefix) {
|
||||||
|
children = append(children, s.f.sections[name]...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return children
|
||||||
|
}
|
747
vendor/github.com/go-ini/ini/struct.go
generated
vendored
Normal file
747
vendor/github.com/go-ini/ini/struct.go
generated
vendored
Normal file
@ -0,0 +1,747 @@
|
|||||||
|
// Copyright 2014 Unknwon
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||||
|
// not use this file except in compliance with the License. You may obtain
|
||||||
|
// a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
// License for the specific language governing permissions and limitations
|
||||||
|
// under the License.
|
||||||
|
|
||||||
|
package ini
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
"unicode"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NameMapper represents a ini tag name mapper.
|
||||||
|
type NameMapper func(string) string
|
||||||
|
|
||||||
|
// Built-in name getters.
|
||||||
|
var (
|
||||||
|
// SnackCase converts to format SNACK_CASE.
|
||||||
|
SnackCase NameMapper = func(raw string) string {
|
||||||
|
newstr := make([]rune, 0, len(raw))
|
||||||
|
for i, chr := range raw {
|
||||||
|
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
|
||||||
|
if i > 0 {
|
||||||
|
newstr = append(newstr, '_')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
newstr = append(newstr, unicode.ToUpper(chr))
|
||||||
|
}
|
||||||
|
return string(newstr)
|
||||||
|
}
|
||||||
|
// TitleUnderscore converts to format title_underscore.
|
||||||
|
TitleUnderscore NameMapper = func(raw string) string {
|
||||||
|
newstr := make([]rune, 0, len(raw))
|
||||||
|
for i, chr := range raw {
|
||||||
|
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
|
||||||
|
if i > 0 {
|
||||||
|
newstr = append(newstr, '_')
|
||||||
|
}
|
||||||
|
chr -= 'A' - 'a'
|
||||||
|
}
|
||||||
|
newstr = append(newstr, chr)
|
||||||
|
}
|
||||||
|
return string(newstr)
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func (s *Section) parseFieldName(raw, actual string) string {
|
||||||
|
if len(actual) > 0 {
|
||||||
|
return actual
|
||||||
|
}
|
||||||
|
if s.f.NameMapper != nil {
|
||||||
|
return s.f.NameMapper(raw)
|
||||||
|
}
|
||||||
|
return raw
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseDelim(actual string) string {
|
||||||
|
if len(actual) > 0 {
|
||||||
|
return actual
|
||||||
|
}
|
||||||
|
return ","
|
||||||
|
}
|
||||||
|
|
||||||
|
var reflectTime = reflect.TypeOf(time.Now()).Kind()
|
||||||
|
|
||||||
|
// setSliceWithProperType sets proper values to slice based on its type.
|
||||||
|
func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
|
||||||
|
var strs []string
|
||||||
|
if allowShadow {
|
||||||
|
strs = key.StringsWithShadows(delim)
|
||||||
|
} else {
|
||||||
|
strs = key.Strings(delim)
|
||||||
|
}
|
||||||
|
|
||||||
|
numVals := len(strs)
|
||||||
|
if numVals == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var vals interface{}
|
||||||
|
var err error
|
||||||
|
|
||||||
|
sliceOf := field.Type().Elem().Kind()
|
||||||
|
switch sliceOf {
|
||||||
|
case reflect.String:
|
||||||
|
vals = strs
|
||||||
|
case reflect.Int:
|
||||||
|
vals, err = key.parseInts(strs, true, false)
|
||||||
|
case reflect.Int64:
|
||||||
|
vals, err = key.parseInt64s(strs, true, false)
|
||||||
|
case reflect.Uint:
|
||||||
|
vals, err = key.parseUints(strs, true, false)
|
||||||
|
case reflect.Uint64:
|
||||||
|
vals, err = key.parseUint64s(strs, true, false)
|
||||||
|
case reflect.Float64:
|
||||||
|
vals, err = key.parseFloat64s(strs, true, false)
|
||||||
|
case reflect.Bool:
|
||||||
|
vals, err = key.parseBools(strs, true, false)
|
||||||
|
case reflectTime:
|
||||||
|
vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
|
||||||
|
}
|
||||||
|
if err != nil && isStrict {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
slice := reflect.MakeSlice(field.Type(), numVals, numVals)
|
||||||
|
for i := 0; i < numVals; i++ {
|
||||||
|
switch sliceOf {
|
||||||
|
case reflect.String:
|
||||||
|
slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
|
||||||
|
case reflect.Int:
|
||||||
|
slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
|
||||||
|
case reflect.Int64:
|
||||||
|
slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
|
||||||
|
case reflect.Uint:
|
||||||
|
slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
|
||||||
|
case reflect.Uint64:
|
||||||
|
slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
|
||||||
|
case reflect.Float64:
|
||||||
|
slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
|
||||||
|
case reflect.Bool:
|
||||||
|
slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i]))
|
||||||
|
case reflectTime:
|
||||||
|
slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
field.Set(slice)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func wrapStrictError(err error, isStrict bool) error {
|
||||||
|
if isStrict {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// setWithProperType sets proper value to field based on its type,
|
||||||
|
// but it does not return error for failing parsing,
|
||||||
|
// because we want to use default value that is already assigned to struct.
|
||||||
|
func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
|
||||||
|
vt := t
|
||||||
|
isPtr := t.Kind() == reflect.Ptr
|
||||||
|
if isPtr {
|
||||||
|
vt = t.Elem()
|
||||||
|
}
|
||||||
|
switch vt.Kind() {
|
||||||
|
case reflect.String:
|
||||||
|
stringVal := key.String()
|
||||||
|
if isPtr {
|
||||||
|
field.Set(reflect.ValueOf(&stringVal))
|
||||||
|
} else if len(stringVal) > 0 {
|
||||||
|
field.SetString(key.String())
|
||||||
|
}
|
||||||
|
case reflect.Bool:
|
||||||
|
boolVal, err := key.Bool()
|
||||||
|
if err != nil {
|
||||||
|
return wrapStrictError(err, isStrict)
|
||||||
|
}
|
||||||
|
if isPtr {
|
||||||
|
field.Set(reflect.ValueOf(&boolVal))
|
||||||
|
} else {
|
||||||
|
field.SetBool(boolVal)
|
||||||
|
}
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
// ParseDuration will not return err for `0`, so check the type name
|
||||||
|
if vt.Name() == "Duration" {
|
||||||
|
durationVal, err := key.Duration()
|
||||||
|
if err != nil {
|
||||||
|
if intVal, err := key.Int64(); err == nil {
|
||||||
|
field.SetInt(intVal)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return wrapStrictError(err, isStrict)
|
||||||
|
}
|
||||||
|
if isPtr {
|
||||||
|
field.Set(reflect.ValueOf(&durationVal))
|
||||||
|
} else if int64(durationVal) > 0 {
|
||||||
|
field.Set(reflect.ValueOf(durationVal))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
intVal, err := key.Int64()
|
||||||
|
if err != nil {
|
||||||
|
return wrapStrictError(err, isStrict)
|
||||||
|
}
|
||||||
|
if isPtr {
|
||||||
|
pv := reflect.New(t.Elem())
|
||||||
|
pv.Elem().SetInt(intVal)
|
||||||
|
field.Set(pv)
|
||||||
|
} else {
|
||||||
|
field.SetInt(intVal)
|
||||||
|
}
|
||||||
|
// byte is an alias for uint8, so supporting uint8 breaks support for byte
|
||||||
|
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||||
|
durationVal, err := key.Duration()
|
||||||
|
// Skip zero value
|
||||||
|
if err == nil && uint64(durationVal) > 0 {
|
||||||
|
if isPtr {
|
||||||
|
field.Set(reflect.ValueOf(&durationVal))
|
||||||
|
} else {
|
||||||
|
field.Set(reflect.ValueOf(durationVal))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
uintVal, err := key.Uint64()
|
||||||
|
if err != nil {
|
||||||
|
return wrapStrictError(err, isStrict)
|
||||||
|
}
|
||||||
|
if isPtr {
|
||||||
|
pv := reflect.New(t.Elem())
|
||||||
|
pv.Elem().SetUint(uintVal)
|
||||||
|
field.Set(pv)
|
||||||
|
} else {
|
||||||
|
field.SetUint(uintVal)
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
floatVal, err := key.Float64()
|
||||||
|
if err != nil {
|
||||||
|
return wrapStrictError(err, isStrict)
|
||||||
|
}
|
||||||
|
if isPtr {
|
||||||
|
pv := reflect.New(t.Elem())
|
||||||
|
pv.Elem().SetFloat(floatVal)
|
||||||
|
field.Set(pv)
|
||||||
|
} else {
|
||||||
|
field.SetFloat(floatVal)
|
||||||
|
}
|
||||||
|
case reflectTime:
|
||||||
|
timeVal, err := key.Time()
|
||||||
|
if err != nil {
|
||||||
|
return wrapStrictError(err, isStrict)
|
||||||
|
}
|
||||||
|
if isPtr {
|
||||||
|
field.Set(reflect.ValueOf(&timeVal))
|
||||||
|
} else {
|
||||||
|
field.Set(reflect.ValueOf(timeVal))
|
||||||
|
}
|
||||||
|
case reflect.Slice:
|
||||||
|
return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unsupported type %q", t)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) {
|
||||||
|
opts := strings.SplitN(tag, ",", 5)
|
||||||
|
rawName = opts[0]
|
||||||
|
for _, opt := range opts[1:] {
|
||||||
|
omitEmpty = omitEmpty || (opt == "omitempty")
|
||||||
|
allowShadow = allowShadow || (opt == "allowshadow")
|
||||||
|
allowNonUnique = allowNonUnique || (opt == "nonunique")
|
||||||
|
extends = extends || (opt == "extends")
|
||||||
|
}
|
||||||
|
return rawName, omitEmpty, allowShadow, allowNonUnique, extends
|
||||||
|
}
|
||||||
|
|
||||||
|
// mapToField maps the given value to the matching field of the given section.
|
||||||
|
// The sectionIndex is the index (if non unique sections are enabled) to which the value should be added.
|
||||||
|
func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error {
|
||||||
|
if val.Kind() == reflect.Ptr {
|
||||||
|
val = val.Elem()
|
||||||
|
}
|
||||||
|
typ := val.Type()
|
||||||
|
|
||||||
|
for i := 0; i < typ.NumField(); i++ {
|
||||||
|
field := val.Field(i)
|
||||||
|
tpField := typ.Field(i)
|
||||||
|
|
||||||
|
tag := tpField.Tag.Get("ini")
|
||||||
|
if tag == "-" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag)
|
||||||
|
fieldName := s.parseFieldName(tpField.Name, rawName)
|
||||||
|
if len(fieldName) == 0 || !field.CanSet() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
isStruct := tpField.Type.Kind() == reflect.Struct
|
||||||
|
isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct
|
||||||
|
isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
|
||||||
|
if isAnonymousPtr {
|
||||||
|
field.Set(reflect.New(tpField.Type.Elem()))
|
||||||
|
}
|
||||||
|
|
||||||
|
if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) {
|
||||||
|
if isStructPtr && field.IsNil() {
|
||||||
|
field.Set(reflect.New(tpField.Type.Elem()))
|
||||||
|
}
|
||||||
|
fieldSection := s
|
||||||
|
if rawName != "" {
|
||||||
|
sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName
|
||||||
|
if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) {
|
||||||
|
fieldSection = secs[sectionIndex]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil {
|
||||||
|
return fmt.Errorf("map to field %q: %v", fieldName, err)
|
||||||
|
}
|
||||||
|
} else if isAnonymousPtr || isStruct || isStructPtr {
|
||||||
|
if secs, err := s.f.SectionsByName(fieldName); err == nil {
|
||||||
|
if len(secs) <= sectionIndex {
|
||||||
|
return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName)
|
||||||
|
}
|
||||||
|
// Only set the field to non-nil struct value if we have a section for it.
|
||||||
|
// Otherwise, we end up with a non-nil struct ptr even though there is no data.
|
||||||
|
if isStructPtr && field.IsNil() {
|
||||||
|
field.Set(reflect.New(tpField.Type.Elem()))
|
||||||
|
}
|
||||||
|
if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil {
|
||||||
|
return fmt.Errorf("map to field %q: %v", fieldName, err)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Map non-unique sections
|
||||||
|
if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
|
||||||
|
newField, err := s.mapToSlice(fieldName, field, isStrict)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("map to slice %q: %v", fieldName, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
field.Set(newField)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if key, err := s.GetKey(fieldName); err == nil {
|
||||||
|
delim := parseDelim(tpField.Tag.Get("delim"))
|
||||||
|
if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
|
||||||
|
return fmt.Errorf("set field %q: %v", fieldName, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// mapToSlice maps all sections with the same name and returns the new value.
|
||||||
|
// The type of the Value must be a slice.
|
||||||
|
func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) {
|
||||||
|
secs, err := s.f.SectionsByName(secName)
|
||||||
|
if err != nil {
|
||||||
|
return reflect.Value{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
typ := val.Type().Elem()
|
||||||
|
for i, sec := range secs {
|
||||||
|
elem := reflect.New(typ)
|
||||||
|
if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil {
|
||||||
|
return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
val = reflect.Append(val, elem.Elem())
|
||||||
|
}
|
||||||
|
return val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// mapTo maps a section to object v.
|
||||||
|
func (s *Section) mapTo(v interface{}, isStrict bool) error {
|
||||||
|
typ := reflect.TypeOf(v)
|
||||||
|
val := reflect.ValueOf(v)
|
||||||
|
if typ.Kind() == reflect.Ptr {
|
||||||
|
typ = typ.Elem()
|
||||||
|
val = val.Elem()
|
||||||
|
} else {
|
||||||
|
return errors.New("not a pointer to a struct")
|
||||||
|
}
|
||||||
|
|
||||||
|
if typ.Kind() == reflect.Slice {
|
||||||
|
newField, err := s.mapToSlice(s.name, val, isStrict)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
val.Set(newField)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.mapToField(val, isStrict, 0, s.name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MapTo maps section to given struct.
|
||||||
|
func (s *Section) MapTo(v interface{}) error {
|
||||||
|
return s.mapTo(v, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StrictMapTo maps section to given struct in strict mode,
|
||||||
|
// which returns all possible error including value parsing error.
|
||||||
|
func (s *Section) StrictMapTo(v interface{}) error {
|
||||||
|
return s.mapTo(v, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MapTo maps file to given struct.
|
||||||
|
func (f *File) MapTo(v interface{}) error {
|
||||||
|
return f.Section("").MapTo(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StrictMapTo maps file to given struct in strict mode,
|
||||||
|
// which returns all possible error including value parsing error.
|
||||||
|
func (f *File) StrictMapTo(v interface{}) error {
|
||||||
|
return f.Section("").StrictMapTo(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MapToWithMapper maps data sources to given struct with name mapper.
|
||||||
|
func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
|
||||||
|
cfg, err := Load(source, others...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
cfg.NameMapper = mapper
|
||||||
|
return cfg.MapTo(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode,
|
||||||
|
// which returns all possible error including value parsing error.
|
||||||
|
func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
|
||||||
|
cfg, err := Load(source, others...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
cfg.NameMapper = mapper
|
||||||
|
return cfg.StrictMapTo(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MapTo maps data sources to given struct.
|
||||||
|
func MapTo(v, source interface{}, others ...interface{}) error {
|
||||||
|
return MapToWithMapper(v, nil, source, others...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StrictMapTo maps data sources to given struct in strict mode,
|
||||||
|
// which returns all possible error including value parsing error.
|
||||||
|
func StrictMapTo(v, source interface{}, others ...interface{}) error {
|
||||||
|
return StrictMapToWithMapper(v, nil, source, others...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// reflectSliceWithProperType does the opposite thing as setSliceWithProperType.
|
||||||
|
func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error {
|
||||||
|
slice := field.Slice(0, field.Len())
|
||||||
|
if field.Len() == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
sliceOf := field.Type().Elem().Kind()
|
||||||
|
|
||||||
|
if allowShadow {
|
||||||
|
var keyWithShadows *Key
|
||||||
|
for i := 0; i < field.Len(); i++ {
|
||||||
|
var val string
|
||||||
|
switch sliceOf {
|
||||||
|
case reflect.String:
|
||||||
|
val = slice.Index(i).String()
|
||||||
|
case reflect.Int, reflect.Int64:
|
||||||
|
val = fmt.Sprint(slice.Index(i).Int())
|
||||||
|
case reflect.Uint, reflect.Uint64:
|
||||||
|
val = fmt.Sprint(slice.Index(i).Uint())
|
||||||
|
case reflect.Float64:
|
||||||
|
val = fmt.Sprint(slice.Index(i).Float())
|
||||||
|
case reflect.Bool:
|
||||||
|
val = fmt.Sprint(slice.Index(i).Bool())
|
||||||
|
case reflectTime:
|
||||||
|
val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339)
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
|
||||||
|
}
|
||||||
|
|
||||||
|
if i == 0 {
|
||||||
|
keyWithShadows = newKey(key.s, key.name, val)
|
||||||
|
} else {
|
||||||
|
_ = keyWithShadows.AddShadow(val)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
*key = *keyWithShadows
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
for i := 0; i < field.Len(); i++ {
|
||||||
|
switch sliceOf {
|
||||||
|
case reflect.String:
|
||||||
|
buf.WriteString(slice.Index(i).String())
|
||||||
|
case reflect.Int, reflect.Int64:
|
||||||
|
buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
|
||||||
|
case reflect.Uint, reflect.Uint64:
|
||||||
|
buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
|
||||||
|
case reflect.Float64:
|
||||||
|
buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
|
||||||
|
case reflect.Bool:
|
||||||
|
buf.WriteString(fmt.Sprint(slice.Index(i).Bool()))
|
||||||
|
case reflectTime:
|
||||||
|
buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
|
||||||
|
}
|
||||||
|
buf.WriteString(delim)
|
||||||
|
}
|
||||||
|
key.SetValue(buf.String()[:buf.Len()-len(delim)])
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// reflectWithProperType does the opposite thing as setWithProperType.
|
||||||
|
func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error {
|
||||||
|
switch t.Kind() {
|
||||||
|
case reflect.String:
|
||||||
|
key.SetValue(field.String())
|
||||||
|
case reflect.Bool:
|
||||||
|
key.SetValue(fmt.Sprint(field.Bool()))
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
key.SetValue(fmt.Sprint(field.Int()))
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||||
|
key.SetValue(fmt.Sprint(field.Uint()))
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
key.SetValue(fmt.Sprint(field.Float()))
|
||||||
|
case reflectTime:
|
||||||
|
key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
|
||||||
|
case reflect.Slice:
|
||||||
|
return reflectSliceWithProperType(key, field, delim, allowShadow)
|
||||||
|
case reflect.Ptr:
|
||||||
|
if !field.IsNil() {
|
||||||
|
return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unsupported type %q", t)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CR: copied from encoding/json/encode.go with modifications of time.Time support.
|
||||||
|
// TODO: add more test coverage.
|
||||||
|
func isEmptyValue(v reflect.Value) bool {
|
||||||
|
switch v.Kind() {
|
||||||
|
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||||
|
return v.Len() == 0
|
||||||
|
case reflect.Bool:
|
||||||
|
return !v.Bool()
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
return v.Int() == 0
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
|
return v.Uint() == 0
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
return v.Float() == 0
|
||||||
|
case reflect.Interface, reflect.Ptr:
|
||||||
|
return v.IsNil()
|
||||||
|
case reflectTime:
|
||||||
|
t, ok := v.Interface().(time.Time)
|
||||||
|
return ok && t.IsZero()
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// StructReflector is the interface implemented by struct types that can extract themselves into INI objects.
|
||||||
|
type StructReflector interface {
|
||||||
|
ReflectINIStruct(*File) error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Section) reflectFrom(val reflect.Value) error {
|
||||||
|
if val.Kind() == reflect.Ptr {
|
||||||
|
val = val.Elem()
|
||||||
|
}
|
||||||
|
typ := val.Type()
|
||||||
|
|
||||||
|
for i := 0; i < typ.NumField(); i++ {
|
||||||
|
if !val.Field(i).CanInterface() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
field := val.Field(i)
|
||||||
|
tpField := typ.Field(i)
|
||||||
|
|
||||||
|
tag := tpField.Tag.Get("ini")
|
||||||
|
if tag == "-" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag)
|
||||||
|
if omitEmpty && isEmptyValue(field) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if r, ok := field.Interface().(StructReflector); ok {
|
||||||
|
return r.ReflectINIStruct(s.f)
|
||||||
|
}
|
||||||
|
|
||||||
|
fieldName := s.parseFieldName(tpField.Name, rawName)
|
||||||
|
if len(fieldName) == 0 || !field.CanSet() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) {
|
||||||
|
if err := s.reflectFrom(field); err != nil {
|
||||||
|
return fmt.Errorf("reflect from field %q: %v", fieldName, err)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) ||
|
||||||
|
(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
|
||||||
|
// Note: The only error here is section doesn't exist.
|
||||||
|
sec, err := s.f.GetSection(fieldName)
|
||||||
|
if err != nil {
|
||||||
|
// Note: fieldName can never be empty here, ignore error.
|
||||||
|
sec, _ = s.f.NewSection(fieldName)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add comment from comment tag
|
||||||
|
if len(sec.Comment) == 0 {
|
||||||
|
sec.Comment = tpField.Tag.Get("comment")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = sec.reflectFrom(field); err != nil {
|
||||||
|
return fmt.Errorf("reflect from field %q: %v", fieldName, err)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
|
||||||
|
slice := field.Slice(0, field.Len())
|
||||||
|
if field.Len() == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
sliceOf := field.Type().Elem().Kind()
|
||||||
|
|
||||||
|
for i := 0; i < field.Len(); i++ {
|
||||||
|
if sliceOf != reflect.Struct && sliceOf != reflect.Ptr {
|
||||||
|
return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName)
|
||||||
|
}
|
||||||
|
|
||||||
|
sec, err := s.f.NewSection(fieldName)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add comment from comment tag
|
||||||
|
if len(sec.Comment) == 0 {
|
||||||
|
sec.Comment = tpField.Tag.Get("comment")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := sec.reflectFrom(slice.Index(i)); err != nil {
|
||||||
|
return fmt.Errorf("reflect from field %q: %v", fieldName, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note: Same reason as section.
|
||||||
|
key, err := s.GetKey(fieldName)
|
||||||
|
if err != nil {
|
||||||
|
key, _ = s.NewKey(fieldName, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add comment from comment tag
|
||||||
|
if len(key.Comment) == 0 {
|
||||||
|
key.Comment = tpField.Tag.Get("comment")
|
||||||
|
}
|
||||||
|
|
||||||
|
delim := parseDelim(tpField.Tag.Get("delim"))
|
||||||
|
if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
|
||||||
|
return fmt.Errorf("reflect field %q: %v", fieldName, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReflectFrom reflects section from given struct. It overwrites existing ones.
|
||||||
|
func (s *Section) ReflectFrom(v interface{}) error {
|
||||||
|
typ := reflect.TypeOf(v)
|
||||||
|
val := reflect.ValueOf(v)
|
||||||
|
|
||||||
|
if s.name != DefaultSection && s.f.options.AllowNonUniqueSections &&
|
||||||
|
(typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) {
|
||||||
|
// Clear sections to make sure none exists before adding the new ones
|
||||||
|
s.f.DeleteSection(s.name)
|
||||||
|
|
||||||
|
if typ.Kind() == reflect.Ptr {
|
||||||
|
sec, err := s.f.NewSection(s.name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return sec.reflectFrom(val.Elem())
|
||||||
|
}
|
||||||
|
|
||||||
|
slice := val.Slice(0, val.Len())
|
||||||
|
sliceOf := val.Type().Elem().Kind()
|
||||||
|
if sliceOf != reflect.Ptr {
|
||||||
|
return fmt.Errorf("not a slice of pointers")
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < slice.Len(); i++ {
|
||||||
|
sec, err := s.f.NewSection(s.name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = sec.reflectFrom(slice.Index(i))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("reflect from %dth field: %v", i, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if typ.Kind() == reflect.Ptr {
|
||||||
|
val = val.Elem()
|
||||||
|
} else {
|
||||||
|
return errors.New("not a pointer to a struct")
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.reflectFrom(val)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReflectFrom reflects file from given struct.
|
||||||
|
func (f *File) ReflectFrom(v interface{}) error {
|
||||||
|
return f.Section("").ReflectFrom(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReflectFromWithMapper reflects data sources from given struct with name mapper.
|
||||||
|
func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
|
||||||
|
cfg.NameMapper = mapper
|
||||||
|
return cfg.ReflectFrom(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReflectFrom reflects data sources from given struct.
|
||||||
|
func ReflectFrom(cfg *File, v interface{}) error {
|
||||||
|
return ReflectFromWithMapper(cfg, v, nil)
|
||||||
|
}
|
23
vendor/github.com/go-sql-driver/mysql/.github/CONTRIBUTING.md
generated
vendored
23
vendor/github.com/go-sql-driver/mysql/.github/CONTRIBUTING.md
generated
vendored
@ -1,23 +0,0 @@
|
|||||||
# Contributing Guidelines
|
|
||||||
|
|
||||||
## Reporting Issues
|
|
||||||
|
|
||||||
Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed).
|
|
||||||
|
|
||||||
## Contributing Code
|
|
||||||
|
|
||||||
By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file.
|
|
||||||
Don't forget to add yourself to the AUTHORS file.
|
|
||||||
|
|
||||||
### Code Review
|
|
||||||
|
|
||||||
Everyone is invited to review and comment on pull requests.
|
|
||||||
If it looks fine to you, comment with "LGTM" (Looks good to me).
|
|
||||||
|
|
||||||
If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes.
|
|
||||||
|
|
||||||
Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM".
|
|
||||||
|
|
||||||
## Development Ideas
|
|
||||||
|
|
||||||
If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page.
|
|
21
vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md
generated
vendored
21
vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md
generated
vendored
@ -1,21 +0,0 @@
|
|||||||
### Issue description
|
|
||||||
Tell us what should happen and what happens instead
|
|
||||||
|
|
||||||
### Example code
|
|
||||||
```go
|
|
||||||
If possible, please enter some example code here to reproduce the issue.
|
|
||||||
```
|
|
||||||
|
|
||||||
### Error log
|
|
||||||
```
|
|
||||||
If you have an error log, please paste it here.
|
|
||||||
```
|
|
||||||
|
|
||||||
### Configuration
|
|
||||||
*Driver version (or git SHA):*
|
|
||||||
|
|
||||||
*Go version:* run `go version` in your console
|
|
||||||
|
|
||||||
*Server version:* E.g. MySQL 5.6, MariaDB 10.0.20
|
|
||||||
|
|
||||||
*Server OS:* E.g. Debian 8.1 (Jessie), Windows 10
|
|
9
vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md
generated
vendored
9
vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md
generated
vendored
@ -1,9 +0,0 @@
|
|||||||
### Description
|
|
||||||
Please explain the changes you made here.
|
|
||||||
|
|
||||||
### Checklist
|
|
||||||
- [ ] Code compiles correctly
|
|
||||||
- [ ] Created tests which fail without the change (if possible)
|
|
||||||
- [ ] All tests passing
|
|
||||||
- [ ] Extended the README / documentation, if necessary
|
|
||||||
- [ ] Added myself / the copyright holder to the AUTHORS file
|
|
5
vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf
generated
vendored
5
vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf
generated
vendored
@ -1,5 +0,0 @@
|
|||||||
[client]
|
|
||||||
user = gotest
|
|
||||||
password = secret
|
|
||||||
host = 127.0.0.1
|
|
||||||
port = 3307
|
|
7
vendor/github.com/go-sql-driver/mysql/.travis/gofmt.sh
generated
vendored
7
vendor/github.com/go-sql-driver/mysql/.travis/gofmt.sh
generated
vendored
@ -1,7 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
set -ev
|
|
||||||
|
|
||||||
# Only check for go1.10+ since the gofmt style changed
|
|
||||||
if [[ $(go version) =~ go1\.([0-9]+) ]] && ((${BASH_REMATCH[1]} >= 10)); then
|
|
||||||
test -z "$(gofmt -d -s . | tee /dev/stderr)"
|
|
||||||
fi
|
|
8
vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh
generated
vendored
8
vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh
generated
vendored
@ -1,8 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
while :
|
|
||||||
do
|
|
||||||
if mysql -e 'select version()' 2>&1 | grep 'version()\|ERROR 2059 (HY000):'; then
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
sleep 3
|
|
||||||
done
|
|
1
vendor/github.com/go-sql-driver/mysql/AUTHORS
generated
vendored
1
vendor/github.com/go-sql-driver/mysql/AUTHORS
generated
vendored
@ -13,7 +13,6 @@
|
|||||||
|
|
||||||
Aaron Hopkins <go-sql-driver at die.net>
|
Aaron Hopkins <go-sql-driver at die.net>
|
||||||
Achille Roussel <achille.roussel at gmail.com>
|
Achille Roussel <achille.roussel at gmail.com>
|
||||||
Alex Snast <alexsn at fb.com>
|
|
||||||
Alexey Palazhchenko <alexey.palazhchenko at gmail.com>
|
Alexey Palazhchenko <alexey.palazhchenko at gmail.com>
|
||||||
Andrew Reid <andrew.reid at tixtrack.com>
|
Andrew Reid <andrew.reid at tixtrack.com>
|
||||||
Arne Hormann <arnehormann at gmail.com>
|
Arne Hormann <arnehormann at gmail.com>
|
||||||
|
1330
vendor/github.com/go-sql-driver/mysql/auth_test.go
generated
vendored
1330
vendor/github.com/go-sql-driver/mysql/auth_test.go
generated
vendored
File diff suppressed because it is too large
Load Diff
373
vendor/github.com/go-sql-driver/mysql/benchmark_test.go
generated
vendored
373
vendor/github.com/go-sql-driver/mysql/benchmark_test.go
generated
vendored
@ -1,373 +0,0 @@
|
|||||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"database/sql"
|
|
||||||
"database/sql/driver"
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type TB testing.B
|
|
||||||
|
|
||||||
func (tb *TB) check(err error) {
|
|
||||||
if err != nil {
|
|
||||||
tb.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tb *TB) checkDB(db *sql.DB, err error) *sql.DB {
|
|
||||||
tb.check(err)
|
|
||||||
return db
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tb *TB) checkRows(rows *sql.Rows, err error) *sql.Rows {
|
|
||||||
tb.check(err)
|
|
||||||
return rows
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt {
|
|
||||||
tb.check(err)
|
|
||||||
return stmt
|
|
||||||
}
|
|
||||||
|
|
||||||
func initDB(b *testing.B, queries ...string) *sql.DB {
|
|
||||||
tb := (*TB)(b)
|
|
||||||
db := tb.checkDB(sql.Open("mysql", dsn))
|
|
||||||
for _, query := range queries {
|
|
||||||
if _, err := db.Exec(query); err != nil {
|
|
||||||
b.Fatalf("error on %q: %v", query, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return db
|
|
||||||
}
|
|
||||||
|
|
||||||
const concurrencyLevel = 10
|
|
||||||
|
|
||||||
func BenchmarkQuery(b *testing.B) {
|
|
||||||
tb := (*TB)(b)
|
|
||||||
b.StopTimer()
|
|
||||||
b.ReportAllocs()
|
|
||||||
db := initDB(b,
|
|
||||||
"DROP TABLE IF EXISTS foo",
|
|
||||||
"CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
|
|
||||||
`INSERT INTO foo VALUES (1, "one")`,
|
|
||||||
`INSERT INTO foo VALUES (2, "two")`,
|
|
||||||
)
|
|
||||||
db.SetMaxIdleConns(concurrencyLevel)
|
|
||||||
defer db.Close()
|
|
||||||
|
|
||||||
stmt := tb.checkStmt(db.Prepare("SELECT val FROM foo WHERE id=?"))
|
|
||||||
defer stmt.Close()
|
|
||||||
|
|
||||||
remain := int64(b.N)
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
wg.Add(concurrencyLevel)
|
|
||||||
defer wg.Wait()
|
|
||||||
b.StartTimer()
|
|
||||||
|
|
||||||
for i := 0; i < concurrencyLevel; i++ {
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
if atomic.AddInt64(&remain, -1) < 0 {
|
|
||||||
wg.Done()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var got string
|
|
||||||
tb.check(stmt.QueryRow(1).Scan(&got))
|
|
||||||
if got != "one" {
|
|
||||||
b.Errorf("query = %q; want one", got)
|
|
||||||
wg.Done()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkExec(b *testing.B) {
|
|
||||||
tb := (*TB)(b)
|
|
||||||
b.StopTimer()
|
|
||||||
b.ReportAllocs()
|
|
||||||
db := tb.checkDB(sql.Open("mysql", dsn))
|
|
||||||
db.SetMaxIdleConns(concurrencyLevel)
|
|
||||||
defer db.Close()
|
|
||||||
|
|
||||||
stmt := tb.checkStmt(db.Prepare("DO 1"))
|
|
||||||
defer stmt.Close()
|
|
||||||
|
|
||||||
remain := int64(b.N)
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
wg.Add(concurrencyLevel)
|
|
||||||
defer wg.Wait()
|
|
||||||
b.StartTimer()
|
|
||||||
|
|
||||||
for i := 0; i < concurrencyLevel; i++ {
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
if atomic.AddInt64(&remain, -1) < 0 {
|
|
||||||
wg.Done()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := stmt.Exec(); err != nil {
|
|
||||||
b.Fatal(err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// data, but no db writes
|
|
||||||
var roundtripSample []byte
|
|
||||||
|
|
||||||
func initRoundtripBenchmarks() ([]byte, int, int) {
|
|
||||||
if roundtripSample == nil {
|
|
||||||
roundtripSample = []byte(strings.Repeat("0123456789abcdef", 1024*1024))
|
|
||||||
}
|
|
||||||
return roundtripSample, 16, len(roundtripSample)
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkRoundtripTxt(b *testing.B) {
|
|
||||||
b.StopTimer()
|
|
||||||
sample, min, max := initRoundtripBenchmarks()
|
|
||||||
sampleString := string(sample)
|
|
||||||
b.ReportAllocs()
|
|
||||||
tb := (*TB)(b)
|
|
||||||
db := tb.checkDB(sql.Open("mysql", dsn))
|
|
||||||
defer db.Close()
|
|
||||||
b.StartTimer()
|
|
||||||
var result string
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
length := min + i
|
|
||||||
if length > max {
|
|
||||||
length = max
|
|
||||||
}
|
|
||||||
test := sampleString[0:length]
|
|
||||||
rows := tb.checkRows(db.Query(`SELECT "` + test + `"`))
|
|
||||||
if !rows.Next() {
|
|
||||||
rows.Close()
|
|
||||||
b.Fatalf("crashed")
|
|
||||||
}
|
|
||||||
err := rows.Scan(&result)
|
|
||||||
if err != nil {
|
|
||||||
rows.Close()
|
|
||||||
b.Fatalf("crashed")
|
|
||||||
}
|
|
||||||
if result != test {
|
|
||||||
rows.Close()
|
|
||||||
b.Errorf("mismatch")
|
|
||||||
}
|
|
||||||
rows.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkRoundtripBin(b *testing.B) {
|
|
||||||
b.StopTimer()
|
|
||||||
sample, min, max := initRoundtripBenchmarks()
|
|
||||||
b.ReportAllocs()
|
|
||||||
tb := (*TB)(b)
|
|
||||||
db := tb.checkDB(sql.Open("mysql", dsn))
|
|
||||||
defer db.Close()
|
|
||||||
stmt := tb.checkStmt(db.Prepare("SELECT ?"))
|
|
||||||
defer stmt.Close()
|
|
||||||
b.StartTimer()
|
|
||||||
var result sql.RawBytes
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
length := min + i
|
|
||||||
if length > max {
|
|
||||||
length = max
|
|
||||||
}
|
|
||||||
test := sample[0:length]
|
|
||||||
rows := tb.checkRows(stmt.Query(test))
|
|
||||||
if !rows.Next() {
|
|
||||||
rows.Close()
|
|
||||||
b.Fatalf("crashed")
|
|
||||||
}
|
|
||||||
err := rows.Scan(&result)
|
|
||||||
if err != nil {
|
|
||||||
rows.Close()
|
|
||||||
b.Fatalf("crashed")
|
|
||||||
}
|
|
||||||
if !bytes.Equal(result, test) {
|
|
||||||
rows.Close()
|
|
||||||
b.Errorf("mismatch")
|
|
||||||
}
|
|
||||||
rows.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkInterpolation(b *testing.B) {
|
|
||||||
mc := &mysqlConn{
|
|
||||||
cfg: &Config{
|
|
||||||
InterpolateParams: true,
|
|
||||||
Loc: time.UTC,
|
|
||||||
},
|
|
||||||
maxAllowedPacket: maxPacketSize,
|
|
||||||
maxWriteSize: maxPacketSize - 1,
|
|
||||||
buf: newBuffer(nil),
|
|
||||||
}
|
|
||||||
|
|
||||||
args := []driver.Value{
|
|
||||||
int64(42424242),
|
|
||||||
float64(math.Pi),
|
|
||||||
false,
|
|
||||||
time.Unix(1423411542, 807015000),
|
|
||||||
[]byte("bytes containing special chars ' \" \a \x00"),
|
|
||||||
"string containing special chars ' \" \a \x00",
|
|
||||||
}
|
|
||||||
q := "SELECT ?, ?, ?, ?, ?, ?"
|
|
||||||
|
|
||||||
b.ReportAllocs()
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
_, err := mc.interpolateParams(q, args)
|
|
||||||
if err != nil {
|
|
||||||
b.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func benchmarkQueryContext(b *testing.B, db *sql.DB, p int) {
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
db.SetMaxIdleConns(p * runtime.GOMAXPROCS(0))
|
|
||||||
|
|
||||||
tb := (*TB)(b)
|
|
||||||
stmt := tb.checkStmt(db.PrepareContext(ctx, "SELECT val FROM foo WHERE id=?"))
|
|
||||||
defer stmt.Close()
|
|
||||||
|
|
||||||
b.SetParallelism(p)
|
|
||||||
b.ReportAllocs()
|
|
||||||
b.ResetTimer()
|
|
||||||
b.RunParallel(func(pb *testing.PB) {
|
|
||||||
var got string
|
|
||||||
for pb.Next() {
|
|
||||||
tb.check(stmt.QueryRow(1).Scan(&got))
|
|
||||||
if got != "one" {
|
|
||||||
b.Fatalf("query = %q; want one", got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkQueryContext(b *testing.B) {
|
|
||||||
db := initDB(b,
|
|
||||||
"DROP TABLE IF EXISTS foo",
|
|
||||||
"CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
|
|
||||||
`INSERT INTO foo VALUES (1, "one")`,
|
|
||||||
`INSERT INTO foo VALUES (2, "two")`,
|
|
||||||
)
|
|
||||||
defer db.Close()
|
|
||||||
for _, p := range []int{1, 2, 3, 4} {
|
|
||||||
b.Run(fmt.Sprintf("%d", p), func(b *testing.B) {
|
|
||||||
benchmarkQueryContext(b, db, p)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func benchmarkExecContext(b *testing.B, db *sql.DB, p int) {
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
db.SetMaxIdleConns(p * runtime.GOMAXPROCS(0))
|
|
||||||
|
|
||||||
tb := (*TB)(b)
|
|
||||||
stmt := tb.checkStmt(db.PrepareContext(ctx, "DO 1"))
|
|
||||||
defer stmt.Close()
|
|
||||||
|
|
||||||
b.SetParallelism(p)
|
|
||||||
b.ReportAllocs()
|
|
||||||
b.ResetTimer()
|
|
||||||
b.RunParallel(func(pb *testing.PB) {
|
|
||||||
for pb.Next() {
|
|
||||||
if _, err := stmt.ExecContext(ctx); err != nil {
|
|
||||||
b.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkExecContext(b *testing.B) {
|
|
||||||
db := initDB(b,
|
|
||||||
"DROP TABLE IF EXISTS foo",
|
|
||||||
"CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
|
|
||||||
`INSERT INTO foo VALUES (1, "one")`,
|
|
||||||
`INSERT INTO foo VALUES (2, "two")`,
|
|
||||||
)
|
|
||||||
defer db.Close()
|
|
||||||
for _, p := range []int{1, 2, 3, 4} {
|
|
||||||
b.Run(fmt.Sprintf("%d", p), func(b *testing.B) {
|
|
||||||
benchmarkQueryContext(b, db, p)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// BenchmarkQueryRawBytes benchmarks fetching 100 blobs using sql.RawBytes.
|
|
||||||
// "size=" means size of each blobs.
|
|
||||||
func BenchmarkQueryRawBytes(b *testing.B) {
|
|
||||||
var sizes []int = []int{100, 1000, 2000, 4000, 8000, 12000, 16000, 32000, 64000, 256000}
|
|
||||||
db := initDB(b,
|
|
||||||
"DROP TABLE IF EXISTS bench_rawbytes",
|
|
||||||
"CREATE TABLE bench_rawbytes (id INT PRIMARY KEY, val LONGBLOB)",
|
|
||||||
)
|
|
||||||
defer db.Close()
|
|
||||||
|
|
||||||
blob := make([]byte, sizes[len(sizes)-1])
|
|
||||||
for i := range blob {
|
|
||||||
blob[i] = 42
|
|
||||||
}
|
|
||||||
for i := 0; i < 100; i++ {
|
|
||||||
_, err := db.Exec("INSERT INTO bench_rawbytes VALUES (?, ?)", i, blob)
|
|
||||||
if err != nil {
|
|
||||||
b.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, s := range sizes {
|
|
||||||
b.Run(fmt.Sprintf("size=%v", s), func(b *testing.B) {
|
|
||||||
db.SetMaxIdleConns(0)
|
|
||||||
db.SetMaxIdleConns(1)
|
|
||||||
b.ReportAllocs()
|
|
||||||
b.ResetTimer()
|
|
||||||
|
|
||||||
for j := 0; j < b.N; j++ {
|
|
||||||
rows, err := db.Query("SELECT LEFT(val, ?) as v FROM bench_rawbytes", s)
|
|
||||||
if err != nil {
|
|
||||||
b.Fatal(err)
|
|
||||||
}
|
|
||||||
nrows := 0
|
|
||||||
for rows.Next() {
|
|
||||||
var buf sql.RawBytes
|
|
||||||
err := rows.Scan(&buf)
|
|
||||||
if err != nil {
|
|
||||||
b.Fatal(err)
|
|
||||||
}
|
|
||||||
if len(buf) != s {
|
|
||||||
b.Fatalf("size mismatch: expected %v, got %v", s, len(buf))
|
|
||||||
}
|
|
||||||
nrows++
|
|
||||||
}
|
|
||||||
rows.Close()
|
|
||||||
if nrows != 100 {
|
|
||||||
b.Fatalf("numbers of rows mismatch: expected %v, got %v", 100, nrows)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
38
vendor/github.com/go-sql-driver/mysql/conncheck_test.go
generated
vendored
38
vendor/github.com/go-sql-driver/mysql/conncheck_test.go
generated
vendored
@ -1,38 +0,0 @@
|
|||||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
// +build linux darwin dragonfly freebsd netbsd openbsd solaris illumos
|
|
||||||
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestStaleConnectionChecks(t *testing.T) {
|
|
||||||
runTests(t, dsn, func(dbt *DBTest) {
|
|
||||||
dbt.mustExec("SET @@SESSION.wait_timeout = 2")
|
|
||||||
|
|
||||||
if err := dbt.db.Ping(); err != nil {
|
|
||||||
dbt.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// wait for MySQL to close our connection
|
|
||||||
time.Sleep(3 * time.Second)
|
|
||||||
|
|
||||||
tx, err := dbt.db.Begin()
|
|
||||||
if err != nil {
|
|
||||||
dbt.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tx.Rollback(); err != nil {
|
|
||||||
dbt.Fatal(err)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
9
vendor/github.com/go-sql-driver/mysql/connection.go
generated
vendored
9
vendor/github.com/go-sql-driver/mysql/connection.go
generated
vendored
@ -12,7 +12,6 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"database/sql"
|
"database/sql"
|
||||||
"database/sql/driver"
|
"database/sql/driver"
|
||||||
"encoding/json"
|
|
||||||
"io"
|
"io"
|
||||||
"net"
|
"net"
|
||||||
"strconv"
|
"strconv"
|
||||||
@ -272,14 +271,6 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
|
|||||||
}
|
}
|
||||||
buf = append(buf, '\'')
|
buf = append(buf, '\'')
|
||||||
}
|
}
|
||||||
case json.RawMessage:
|
|
||||||
buf = append(buf, '\'')
|
|
||||||
if mc.status&statusNoBackslashEscapes == 0 {
|
|
||||||
buf = escapeBytesBackslash(buf, v)
|
|
||||||
} else {
|
|
||||||
buf = escapeBytesQuotes(buf, v)
|
|
||||||
}
|
|
||||||
buf = append(buf, '\'')
|
|
||||||
case []byte:
|
case []byte:
|
||||||
if v == nil {
|
if v == nil {
|
||||||
buf = append(buf, "NULL"...)
|
buf = append(buf, "NULL"...)
|
||||||
|
203
vendor/github.com/go-sql-driver/mysql/connection_test.go
generated
vendored
203
vendor/github.com/go-sql-driver/mysql/connection_test.go
generated
vendored
@ -1,203 +0,0 @@
|
|||||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"database/sql/driver"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"net"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestInterpolateParams(t *testing.T) {
|
|
||||||
mc := &mysqlConn{
|
|
||||||
buf: newBuffer(nil),
|
|
||||||
maxAllowedPacket: maxPacketSize,
|
|
||||||
cfg: &Config{
|
|
||||||
InterpolateParams: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
q, err := mc.interpolateParams("SELECT ?+?", []driver.Value{int64(42), "gopher"})
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Expected err=nil, got %#v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
expected := `SELECT 42+'gopher'`
|
|
||||||
if q != expected {
|
|
||||||
t.Errorf("Expected: %q\nGot: %q", expected, q)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestInterpolateParamsJSONRawMessage(t *testing.T) {
|
|
||||||
mc := &mysqlConn{
|
|
||||||
buf: newBuffer(nil),
|
|
||||||
maxAllowedPacket: maxPacketSize,
|
|
||||||
cfg: &Config{
|
|
||||||
InterpolateParams: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
buf, err := json.Marshal(struct {
|
|
||||||
Value int `json:"value"`
|
|
||||||
}{Value: 42})
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Expected err=nil, got %#v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
q, err := mc.interpolateParams("SELECT ?", []driver.Value{json.RawMessage(buf)})
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Expected err=nil, got %#v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
expected := `SELECT '{\"value\":42}'`
|
|
||||||
if q != expected {
|
|
||||||
t.Errorf("Expected: %q\nGot: %q", expected, q)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestInterpolateParamsTooManyPlaceholders(t *testing.T) {
|
|
||||||
mc := &mysqlConn{
|
|
||||||
buf: newBuffer(nil),
|
|
||||||
maxAllowedPacket: maxPacketSize,
|
|
||||||
cfg: &Config{
|
|
||||||
InterpolateParams: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
q, err := mc.interpolateParams("SELECT ?+?", []driver.Value{int64(42)})
|
|
||||||
if err != driver.ErrSkip {
|
|
||||||
t.Errorf("Expected err=driver.ErrSkip, got err=%#v, q=%#v", err, q)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// We don't support placeholder in string literal for now.
|
|
||||||
// https://github.com/go-sql-driver/mysql/pull/490
|
|
||||||
func TestInterpolateParamsPlaceholderInString(t *testing.T) {
|
|
||||||
mc := &mysqlConn{
|
|
||||||
buf: newBuffer(nil),
|
|
||||||
maxAllowedPacket: maxPacketSize,
|
|
||||||
cfg: &Config{
|
|
||||||
InterpolateParams: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
q, err := mc.interpolateParams("SELECT 'abc?xyz',?", []driver.Value{int64(42)})
|
|
||||||
// When InterpolateParams support string literal, this should return `"SELECT 'abc?xyz', 42`
|
|
||||||
if err != driver.ErrSkip {
|
|
||||||
t.Errorf("Expected err=driver.ErrSkip, got err=%#v, q=%#v", err, q)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestInterpolateParamsUint64(t *testing.T) {
|
|
||||||
mc := &mysqlConn{
|
|
||||||
buf: newBuffer(nil),
|
|
||||||
maxAllowedPacket: maxPacketSize,
|
|
||||||
cfg: &Config{
|
|
||||||
InterpolateParams: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
q, err := mc.interpolateParams("SELECT ?", []driver.Value{uint64(42)})
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Expected err=nil, got err=%#v, q=%#v", err, q)
|
|
||||||
}
|
|
||||||
if q != "SELECT 42" {
|
|
||||||
t.Errorf("Expected uint64 interpolation to work, got q=%#v", q)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckNamedValue(t *testing.T) {
|
|
||||||
value := driver.NamedValue{Value: ^uint64(0)}
|
|
||||||
x := &mysqlConn{}
|
|
||||||
err := x.CheckNamedValue(&value)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("uint64 high-bit not convertible", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if value.Value != ^uint64(0) {
|
|
||||||
t.Fatalf("uint64 high-bit converted, got %#v %T", value.Value, value.Value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestCleanCancel tests passed context is cancelled at start.
|
|
||||||
// No packet should be sent. Connection should keep current status.
|
|
||||||
func TestCleanCancel(t *testing.T) {
|
|
||||||
mc := &mysqlConn{
|
|
||||||
closech: make(chan struct{}),
|
|
||||||
}
|
|
||||||
mc.startWatcher()
|
|
||||||
defer mc.cleanup()
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
cancel()
|
|
||||||
|
|
||||||
for i := 0; i < 3; i++ { // Repeat same behavior
|
|
||||||
err := mc.Ping(ctx)
|
|
||||||
if err != context.Canceled {
|
|
||||||
t.Errorf("expected context.Canceled, got %#v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if mc.closed.IsSet() {
|
|
||||||
t.Error("expected mc is not closed, closed actually")
|
|
||||||
}
|
|
||||||
|
|
||||||
if mc.watching {
|
|
||||||
t.Error("expected watching is false, but true")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPingMarkBadConnection(t *testing.T) {
|
|
||||||
nc := badConnection{err: errors.New("boom")}
|
|
||||||
ms := &mysqlConn{
|
|
||||||
netConn: nc,
|
|
||||||
buf: newBuffer(nc),
|
|
||||||
maxAllowedPacket: defaultMaxAllowedPacket,
|
|
||||||
}
|
|
||||||
|
|
||||||
err := ms.Ping(context.Background())
|
|
||||||
|
|
||||||
if err != driver.ErrBadConn {
|
|
||||||
t.Errorf("expected driver.ErrBadConn, got %#v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPingErrInvalidConn(t *testing.T) {
|
|
||||||
nc := badConnection{err: errors.New("failed to write"), n: 10}
|
|
||||||
ms := &mysqlConn{
|
|
||||||
netConn: nc,
|
|
||||||
buf: newBuffer(nc),
|
|
||||||
maxAllowedPacket: defaultMaxAllowedPacket,
|
|
||||||
closech: make(chan struct{}),
|
|
||||||
}
|
|
||||||
|
|
||||||
err := ms.Ping(context.Background())
|
|
||||||
|
|
||||||
if err != ErrInvalidConn {
|
|
||||||
t.Errorf("expected ErrInvalidConn, got %#v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type badConnection struct {
|
|
||||||
n int
|
|
||||||
err error
|
|
||||||
net.Conn
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bc badConnection) Write(b []byte) (n int, err error) {
|
|
||||||
return bc.n, bc.err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bc badConnection) Close() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
30
vendor/github.com/go-sql-driver/mysql/connector_test.go
generated
vendored
30
vendor/github.com/go-sql-driver/mysql/connector_test.go
generated
vendored
@ -1,30 +0,0 @@
|
|||||||
package mysql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"net"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestConnectorReturnsTimeout(t *testing.T) {
|
|
||||||
connector := &connector{&Config{
|
|
||||||
Net: "tcp",
|
|
||||||
Addr: "1.1.1.1:1234",
|
|
||||||
Timeout: 10 * time.Millisecond,
|
|
||||||
}}
|
|
||||||
|
|
||||||
_, err := connector.Connect(context.Background())
|
|
||||||
if err == nil {
|
|
||||||
t.Fatal("error expected")
|
|
||||||
}
|
|
||||||
|
|
||||||
if nerr, ok := err.(*net.OpError); ok {
|
|
||||||
expected := "dial tcp 1.1.1.1:1234: i/o timeout"
|
|
||||||
if nerr.Error() != expected {
|
|
||||||
t.Fatalf("expected %q, got %q", expected, nerr.Error())
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
t.Fatalf("expected %T, got %T", nerr, err)
|
|
||||||
}
|
|
||||||
}
|
|
3165
vendor/github.com/go-sql-driver/mysql/driver_test.go
generated
vendored
3165
vendor/github.com/go-sql-driver/mysql/driver_test.go
generated
vendored
File diff suppressed because it is too large
Load Diff
415
vendor/github.com/go-sql-driver/mysql/dsn_test.go
generated
vendored
415
vendor/github.com/go-sql-driver/mysql/dsn_test.go
generated
vendored
@ -1,415 +0,0 @@
|
|||||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
"reflect"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var testDSNs = []struct {
|
|
||||||
in string
|
|
||||||
out *Config
|
|
||||||
}{{
|
|
||||||
"username:password@protocol(address)/dbname?param=value",
|
|
||||||
&Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
|
|
||||||
}, {
|
|
||||||
"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true",
|
|
||||||
&Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, ColumnsWithAlias: true},
|
|
||||||
}, {
|
|
||||||
"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true&multiStatements=true",
|
|
||||||
&Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, ColumnsWithAlias: true, MultiStatements: true},
|
|
||||||
}, {
|
|
||||||
"user@unix(/path/to/socket)/dbname?charset=utf8",
|
|
||||||
&Config{User: "user", Net: "unix", Addr: "/path/to/socket", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
|
|
||||||
}, {
|
|
||||||
"user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true",
|
|
||||||
&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, TLSConfig: "true"},
|
|
||||||
}, {
|
|
||||||
"user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify",
|
|
||||||
&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8mb4,utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, TLSConfig: "skip-verify"},
|
|
||||||
}, {
|
|
||||||
"user:password@/dbname?loc=UTC&timeout=30s&readTimeout=1s&writeTimeout=1s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci&maxAllowedPacket=16777216&tls=false&allowCleartextPasswords=true&parseTime=true&rejectReadOnly=true",
|
|
||||||
&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_unicode_ci", Loc: time.UTC, TLSConfig: "false", AllowCleartextPasswords: true, AllowNativePasswords: true, Timeout: 30 * time.Second, ReadTimeout: time.Second, WriteTimeout: time.Second, AllowAllFiles: true, AllowOldPasswords: true, CheckConnLiveness: true, ClientFoundRows: true, MaxAllowedPacket: 16777216, ParseTime: true, RejectReadOnly: true},
|
|
||||||
}, {
|
|
||||||
"user:password@/dbname?allowNativePasswords=false&checkConnLiveness=false&maxAllowedPacket=0",
|
|
||||||
&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: 0, AllowNativePasswords: false, CheckConnLiveness: false},
|
|
||||||
}, {
|
|
||||||
"user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local",
|
|
||||||
&Config{User: "user", Passwd: "p@ss(word)", Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:80", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.Local, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
|
|
||||||
}, {
|
|
||||||
"/dbname",
|
|
||||||
&Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
|
|
||||||
}, {
|
|
||||||
"@/",
|
|
||||||
&Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
|
|
||||||
}, {
|
|
||||||
"/",
|
|
||||||
&Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
|
|
||||||
}, {
|
|
||||||
"",
|
|
||||||
&Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
|
|
||||||
}, {
|
|
||||||
"user:p@/ssword@/",
|
|
||||||
&Config{User: "user", Passwd: "p@/ssword", Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
|
|
||||||
}, {
|
|
||||||
"unix/?arg=%2Fsome%2Fpath.ext",
|
|
||||||
&Config{Net: "unix", Addr: "/tmp/mysql.sock", Params: map[string]string{"arg": "/some/path.ext"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
|
|
||||||
}, {
|
|
||||||
"tcp(127.0.0.1)/dbname",
|
|
||||||
&Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
|
|
||||||
}, {
|
|
||||||
"tcp(de:ad:be:ef::ca:fe)/dbname",
|
|
||||||
&Config{Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDSNParser(t *testing.T) {
|
|
||||||
for i, tst := range testDSNs {
|
|
||||||
cfg, err := ParseDSN(tst.in)
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
// pointer not static
|
|
||||||
cfg.tls = nil
|
|
||||||
|
|
||||||
if !reflect.DeepEqual(cfg, tst.out) {
|
|
||||||
t.Errorf("%d. ParseDSN(%q) mismatch:\ngot %+v\nwant %+v", i, tst.in, cfg, tst.out)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDSNParserInvalid(t *testing.T) {
|
|
||||||
var invalidDSNs = []string{
|
|
||||||
"@net(addr/", // no closing brace
|
|
||||||
"@tcp(/", // no closing brace
|
|
||||||
"tcp(/", // no closing brace
|
|
||||||
"(/", // no closing brace
|
|
||||||
"net(addr)//", // unescaped
|
|
||||||
"User:pass@tcp(1.2.3.4:3306)", // no trailing slash
|
|
||||||
"net()/", // unknown default addr
|
|
||||||
//"/dbname?arg=/some/unescaped/path",
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, tst := range invalidDSNs {
|
|
||||||
if _, err := ParseDSN(tst); err == nil {
|
|
||||||
t.Errorf("invalid DSN #%d. (%s) didn't error!", i, tst)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDSNReformat(t *testing.T) {
|
|
||||||
for i, tst := range testDSNs {
|
|
||||||
dsn1 := tst.in
|
|
||||||
cfg1, err := ParseDSN(dsn1)
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err.Error())
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
cfg1.tls = nil // pointer not static
|
|
||||||
res1 := fmt.Sprintf("%+v", cfg1)
|
|
||||||
|
|
||||||
dsn2 := cfg1.FormatDSN()
|
|
||||||
cfg2, err := ParseDSN(dsn2)
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err.Error())
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
cfg2.tls = nil // pointer not static
|
|
||||||
res2 := fmt.Sprintf("%+v", cfg2)
|
|
||||||
|
|
||||||
if res1 != res2 {
|
|
||||||
t.Errorf("%d. %q does not match %q", i, res2, res1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDSNServerPubKey(t *testing.T) {
|
|
||||||
baseDSN := "User:password@tcp(localhost:5555)/dbname?serverPubKey="
|
|
||||||
|
|
||||||
RegisterServerPubKey("testKey", testPubKeyRSA)
|
|
||||||
defer DeregisterServerPubKey("testKey")
|
|
||||||
|
|
||||||
tst := baseDSN + "testKey"
|
|
||||||
cfg, err := ParseDSN(tst)
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.ServerPubKey != "testKey" {
|
|
||||||
t.Errorf("unexpected cfg.ServerPubKey value: %v", cfg.ServerPubKey)
|
|
||||||
}
|
|
||||||
if cfg.pubKey != testPubKeyRSA {
|
|
||||||
t.Error("pub key pointer doesn't match")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Key is missing
|
|
||||||
tst = baseDSN + "invalid_name"
|
|
||||||
cfg, err = ParseDSN(tst)
|
|
||||||
if err == nil {
|
|
||||||
t.Errorf("invalid name in DSN (%s) but did not error. Got config: %#v", tst, cfg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDSNServerPubKeyQueryEscape(t *testing.T) {
|
|
||||||
const name = "&%!:"
|
|
||||||
dsn := "User:password@tcp(localhost:5555)/dbname?serverPubKey=" + url.QueryEscape(name)
|
|
||||||
|
|
||||||
RegisterServerPubKey(name, testPubKeyRSA)
|
|
||||||
defer DeregisterServerPubKey(name)
|
|
||||||
|
|
||||||
cfg, err := ParseDSN(dsn)
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.pubKey != testPubKeyRSA {
|
|
||||||
t.Error("pub key pointer doesn't match")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDSNWithCustomTLS(t *testing.T) {
|
|
||||||
baseDSN := "User:password@tcp(localhost:5555)/dbname?tls="
|
|
||||||
tlsCfg := tls.Config{}
|
|
||||||
|
|
||||||
RegisterTLSConfig("utils_test", &tlsCfg)
|
|
||||||
defer DeregisterTLSConfig("utils_test")
|
|
||||||
|
|
||||||
// Custom TLS is missing
|
|
||||||
tst := baseDSN + "invalid_tls"
|
|
||||||
cfg, err := ParseDSN(tst)
|
|
||||||
if err == nil {
|
|
||||||
t.Errorf("invalid custom TLS in DSN (%s) but did not error. Got config: %#v", tst, cfg)
|
|
||||||
}
|
|
||||||
|
|
||||||
tst = baseDSN + "utils_test"
|
|
||||||
|
|
||||||
// Custom TLS with a server name
|
|
||||||
name := "foohost"
|
|
||||||
tlsCfg.ServerName = name
|
|
||||||
cfg, err = ParseDSN(tst)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err.Error())
|
|
||||||
} else if cfg.tls.ServerName != name {
|
|
||||||
t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, tst)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Custom TLS without a server name
|
|
||||||
name = "localhost"
|
|
||||||
tlsCfg.ServerName = ""
|
|
||||||
cfg, err = ParseDSN(tst)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err.Error())
|
|
||||||
} else if cfg.tls.ServerName != name {
|
|
||||||
t.Errorf("did not get the correct ServerName (%s) parsing DSN (%s).", name, tst)
|
|
||||||
} else if tlsCfg.ServerName != "" {
|
|
||||||
t.Errorf("tlsCfg was mutated ServerName (%s) should be empty parsing DSN (%s).", name, tst)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDSNTLSConfig(t *testing.T) {
|
|
||||||
expectedServerName := "example.com"
|
|
||||||
dsn := "tcp(example.com:1234)/?tls=true"
|
|
||||||
|
|
||||||
cfg, err := ParseDSN(dsn)
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err.Error())
|
|
||||||
}
|
|
||||||
if cfg.tls == nil {
|
|
||||||
t.Error("cfg.tls should not be nil")
|
|
||||||
}
|
|
||||||
if cfg.tls.ServerName != expectedServerName {
|
|
||||||
t.Errorf("cfg.tls.ServerName should be %q, got %q (host with port)", expectedServerName, cfg.tls.ServerName)
|
|
||||||
}
|
|
||||||
|
|
||||||
dsn = "tcp(example.com)/?tls=true"
|
|
||||||
cfg, err = ParseDSN(dsn)
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err.Error())
|
|
||||||
}
|
|
||||||
if cfg.tls == nil {
|
|
||||||
t.Error("cfg.tls should not be nil")
|
|
||||||
}
|
|
||||||
if cfg.tls.ServerName != expectedServerName {
|
|
||||||
t.Errorf("cfg.tls.ServerName should be %q, got %q (host without port)", expectedServerName, cfg.tls.ServerName)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDSNWithCustomTLSQueryEscape(t *testing.T) {
|
|
||||||
const configKey = "&%!:"
|
|
||||||
dsn := "User:password@tcp(localhost:5555)/dbname?tls=" + url.QueryEscape(configKey)
|
|
||||||
name := "foohost"
|
|
||||||
tlsCfg := tls.Config{ServerName: name}
|
|
||||||
|
|
||||||
RegisterTLSConfig(configKey, &tlsCfg)
|
|
||||||
defer DeregisterTLSConfig(configKey)
|
|
||||||
|
|
||||||
cfg, err := ParseDSN(dsn)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err.Error())
|
|
||||||
} else if cfg.tls.ServerName != name {
|
|
||||||
t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, dsn)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDSNUnsafeCollation(t *testing.T) {
|
|
||||||
_, err := ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=true")
|
|
||||||
if err != errInvalidDSNUnsafeCollation {
|
|
||||||
t.Errorf("expected %v, got %v", errInvalidDSNUnsafeCollation, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=false")
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("expected %v, got %v", nil, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = ParseDSN("/dbname?collation=gbk_chinese_ci")
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("expected %v, got %v", nil, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = ParseDSN("/dbname?collation=ascii_bin&interpolateParams=true")
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("expected %v, got %v", nil, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = ParseDSN("/dbname?collation=latin1_german1_ci&interpolateParams=true")
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("expected %v, got %v", nil, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = ParseDSN("/dbname?collation=utf8_general_ci&interpolateParams=true")
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("expected %v, got %v", nil, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = ParseDSN("/dbname?collation=utf8mb4_general_ci&interpolateParams=true")
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("expected %v, got %v", nil, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParamsAreSorted(t *testing.T) {
|
|
||||||
expected := "/dbname?interpolateParams=true&foobar=baz&quux=loo"
|
|
||||||
cfg := NewConfig()
|
|
||||||
cfg.DBName = "dbname"
|
|
||||||
cfg.InterpolateParams = true
|
|
||||||
cfg.Params = map[string]string{
|
|
||||||
"quux": "loo",
|
|
||||||
"foobar": "baz",
|
|
||||||
}
|
|
||||||
actual := cfg.FormatDSN()
|
|
||||||
if actual != expected {
|
|
||||||
t.Errorf("generic Config.Params were not sorted: want %#v, got %#v", expected, actual)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCloneConfig(t *testing.T) {
|
|
||||||
RegisterServerPubKey("testKey", testPubKeyRSA)
|
|
||||||
defer DeregisterServerPubKey("testKey")
|
|
||||||
|
|
||||||
expectedServerName := "example.com"
|
|
||||||
dsn := "tcp(example.com:1234)/?tls=true&foobar=baz&serverPubKey=testKey"
|
|
||||||
cfg, err := ParseDSN(dsn)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
cfg2 := cfg.Clone()
|
|
||||||
if cfg == cfg2 {
|
|
||||||
t.Errorf("Config.Clone did not create a separate config struct")
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg2.tls.ServerName != expectedServerName {
|
|
||||||
t.Errorf("cfg.tls.ServerName should be %q, got %q (host with port)", expectedServerName, cfg.tls.ServerName)
|
|
||||||
}
|
|
||||||
|
|
||||||
cfg2.tls.ServerName = "example2.com"
|
|
||||||
if cfg.tls.ServerName == cfg2.tls.ServerName {
|
|
||||||
t.Errorf("changed cfg.tls.Server name should not propagate to original Config")
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := cfg2.Params["foobar"]; !ok {
|
|
||||||
t.Errorf("cloned Config is missing custom params")
|
|
||||||
}
|
|
||||||
|
|
||||||
delete(cfg2.Params, "foobar")
|
|
||||||
|
|
||||||
if _, ok := cfg.Params["foobar"]; !ok {
|
|
||||||
t.Errorf("custom params in cloned Config should not propagate to original Config")
|
|
||||||
}
|
|
||||||
|
|
||||||
if !reflect.DeepEqual(cfg.pubKey, cfg2.pubKey) {
|
|
||||||
t.Errorf("public key in Config should be identical")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNormalizeTLSConfig(t *testing.T) {
|
|
||||||
tt := []struct {
|
|
||||||
tlsConfig string
|
|
||||||
want *tls.Config
|
|
||||||
}{
|
|
||||||
{"", nil},
|
|
||||||
{"false", nil},
|
|
||||||
{"true", &tls.Config{ServerName: "myserver"}},
|
|
||||||
{"skip-verify", &tls.Config{InsecureSkipVerify: true}},
|
|
||||||
{"preferred", &tls.Config{InsecureSkipVerify: true}},
|
|
||||||
{"test_tls_config", &tls.Config{ServerName: "myServerName"}},
|
|
||||||
}
|
|
||||||
|
|
||||||
RegisterTLSConfig("test_tls_config", &tls.Config{ServerName: "myServerName"})
|
|
||||||
defer func() { DeregisterTLSConfig("test_tls_config") }()
|
|
||||||
|
|
||||||
for _, tc := range tt {
|
|
||||||
t.Run(tc.tlsConfig, func(t *testing.T) {
|
|
||||||
cfg := &Config{
|
|
||||||
Addr: "myserver:3306",
|
|
||||||
TLSConfig: tc.tlsConfig,
|
|
||||||
}
|
|
||||||
|
|
||||||
cfg.normalize()
|
|
||||||
|
|
||||||
if cfg.tls == nil {
|
|
||||||
if tc.want != nil {
|
|
||||||
t.Fatal("wanted a tls config but got nil instead")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.tls.ServerName != tc.want.ServerName {
|
|
||||||
t.Errorf("tls.ServerName doesn't match (want: '%s', got: '%s')",
|
|
||||||
tc.want.ServerName, cfg.tls.ServerName)
|
|
||||||
}
|
|
||||||
if cfg.tls.InsecureSkipVerify != tc.want.InsecureSkipVerify {
|
|
||||||
t.Errorf("tls.InsecureSkipVerify doesn't match (want: %T, got :%T)",
|
|
||||||
tc.want.InsecureSkipVerify, cfg.tls.InsecureSkipVerify)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkParseDSN(b *testing.B) {
|
|
||||||
b.ReportAllocs()
|
|
||||||
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
for _, tst := range testDSNs {
|
|
||||||
if _, err := ParseDSN(tst.in); err != nil {
|
|
||||||
b.Error(err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
42
vendor/github.com/go-sql-driver/mysql/errors_test.go
generated
vendored
42
vendor/github.com/go-sql-driver/mysql/errors_test.go
generated
vendored
@ -1,42 +0,0 @@
|
|||||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"log"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestErrorsSetLogger(t *testing.T) {
|
|
||||||
previous := errLog
|
|
||||||
defer func() {
|
|
||||||
errLog = previous
|
|
||||||
}()
|
|
||||||
|
|
||||||
// set up logger
|
|
||||||
const expected = "prefix: test\n"
|
|
||||||
buffer := bytes.NewBuffer(make([]byte, 0, 64))
|
|
||||||
logger := log.New(buffer, "prefix: ", 0)
|
|
||||||
|
|
||||||
// print
|
|
||||||
SetLogger(logger)
|
|
||||||
errLog.Print("test")
|
|
||||||
|
|
||||||
// check result
|
|
||||||
if actual := buffer.String(); actual != expected {
|
|
||||||
t.Errorf("expected %q, got %q", expected, actual)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestErrorsStrictIgnoreNotes(t *testing.T) {
|
|
||||||
runTests(t, dsn+"&sql_notes=false", func(dbt *DBTest) {
|
|
||||||
dbt.mustExec("DROP TABLE IF EXISTS does_not_exist")
|
|
||||||
})
|
|
||||||
}
|
|
62
vendor/github.com/go-sql-driver/mysql/nulltime_test.go
generated
vendored
62
vendor/github.com/go-sql-driver/mysql/nulltime_test.go
generated
vendored
@ -1,62 +0,0 @@
|
|||||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"database/sql"
|
|
||||||
"database/sql/driver"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// Check implementation of interfaces
|
|
||||||
_ driver.Valuer = NullTime{}
|
|
||||||
_ sql.Scanner = (*NullTime)(nil)
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestScanNullTime(t *testing.T) {
|
|
||||||
var scanTests = []struct {
|
|
||||||
in interface{}
|
|
||||||
error bool
|
|
||||||
valid bool
|
|
||||||
time time.Time
|
|
||||||
}{
|
|
||||||
{tDate, false, true, tDate},
|
|
||||||
{sDate, false, true, tDate},
|
|
||||||
{[]byte(sDate), false, true, tDate},
|
|
||||||
{tDateTime, false, true, tDateTime},
|
|
||||||
{sDateTime, false, true, tDateTime},
|
|
||||||
{[]byte(sDateTime), false, true, tDateTime},
|
|
||||||
{tDate0, false, true, tDate0},
|
|
||||||
{sDate0, false, true, tDate0},
|
|
||||||
{[]byte(sDate0), false, true, tDate0},
|
|
||||||
{sDateTime0, false, true, tDate0},
|
|
||||||
{[]byte(sDateTime0), false, true, tDate0},
|
|
||||||
{"", true, false, tDate0},
|
|
||||||
{"1234", true, false, tDate0},
|
|
||||||
{0, true, false, tDate0},
|
|
||||||
}
|
|
||||||
|
|
||||||
var nt = NullTime{}
|
|
||||||
var err error
|
|
||||||
|
|
||||||
for _, tst := range scanTests {
|
|
||||||
err = nt.Scan(tst.in)
|
|
||||||
if (err != nil) != tst.error {
|
|
||||||
t.Errorf("%v: expected error status %t, got %t", tst.in, tst.error, (err != nil))
|
|
||||||
}
|
|
||||||
if nt.Valid != tst.valid {
|
|
||||||
t.Errorf("%v: expected valid status %t, got %t", tst.in, tst.valid, nt.Valid)
|
|
||||||
}
|
|
||||||
if nt.Time != tst.time {
|
|
||||||
t.Errorf("%v: expected time %v, got %v", tst.in, tst.time, nt.Time)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
336
vendor/github.com/go-sql-driver/mysql/packets_test.go
generated
vendored
336
vendor/github.com/go-sql-driver/mysql/packets_test.go
generated
vendored
@ -1,336 +0,0 @@
|
|||||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"net"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
errConnClosed = errors.New("connection is closed")
|
|
||||||
errConnTooManyReads = errors.New("too many reads")
|
|
||||||
errConnTooManyWrites = errors.New("too many writes")
|
|
||||||
)
|
|
||||||
|
|
||||||
// struct to mock a net.Conn for testing purposes
|
|
||||||
type mockConn struct {
|
|
||||||
laddr net.Addr
|
|
||||||
raddr net.Addr
|
|
||||||
data []byte
|
|
||||||
written []byte
|
|
||||||
queuedReplies [][]byte
|
|
||||||
closed bool
|
|
||||||
read int
|
|
||||||
reads int
|
|
||||||
writes int
|
|
||||||
maxReads int
|
|
||||||
maxWrites int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *mockConn) Read(b []byte) (n int, err error) {
|
|
||||||
if m.closed {
|
|
||||||
return 0, errConnClosed
|
|
||||||
}
|
|
||||||
|
|
||||||
m.reads++
|
|
||||||
if m.maxReads > 0 && m.reads > m.maxReads {
|
|
||||||
return 0, errConnTooManyReads
|
|
||||||
}
|
|
||||||
|
|
||||||
n = copy(b, m.data)
|
|
||||||
m.read += n
|
|
||||||
m.data = m.data[n:]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
func (m *mockConn) Write(b []byte) (n int, err error) {
|
|
||||||
if m.closed {
|
|
||||||
return 0, errConnClosed
|
|
||||||
}
|
|
||||||
|
|
||||||
m.writes++
|
|
||||||
if m.maxWrites > 0 && m.writes > m.maxWrites {
|
|
||||||
return 0, errConnTooManyWrites
|
|
||||||
}
|
|
||||||
|
|
||||||
n = len(b)
|
|
||||||
m.written = append(m.written, b...)
|
|
||||||
|
|
||||||
if n > 0 && len(m.queuedReplies) > 0 {
|
|
||||||
m.data = m.queuedReplies[0]
|
|
||||||
m.queuedReplies = m.queuedReplies[1:]
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
func (m *mockConn) Close() error {
|
|
||||||
m.closed = true
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
func (m *mockConn) LocalAddr() net.Addr {
|
|
||||||
return m.laddr
|
|
||||||
}
|
|
||||||
func (m *mockConn) RemoteAddr() net.Addr {
|
|
||||||
return m.raddr
|
|
||||||
}
|
|
||||||
func (m *mockConn) SetDeadline(t time.Time) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
func (m *mockConn) SetReadDeadline(t time.Time) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
func (m *mockConn) SetWriteDeadline(t time.Time) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// make sure mockConn implements the net.Conn interface
|
|
||||||
var _ net.Conn = new(mockConn)
|
|
||||||
|
|
||||||
func newRWMockConn(sequence uint8) (*mockConn, *mysqlConn) {
|
|
||||||
conn := new(mockConn)
|
|
||||||
mc := &mysqlConn{
|
|
||||||
buf: newBuffer(conn),
|
|
||||||
cfg: NewConfig(),
|
|
||||||
netConn: conn,
|
|
||||||
closech: make(chan struct{}),
|
|
||||||
maxAllowedPacket: defaultMaxAllowedPacket,
|
|
||||||
sequence: sequence,
|
|
||||||
}
|
|
||||||
return conn, mc
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadPacketSingleByte(t *testing.T) {
|
|
||||||
conn := new(mockConn)
|
|
||||||
mc := &mysqlConn{
|
|
||||||
buf: newBuffer(conn),
|
|
||||||
}
|
|
||||||
|
|
||||||
conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff}
|
|
||||||
conn.maxReads = 1
|
|
||||||
packet, err := mc.readPacket()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if len(packet) != 1 {
|
|
||||||
t.Fatalf("unexpected packet length: expected %d, got %d", 1, len(packet))
|
|
||||||
}
|
|
||||||
if packet[0] != 0xff {
|
|
||||||
t.Fatalf("unexpected packet content: expected %x, got %x", 0xff, packet[0])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadPacketWrongSequenceID(t *testing.T) {
|
|
||||||
conn := new(mockConn)
|
|
||||||
mc := &mysqlConn{
|
|
||||||
buf: newBuffer(conn),
|
|
||||||
}
|
|
||||||
|
|
||||||
// too low sequence id
|
|
||||||
conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff}
|
|
||||||
conn.maxReads = 1
|
|
||||||
mc.sequence = 1
|
|
||||||
_, err := mc.readPacket()
|
|
||||||
if err != ErrPktSync {
|
|
||||||
t.Errorf("expected ErrPktSync, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// reset
|
|
||||||
conn.reads = 0
|
|
||||||
mc.sequence = 0
|
|
||||||
mc.buf = newBuffer(conn)
|
|
||||||
|
|
||||||
// too high sequence id
|
|
||||||
conn.data = []byte{0x01, 0x00, 0x00, 0x42, 0xff}
|
|
||||||
_, err = mc.readPacket()
|
|
||||||
if err != ErrPktSyncMul {
|
|
||||||
t.Errorf("expected ErrPktSyncMul, got %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadPacketSplit(t *testing.T) {
|
|
||||||
conn := new(mockConn)
|
|
||||||
mc := &mysqlConn{
|
|
||||||
buf: newBuffer(conn),
|
|
||||||
}
|
|
||||||
|
|
||||||
data := make([]byte, maxPacketSize*2+4*3)
|
|
||||||
const pkt2ofs = maxPacketSize + 4
|
|
||||||
const pkt3ofs = 2 * (maxPacketSize + 4)
|
|
||||||
|
|
||||||
// case 1: payload has length maxPacketSize
|
|
||||||
data = data[:pkt2ofs+4]
|
|
||||||
|
|
||||||
// 1st packet has maxPacketSize length and sequence id 0
|
|
||||||
// ff ff ff 00 ...
|
|
||||||
data[0] = 0xff
|
|
||||||
data[1] = 0xff
|
|
||||||
data[2] = 0xff
|
|
||||||
|
|
||||||
// mark the payload start and end of 1st packet so that we can check if the
|
|
||||||
// content was correctly appended
|
|
||||||
data[4] = 0x11
|
|
||||||
data[maxPacketSize+3] = 0x22
|
|
||||||
|
|
||||||
// 2nd packet has payload length 0 and squence id 1
|
|
||||||
// 00 00 00 01
|
|
||||||
data[pkt2ofs+3] = 0x01
|
|
||||||
|
|
||||||
conn.data = data
|
|
||||||
conn.maxReads = 3
|
|
||||||
packet, err := mc.readPacket()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if len(packet) != maxPacketSize {
|
|
||||||
t.Fatalf("unexpected packet length: expected %d, got %d", maxPacketSize, len(packet))
|
|
||||||
}
|
|
||||||
if packet[0] != 0x11 {
|
|
||||||
t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
|
|
||||||
}
|
|
||||||
if packet[maxPacketSize-1] != 0x22 {
|
|
||||||
t.Fatalf("unexpected payload end: expected %x, got %x", 0x22, packet[maxPacketSize-1])
|
|
||||||
}
|
|
||||||
|
|
||||||
// case 2: payload has length which is a multiple of maxPacketSize
|
|
||||||
data = data[:cap(data)]
|
|
||||||
|
|
||||||
// 2nd packet now has maxPacketSize length
|
|
||||||
data[pkt2ofs] = 0xff
|
|
||||||
data[pkt2ofs+1] = 0xff
|
|
||||||
data[pkt2ofs+2] = 0xff
|
|
||||||
|
|
||||||
// mark the payload start and end of the 2nd packet
|
|
||||||
data[pkt2ofs+4] = 0x33
|
|
||||||
data[pkt2ofs+maxPacketSize+3] = 0x44
|
|
||||||
|
|
||||||
// 3rd packet has payload length 0 and squence id 2
|
|
||||||
// 00 00 00 02
|
|
||||||
data[pkt3ofs+3] = 0x02
|
|
||||||
|
|
||||||
conn.data = data
|
|
||||||
conn.reads = 0
|
|
||||||
conn.maxReads = 5
|
|
||||||
mc.sequence = 0
|
|
||||||
packet, err = mc.readPacket()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if len(packet) != 2*maxPacketSize {
|
|
||||||
t.Fatalf("unexpected packet length: expected %d, got %d", 2*maxPacketSize, len(packet))
|
|
||||||
}
|
|
||||||
if packet[0] != 0x11 {
|
|
||||||
t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
|
|
||||||
}
|
|
||||||
if packet[2*maxPacketSize-1] != 0x44 {
|
|
||||||
t.Fatalf("unexpected payload end: expected %x, got %x", 0x44, packet[2*maxPacketSize-1])
|
|
||||||
}
|
|
||||||
|
|
||||||
// case 3: payload has a length larger maxPacketSize, which is not an exact
|
|
||||||
// multiple of it
|
|
||||||
data = data[:pkt2ofs+4+42]
|
|
||||||
data[pkt2ofs] = 0x2a
|
|
||||||
data[pkt2ofs+1] = 0x00
|
|
||||||
data[pkt2ofs+2] = 0x00
|
|
||||||
data[pkt2ofs+4+41] = 0x44
|
|
||||||
|
|
||||||
conn.data = data
|
|
||||||
conn.reads = 0
|
|
||||||
conn.maxReads = 4
|
|
||||||
mc.sequence = 0
|
|
||||||
packet, err = mc.readPacket()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if len(packet) != maxPacketSize+42 {
|
|
||||||
t.Fatalf("unexpected packet length: expected %d, got %d", maxPacketSize+42, len(packet))
|
|
||||||
}
|
|
||||||
if packet[0] != 0x11 {
|
|
||||||
t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
|
|
||||||
}
|
|
||||||
if packet[maxPacketSize+41] != 0x44 {
|
|
||||||
t.Fatalf("unexpected payload end: expected %x, got %x", 0x44, packet[maxPacketSize+41])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadPacketFail(t *testing.T) {
|
|
||||||
conn := new(mockConn)
|
|
||||||
mc := &mysqlConn{
|
|
||||||
buf: newBuffer(conn),
|
|
||||||
closech: make(chan struct{}),
|
|
||||||
}
|
|
||||||
|
|
||||||
// illegal empty (stand-alone) packet
|
|
||||||
conn.data = []byte{0x00, 0x00, 0x00, 0x00}
|
|
||||||
conn.maxReads = 1
|
|
||||||
_, err := mc.readPacket()
|
|
||||||
if err != ErrInvalidConn {
|
|
||||||
t.Errorf("expected ErrInvalidConn, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// reset
|
|
||||||
conn.reads = 0
|
|
||||||
mc.sequence = 0
|
|
||||||
mc.buf = newBuffer(conn)
|
|
||||||
|
|
||||||
// fail to read header
|
|
||||||
conn.closed = true
|
|
||||||
_, err = mc.readPacket()
|
|
||||||
if err != ErrInvalidConn {
|
|
||||||
t.Errorf("expected ErrInvalidConn, got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// reset
|
|
||||||
conn.closed = false
|
|
||||||
conn.reads = 0
|
|
||||||
mc.sequence = 0
|
|
||||||
mc.buf = newBuffer(conn)
|
|
||||||
|
|
||||||
// fail to read body
|
|
||||||
conn.maxReads = 1
|
|
||||||
_, err = mc.readPacket()
|
|
||||||
if err != ErrInvalidConn {
|
|
||||||
t.Errorf("expected ErrInvalidConn, got %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// https://github.com/go-sql-driver/mysql/pull/801
|
|
||||||
// not-NUL terminated plugin_name in init packet
|
|
||||||
func TestRegression801(t *testing.T) {
|
|
||||||
conn := new(mockConn)
|
|
||||||
mc := &mysqlConn{
|
|
||||||
buf: newBuffer(conn),
|
|
||||||
cfg: new(Config),
|
|
||||||
sequence: 42,
|
|
||||||
closech: make(chan struct{}),
|
|
||||||
}
|
|
||||||
|
|
||||||
conn.data = []byte{72, 0, 0, 42, 10, 53, 46, 53, 46, 56, 0, 165, 0, 0, 0,
|
|
||||||
60, 70, 63, 58, 68, 104, 34, 97, 0, 223, 247, 33, 2, 0, 15, 128, 21, 0,
|
|
||||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 98, 120, 114, 47, 85, 75, 109, 99, 51, 77,
|
|
||||||
50, 64, 0, 109, 121, 115, 113, 108, 95, 110, 97, 116, 105, 118, 101, 95,
|
|
||||||
112, 97, 115, 115, 119, 111, 114, 100}
|
|
||||||
conn.maxReads = 1
|
|
||||||
|
|
||||||
authData, pluginName, err := mc.readHandshakePacket()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("got error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if pluginName != "mysql_native_password" {
|
|
||||||
t.Errorf("expected plugin name 'mysql_native_password', got '%s'", pluginName)
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedAuthData := []byte{60, 70, 63, 58, 68, 104, 34, 97, 98, 120, 114,
|
|
||||||
47, 85, 75, 109, 99, 51, 77, 50, 64}
|
|
||||||
if !bytes.Equal(authData, expectedAuthData) {
|
|
||||||
t.Errorf("expected authData '%v', got '%v'", expectedAuthData, authData)
|
|
||||||
}
|
|
||||||
}
|
|
126
vendor/github.com/go-sql-driver/mysql/statement_test.go
generated
vendored
126
vendor/github.com/go-sql-driver/mysql/statement_test.go
generated
vendored
@ -1,126 +0,0 @@
|
|||||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestConvertDerivedString(t *testing.T) {
|
|
||||||
type derived string
|
|
||||||
|
|
||||||
output, err := converter{}.ConvertValue(derived("value"))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("Derived string type not convertible", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if output != "value" {
|
|
||||||
t.Fatalf("Derived string type not converted, got %#v %T", output, output)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConvertDerivedByteSlice(t *testing.T) {
|
|
||||||
type derived []uint8
|
|
||||||
|
|
||||||
output, err := converter{}.ConvertValue(derived("value"))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("Byte slice not convertible", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if bytes.Compare(output.([]byte), []byte("value")) != 0 {
|
|
||||||
t.Fatalf("Byte slice not converted, got %#v %T", output, output)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConvertDerivedUnsupportedSlice(t *testing.T) {
|
|
||||||
type derived []int
|
|
||||||
|
|
||||||
_, err := converter{}.ConvertValue(derived{1})
|
|
||||||
if err == nil || err.Error() != "unsupported type mysql.derived, a slice of int" {
|
|
||||||
t.Fatal("Unexpected error", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConvertDerivedBool(t *testing.T) {
|
|
||||||
type derived bool
|
|
||||||
|
|
||||||
output, err := converter{}.ConvertValue(derived(true))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("Derived bool type not convertible", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if output != true {
|
|
||||||
t.Fatalf("Derived bool type not converted, got %#v %T", output, output)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConvertPointer(t *testing.T) {
|
|
||||||
str := "value"
|
|
||||||
|
|
||||||
output, err := converter{}.ConvertValue(&str)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("Pointer type not convertible", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if output != "value" {
|
|
||||||
t.Fatalf("Pointer type not converted, got %#v %T", output, output)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConvertSignedIntegers(t *testing.T) {
|
|
||||||
values := []interface{}{
|
|
||||||
int8(-42),
|
|
||||||
int16(-42),
|
|
||||||
int32(-42),
|
|
||||||
int64(-42),
|
|
||||||
int(-42),
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, value := range values {
|
|
||||||
output, err := converter{}.ConvertValue(value)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("%T type not convertible %s", value, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if output != int64(-42) {
|
|
||||||
t.Fatalf("%T type not converted, got %#v %T", value, output, output)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConvertUnsignedIntegers(t *testing.T) {
|
|
||||||
values := []interface{}{
|
|
||||||
uint8(42),
|
|
||||||
uint16(42),
|
|
||||||
uint32(42),
|
|
||||||
uint64(42),
|
|
||||||
uint(42),
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, value := range values {
|
|
||||||
output, err := converter{}.ConvertValue(value)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("%T type not convertible %s", value, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if output != uint64(42) {
|
|
||||||
t.Fatalf("%T type not converted, got %#v %T", value, output, output)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
output, err := converter{}.ConvertValue(^uint64(0))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("uint64 high-bit not convertible", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if output != ^uint64(0) {
|
|
||||||
t.Fatalf("uint64 high-bit converted, got %#v %T", output, output)
|
|
||||||
}
|
|
||||||
}
|
|
293
vendor/github.com/go-sql-driver/mysql/utils_test.go
generated
vendored
293
vendor/github.com/go-sql-driver/mysql/utils_test.go
generated
vendored
@ -1,293 +0,0 @@
|
|||||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"database/sql"
|
|
||||||
"database/sql/driver"
|
|
||||||
"encoding/binary"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestLengthEncodedInteger(t *testing.T) {
|
|
||||||
var integerTests = []struct {
|
|
||||||
num uint64
|
|
||||||
encoded []byte
|
|
||||||
}{
|
|
||||||
{0x0000000000000000, []byte{0x00}},
|
|
||||||
{0x0000000000000012, []byte{0x12}},
|
|
||||||
{0x00000000000000fa, []byte{0xfa}},
|
|
||||||
{0x0000000000000100, []byte{0xfc, 0x00, 0x01}},
|
|
||||||
{0x0000000000001234, []byte{0xfc, 0x34, 0x12}},
|
|
||||||
{0x000000000000ffff, []byte{0xfc, 0xff, 0xff}},
|
|
||||||
{0x0000000000010000, []byte{0xfd, 0x00, 0x00, 0x01}},
|
|
||||||
{0x0000000000123456, []byte{0xfd, 0x56, 0x34, 0x12}},
|
|
||||||
{0x0000000000ffffff, []byte{0xfd, 0xff, 0xff, 0xff}},
|
|
||||||
{0x0000000001000000, []byte{0xfe, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}},
|
|
||||||
{0x123456789abcdef0, []byte{0xfe, 0xf0, 0xde, 0xbc, 0x9a, 0x78, 0x56, 0x34, 0x12}},
|
|
||||||
{0xffffffffffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tst := range integerTests {
|
|
||||||
num, isNull, numLen := readLengthEncodedInteger(tst.encoded)
|
|
||||||
if isNull {
|
|
||||||
t.Errorf("%x: expected %d, got NULL", tst.encoded, tst.num)
|
|
||||||
}
|
|
||||||
if num != tst.num {
|
|
||||||
t.Errorf("%x: expected %d, got %d", tst.encoded, tst.num, num)
|
|
||||||
}
|
|
||||||
if numLen != len(tst.encoded) {
|
|
||||||
t.Errorf("%x: expected size %d, got %d", tst.encoded, len(tst.encoded), numLen)
|
|
||||||
}
|
|
||||||
encoded := appendLengthEncodedInteger(nil, num)
|
|
||||||
if !bytes.Equal(encoded, tst.encoded) {
|
|
||||||
t.Errorf("%v: expected %x, got %x", num, tst.encoded, encoded)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFormatBinaryDateTime(t *testing.T) {
|
|
||||||
rawDate := [11]byte{}
|
|
||||||
binary.LittleEndian.PutUint16(rawDate[:2], 1978) // years
|
|
||||||
rawDate[2] = 12 // months
|
|
||||||
rawDate[3] = 30 // days
|
|
||||||
rawDate[4] = 15 // hours
|
|
||||||
rawDate[5] = 46 // minutes
|
|
||||||
rawDate[6] = 23 // seconds
|
|
||||||
binary.LittleEndian.PutUint32(rawDate[7:], 987654) // microseconds
|
|
||||||
expect := func(expected string, inlen, outlen uint8) {
|
|
||||||
actual, _ := formatBinaryDateTime(rawDate[:inlen], outlen)
|
|
||||||
bytes, ok := actual.([]byte)
|
|
||||||
if !ok {
|
|
||||||
t.Errorf("formatBinaryDateTime must return []byte, was %T", actual)
|
|
||||||
}
|
|
||||||
if string(bytes) != expected {
|
|
||||||
t.Errorf(
|
|
||||||
"expected %q, got %q for length in %d, out %d",
|
|
||||||
expected, actual, inlen, outlen,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
expect("0000-00-00", 0, 10)
|
|
||||||
expect("0000-00-00 00:00:00", 0, 19)
|
|
||||||
expect("1978-12-30", 4, 10)
|
|
||||||
expect("1978-12-30 15:46:23", 7, 19)
|
|
||||||
expect("1978-12-30 15:46:23.987654", 11, 26)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFormatBinaryTime(t *testing.T) {
|
|
||||||
expect := func(expected string, src []byte, outlen uint8) {
|
|
||||||
actual, _ := formatBinaryTime(src, outlen)
|
|
||||||
bytes, ok := actual.([]byte)
|
|
||||||
if !ok {
|
|
||||||
t.Errorf("formatBinaryDateTime must return []byte, was %T", actual)
|
|
||||||
}
|
|
||||||
if string(bytes) != expected {
|
|
||||||
t.Errorf(
|
|
||||||
"expected %q, got %q for src=%q and outlen=%d",
|
|
||||||
expected, actual, src, outlen)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// binary format:
|
|
||||||
// sign (0: positive, 1: negative), days(4), hours, minutes, seconds, micro(4)
|
|
||||||
|
|
||||||
// Zeros
|
|
||||||
expect("00:00:00", []byte{}, 8)
|
|
||||||
expect("00:00:00.0", []byte{}, 10)
|
|
||||||
expect("00:00:00.000000", []byte{}, 15)
|
|
||||||
|
|
||||||
// Without micro(4)
|
|
||||||
expect("12:34:56", []byte{0, 0, 0, 0, 0, 12, 34, 56}, 8)
|
|
||||||
expect("-12:34:56", []byte{1, 0, 0, 0, 0, 12, 34, 56}, 8)
|
|
||||||
expect("12:34:56.00", []byte{0, 0, 0, 0, 0, 12, 34, 56}, 11)
|
|
||||||
expect("24:34:56", []byte{0, 1, 0, 0, 0, 0, 34, 56}, 8)
|
|
||||||
expect("-99:34:56", []byte{1, 4, 0, 0, 0, 3, 34, 56}, 8)
|
|
||||||
expect("103079215103:34:56", []byte{0, 255, 255, 255, 255, 23, 34, 56}, 8)
|
|
||||||
|
|
||||||
// With micro(4)
|
|
||||||
expect("12:34:56.00", []byte{0, 0, 0, 0, 0, 12, 34, 56, 99, 0, 0, 0}, 11)
|
|
||||||
expect("12:34:56.000099", []byte{0, 0, 0, 0, 0, 12, 34, 56, 99, 0, 0, 0}, 15)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestEscapeBackslash(t *testing.T) {
|
|
||||||
expect := func(expected, value string) {
|
|
||||||
actual := string(escapeBytesBackslash([]byte{}, []byte(value)))
|
|
||||||
if actual != expected {
|
|
||||||
t.Errorf(
|
|
||||||
"expected %s, got %s",
|
|
||||||
expected, actual,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
actual = string(escapeStringBackslash([]byte{}, value))
|
|
||||||
if actual != expected {
|
|
||||||
t.Errorf(
|
|
||||||
"expected %s, got %s",
|
|
||||||
expected, actual,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
expect("foo\\0bar", "foo\x00bar")
|
|
||||||
expect("foo\\nbar", "foo\nbar")
|
|
||||||
expect("foo\\rbar", "foo\rbar")
|
|
||||||
expect("foo\\Zbar", "foo\x1abar")
|
|
||||||
expect("foo\\\"bar", "foo\"bar")
|
|
||||||
expect("foo\\\\bar", "foo\\bar")
|
|
||||||
expect("foo\\'bar", "foo'bar")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestEscapeQuotes(t *testing.T) {
|
|
||||||
expect := func(expected, value string) {
|
|
||||||
actual := string(escapeBytesQuotes([]byte{}, []byte(value)))
|
|
||||||
if actual != expected {
|
|
||||||
t.Errorf(
|
|
||||||
"expected %s, got %s",
|
|
||||||
expected, actual,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
actual = string(escapeStringQuotes([]byte{}, value))
|
|
||||||
if actual != expected {
|
|
||||||
t.Errorf(
|
|
||||||
"expected %s, got %s",
|
|
||||||
expected, actual,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
expect("foo\x00bar", "foo\x00bar") // not affected
|
|
||||||
expect("foo\nbar", "foo\nbar") // not affected
|
|
||||||
expect("foo\rbar", "foo\rbar") // not affected
|
|
||||||
expect("foo\x1abar", "foo\x1abar") // not affected
|
|
||||||
expect("foo''bar", "foo'bar") // affected
|
|
||||||
expect("foo\"bar", "foo\"bar") // not affected
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAtomicBool(t *testing.T) {
|
|
||||||
var ab atomicBool
|
|
||||||
if ab.IsSet() {
|
|
||||||
t.Fatal("Expected value to be false")
|
|
||||||
}
|
|
||||||
|
|
||||||
ab.Set(true)
|
|
||||||
if ab.value != 1 {
|
|
||||||
t.Fatal("Set(true) did not set value to 1")
|
|
||||||
}
|
|
||||||
if !ab.IsSet() {
|
|
||||||
t.Fatal("Expected value to be true")
|
|
||||||
}
|
|
||||||
|
|
||||||
ab.Set(true)
|
|
||||||
if !ab.IsSet() {
|
|
||||||
t.Fatal("Expected value to be true")
|
|
||||||
}
|
|
||||||
|
|
||||||
ab.Set(false)
|
|
||||||
if ab.value != 0 {
|
|
||||||
t.Fatal("Set(false) did not set value to 0")
|
|
||||||
}
|
|
||||||
if ab.IsSet() {
|
|
||||||
t.Fatal("Expected value to be false")
|
|
||||||
}
|
|
||||||
|
|
||||||
ab.Set(false)
|
|
||||||
if ab.IsSet() {
|
|
||||||
t.Fatal("Expected value to be false")
|
|
||||||
}
|
|
||||||
if ab.TrySet(false) {
|
|
||||||
t.Fatal("Expected TrySet(false) to fail")
|
|
||||||
}
|
|
||||||
if !ab.TrySet(true) {
|
|
||||||
t.Fatal("Expected TrySet(true) to succeed")
|
|
||||||
}
|
|
||||||
if !ab.IsSet() {
|
|
||||||
t.Fatal("Expected value to be true")
|
|
||||||
}
|
|
||||||
|
|
||||||
ab.Set(true)
|
|
||||||
if !ab.IsSet() {
|
|
||||||
t.Fatal("Expected value to be true")
|
|
||||||
}
|
|
||||||
if ab.TrySet(true) {
|
|
||||||
t.Fatal("Expected TrySet(true) to fail")
|
|
||||||
}
|
|
||||||
if !ab.TrySet(false) {
|
|
||||||
t.Fatal("Expected TrySet(false) to succeed")
|
|
||||||
}
|
|
||||||
if ab.IsSet() {
|
|
||||||
t.Fatal("Expected value to be false")
|
|
||||||
}
|
|
||||||
|
|
||||||
ab._noCopy.Lock() // we've "tested" it ¯\_(ツ)_/¯
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAtomicError(t *testing.T) {
|
|
||||||
var ae atomicError
|
|
||||||
if ae.Value() != nil {
|
|
||||||
t.Fatal("Expected value to be nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
ae.Set(ErrMalformPkt)
|
|
||||||
if v := ae.Value(); v != ErrMalformPkt {
|
|
||||||
if v == nil {
|
|
||||||
t.Fatal("Value is still nil")
|
|
||||||
}
|
|
||||||
t.Fatal("Error did not match")
|
|
||||||
}
|
|
||||||
ae.Set(ErrPktSync)
|
|
||||||
if ae.Value() == ErrMalformPkt {
|
|
||||||
t.Fatal("Error still matches old error")
|
|
||||||
}
|
|
||||||
if v := ae.Value(); v != ErrPktSync {
|
|
||||||
t.Fatal("Error did not match")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestIsolationLevelMapping(t *testing.T) {
|
|
||||||
data := []struct {
|
|
||||||
level driver.IsolationLevel
|
|
||||||
expected string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
level: driver.IsolationLevel(sql.LevelReadCommitted),
|
|
||||||
expected: "READ COMMITTED",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
level: driver.IsolationLevel(sql.LevelRepeatableRead),
|
|
||||||
expected: "REPEATABLE READ",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
level: driver.IsolationLevel(sql.LevelReadUncommitted),
|
|
||||||
expected: "READ UNCOMMITTED",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
level: driver.IsolationLevel(sql.LevelSerializable),
|
|
||||||
expected: "SERIALIZABLE",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, td := range data {
|
|
||||||
if actual, err := mapIsolationLevel(td.level); actual != td.expected || err != nil {
|
|
||||||
t.Fatal(i, td.expected, actual, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// check unsupported mapping
|
|
||||||
expectedErr := "mysql: unsupported isolation level: 7"
|
|
||||||
actual, err := mapIsolationLevel(driver.IsolationLevel(sql.LevelLinearizable))
|
|
||||||
if actual != "" || err == nil {
|
|
||||||
t.Fatal("Expected error on unsupported isolation level")
|
|
||||||
}
|
|
||||||
if err.Error() != expectedErr {
|
|
||||||
t.Fatalf("Expected error to be %q, got %q", expectedErr, err)
|
|
||||||
}
|
|
||||||
}
|
|
23
vendor/github.com/jmoiron/sqlx/LICENSE
generated
vendored
23
vendor/github.com/jmoiron/sqlx/LICENSE
generated
vendored
@ -1,23 +0,0 @@
|
|||||||
Copyright (c) 2013, Jason Moiron
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person
|
|
||||||
obtaining a copy of this software and associated documentation
|
|
||||||
files (the "Software"), to deal in the Software without
|
|
||||||
restriction, including without limitation the rights to use,
|
|
||||||
copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the
|
|
||||||
Software is furnished to do so, subject to the following
|
|
||||||
conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be
|
|
||||||
included in all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
||||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
|
||||||
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
||||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
|
||||||
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
|
||||||
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
||||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
|
||||||
OTHER DEALINGS IN THE SOFTWARE.
|
|
||||||
|
|
258
vendor/github.com/jmoiron/sqlx/README.md
generated
vendored
258
vendor/github.com/jmoiron/sqlx/README.md
generated
vendored
@ -1,258 +0,0 @@
|
|||||||
#sqlx
|
|
||||||
|
|
||||||
[![Build Status](https://drone.io/github.com/jmoiron/sqlx/status.png)](https://drone.io/github.com/jmoiron/sqlx/latest) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE)
|
|
||||||
|
|
||||||
sqlx is a library which provides a set of extensions on go's standard
|
|
||||||
`database/sql` library. The sqlx versions of `sql.DB`, `sql.TX`, `sql.Stmt`,
|
|
||||||
et al. all leave the underlying interfaces untouched, so that their interfaces
|
|
||||||
are a superset on the standard ones. This makes it relatively painless to
|
|
||||||
integrate existing codebases using database/sql with sqlx.
|
|
||||||
|
|
||||||
Major additional concepts are:
|
|
||||||
|
|
||||||
* Marshal rows into structs (with embedded struct support), maps, and slices
|
|
||||||
* Named parameter support including prepared statements
|
|
||||||
* `Get` and `Select` to go quickly from query to struct/slice
|
|
||||||
* `LoadFile` for executing statements from a file
|
|
||||||
|
|
||||||
There is now some [fairly comprehensive documentation](http://jmoiron.github.io/sqlx/) for sqlx.
|
|
||||||
You can also read the usage below for a quick sample on how sqlx works, or check out the [API
|
|
||||||
documentation on godoc](http://godoc.org/github.com/jmoiron/sqlx).
|
|
||||||
|
|
||||||
## Recent Changes
|
|
||||||
|
|
||||||
The ability to use basic types as Select and Get destinations was added. This
|
|
||||||
is only valid when there is one column in the result set, and both functions
|
|
||||||
return an error if this isn't the case. This allows for much simpler patterns
|
|
||||||
of access for single column results:
|
|
||||||
|
|
||||||
```go
|
|
||||||
var count int
|
|
||||||
err := db.Get(&count, "SELECT count(*) FROM person;")
|
|
||||||
|
|
||||||
var names []string
|
|
||||||
err := db.Select(&names, "SELECT name FROM person;")
|
|
||||||
```
|
|
||||||
|
|
||||||
See the note on Scannability at the bottom of this README for some more info.
|
|
||||||
|
|
||||||
### Backwards Compatibility
|
|
||||||
|
|
||||||
There is no Go1-like promise of absolute stability, but I take the issue
|
|
||||||
seriously and will maintain the library in a compatible state unless vital
|
|
||||||
bugs prevent me from doing so. Since [#59](https://github.com/jmoiron/sqlx/issues/59) and [#60](https://github.com/jmoiron/sqlx/issues/60) necessitated
|
|
||||||
breaking behavior, a wider API cleanup was done at the time of fixing.
|
|
||||||
|
|
||||||
## install
|
|
||||||
|
|
||||||
go get github.com/jmoiron/sqlx
|
|
||||||
|
|
||||||
## issues
|
|
||||||
|
|
||||||
Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of
|
|
||||||
`Columns()` can have duplicate names on queries like:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id;
|
|
||||||
```
|
|
||||||
|
|
||||||
making a struct or map destination ambiguous. Use `AS` in your queries
|
|
||||||
to give rows distinct names, `rows.Scan` to scan them manually, or
|
|
||||||
`SliceScan` to get a slice of results.
|
|
||||||
|
|
||||||
## usage
|
|
||||||
|
|
||||||
Below is an example which shows some common use cases for sqlx. Check
|
|
||||||
[sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more
|
|
||||||
usage.
|
|
||||||
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
_ "github.com/lib/pq"
|
|
||||||
"database/sql"
|
|
||||||
"github.com/jmoiron/sqlx"
|
|
||||||
"log"
|
|
||||||
)
|
|
||||||
|
|
||||||
var schema = `
|
|
||||||
CREATE TABLE person (
|
|
||||||
first_name text,
|
|
||||||
last_name text,
|
|
||||||
email text
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE TABLE place (
|
|
||||||
country text,
|
|
||||||
city text NULL,
|
|
||||||
telcode integer
|
|
||||||
)`
|
|
||||||
|
|
||||||
type Person struct {
|
|
||||||
FirstName string `db:"first_name"`
|
|
||||||
LastName string `db:"last_name"`
|
|
||||||
Email string
|
|
||||||
}
|
|
||||||
|
|
||||||
type Place struct {
|
|
||||||
Country string
|
|
||||||
City sql.NullString
|
|
||||||
TelCode int
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
// this connects & tries a simple 'SELECT 1', panics on error
|
|
||||||
// use sqlx.Open() for sql.Open() semantics
|
|
||||||
db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalln(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// exec the schema or fail; multi-statement Exec behavior varies between
|
|
||||||
// database drivers; pq will exec them all, sqlite3 won't, ymmv
|
|
||||||
db.MustExec(schema)
|
|
||||||
|
|
||||||
tx := db.MustBegin()
|
|
||||||
tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net")
|
|
||||||
tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net")
|
|
||||||
tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1")
|
|
||||||
tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852")
|
|
||||||
tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65")
|
|
||||||
// Named queries can use structs, so if you have an existing struct (i.e. person := &Person{}) that you have populated, you can pass it in as &person
|
|
||||||
tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"})
|
|
||||||
tx.Commit()
|
|
||||||
|
|
||||||
// Query the database, storing results in a []Person (wrapped in []interface{})
|
|
||||||
people := []Person{}
|
|
||||||
db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC")
|
|
||||||
jason, john := people[0], people[1]
|
|
||||||
|
|
||||||
fmt.Printf("%#v\n%#v", jason, john)
|
|
||||||
// Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
|
|
||||||
// Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"}
|
|
||||||
|
|
||||||
// You can also get a single result, a la QueryRow
|
|
||||||
jason = Person{}
|
|
||||||
err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason")
|
|
||||||
fmt.Printf("%#v\n", jason)
|
|
||||||
// Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
|
|
||||||
|
|
||||||
// if you have null fields and use SELECT *, you must use sql.Null* in your struct
|
|
||||||
places := []Place{}
|
|
||||||
err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC")
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
usa, singsing, honkers := places[0], places[1], places[2]
|
|
||||||
|
|
||||||
fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers)
|
|
||||||
// Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
|
|
||||||
// Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
|
|
||||||
// Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
|
|
||||||
|
|
||||||
// Loop through rows using only one struct
|
|
||||||
place := Place{}
|
|
||||||
rows, err := db.Queryx("SELECT * FROM place")
|
|
||||||
for rows.Next() {
|
|
||||||
err := rows.StructScan(&place)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalln(err)
|
|
||||||
}
|
|
||||||
fmt.Printf("%#v\n", place)
|
|
||||||
}
|
|
||||||
// Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
|
|
||||||
// Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
|
|
||||||
// Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
|
|
||||||
|
|
||||||
// Named queries, using `:name` as the bindvar. Automatic bindvar support
|
|
||||||
// which takes into account the dbtype based on the driverName on sqlx.Open/Connect
|
|
||||||
_, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`,
|
|
||||||
map[string]interface{}{
|
|
||||||
"first": "Bin",
|
|
||||||
"last": "Smuth",
|
|
||||||
"email": "bensmith@allblacks.nz",
|
|
||||||
})
|
|
||||||
|
|
||||||
// Selects Mr. Smith from the database
|
|
||||||
rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"})
|
|
||||||
|
|
||||||
// Named queries can also use structs. Their bind names follow the same rules
|
|
||||||
// as the name -> db mapping, so struct fields are lowercased and the `db` tag
|
|
||||||
// is taken into consideration.
|
|
||||||
rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Scannability
|
|
||||||
|
|
||||||
Get and Select are able to take base types, so the following is now possible:
|
|
||||||
|
|
||||||
```go
|
|
||||||
var name string
|
|
||||||
db.Get(&name, "SELECT first_name FROM person WHERE id=$1", 10)
|
|
||||||
|
|
||||||
var ids []int64
|
|
||||||
db.Select(&ids, "SELECT id FROM person LIMIT 20;")
|
|
||||||
```
|
|
||||||
|
|
||||||
This can get complicated with destination types which are structs, like `sql.NullString`. Because of this, straightforward rules for *scannability* had to be developed. Iff something is "Scannable", then it is used directly in `rows.Scan`; if it's not, then the standard sqlx struct rules apply.
|
|
||||||
|
|
||||||
Something is scannable if any of the following are true:
|
|
||||||
|
|
||||||
* It is not a struct, ie. `reflect.ValueOf(v).Kind() != reflect.Struct`
|
|
||||||
* It implements the `sql.Scanner` interface
|
|
||||||
* It has no exported fields (eg. `time.Time`)
|
|
||||||
|
|
||||||
## embedded structs
|
|
||||||
|
|
||||||
Scan targets obey Go attribute rules directly, including nested embedded structs. Older versions of sqlx would attempt to also descend into non-embedded structs, but this is no longer supported.
|
|
||||||
|
|
||||||
Go makes *accessing* '[ambiguous selectors](http://play.golang.org/p/MGRxdjLaUc)' a compile time error, defining structs with ambiguous selectors is legal. Sqlx will decide which field to use on a struct based on a breadth first search of the struct and any structs it embeds, as specified by the order of the fields as accessible by `reflect`, which generally means in source-order. This means that sqlx chooses the outer-most, top-most matching name for targets, even when the selector might technically be ambiguous.
|
|
||||||
|
|
||||||
## scan safety
|
|
||||||
|
|
||||||
By default, scanning into structs requires the structs to have fields for all of the
|
|
||||||
columns in the query. This was done for a few reasons:
|
|
||||||
|
|
||||||
* A mistake in naming during development could lead you to believe that data is
|
|
||||||
being written to a field when actually it can't be found and it is being dropped
|
|
||||||
* This behavior mirrors the behavior of the Go compiler with respect to unused
|
|
||||||
variables
|
|
||||||
* Selecting more data than you need is wasteful (more data on the wire, more time
|
|
||||||
marshalling, etc)
|
|
||||||
|
|
||||||
Unlike Marshallers in the stdlib, the programmer scanning an sql result into a struct
|
|
||||||
will generally have a full understanding of what the underlying data model is *and*
|
|
||||||
full control over the SQL statement.
|
|
||||||
|
|
||||||
Despite this, there are use cases where it's convenient to be able to ignore unknown
|
|
||||||
columns. In most of these cases, you might be better off with `ScanSlice`, but where
|
|
||||||
you want to still use structs, there is now the `Unsafe` method. Its usage is most
|
|
||||||
simply shown in an example:
|
|
||||||
|
|
||||||
```go
|
|
||||||
db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
type Person {
|
|
||||||
Name string
|
|
||||||
}
|
|
||||||
var p Person
|
|
||||||
|
|
||||||
// This fails, because there is no destination for location in Person
|
|
||||||
err = db.Get(&p, "SELECT name, location FROM person LIMIT 1")
|
|
||||||
|
|
||||||
udb := db.Unsafe()
|
|
||||||
|
|
||||||
// This succeeds and just sets `Name` in the p struct
|
|
||||||
err = udb.Get(&p, "SELECT name, location FROM person LIMIT 1")
|
|
||||||
```
|
|
||||||
|
|
||||||
The `Unsafe` method is implemented on `Tx`, `DB`, and `Stmt`. When you use an unsafe
|
|
||||||
`Tx` or `DB` to create a new `Tx` or `Stmt`, those inherit its lack of safety.
|
|
||||||
|
|
84
vendor/github.com/jmoiron/sqlx/bind.go
generated
vendored
84
vendor/github.com/jmoiron/sqlx/bind.go
generated
vendored
@ -1,84 +0,0 @@
|
|||||||
package sqlx
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Bindvar types supported by Rebind, BindMap and BindStruct.
|
|
||||||
const (
|
|
||||||
UNKNOWN = iota
|
|
||||||
QUESTION
|
|
||||||
DOLLAR
|
|
||||||
NAMED
|
|
||||||
)
|
|
||||||
|
|
||||||
// BindType returns the bindtype for a given database given a drivername.
|
|
||||||
func BindType(driverName string) int {
|
|
||||||
switch driverName {
|
|
||||||
case "postgres", "pgx":
|
|
||||||
return DOLLAR
|
|
||||||
case "mysql":
|
|
||||||
return QUESTION
|
|
||||||
case "sqlite3":
|
|
||||||
return QUESTION
|
|
||||||
case "oci8":
|
|
||||||
return NAMED
|
|
||||||
}
|
|
||||||
return UNKNOWN
|
|
||||||
}
|
|
||||||
|
|
||||||
// FIXME: this should be able to be tolerant of escaped ?'s in queries without
|
|
||||||
// losing much speed, and should be to avoid confusion.
|
|
||||||
|
|
||||||
// FIXME: this is now produces the wrong results for oracle's NAMED bindtype
|
|
||||||
|
|
||||||
// Rebind a query from the default bindtype (QUESTION) to the target bindtype.
|
|
||||||
func Rebind(bindType int, query string) string {
|
|
||||||
if bindType != DOLLAR {
|
|
||||||
return query
|
|
||||||
}
|
|
||||||
|
|
||||||
qb := []byte(query)
|
|
||||||
// Add space enough for 10 params before we have to allocate
|
|
||||||
rqb := make([]byte, 0, len(qb)+10)
|
|
||||||
j := 1
|
|
||||||
for _, b := range qb {
|
|
||||||
if b == '?' {
|
|
||||||
rqb = append(rqb, '$')
|
|
||||||
for _, b := range strconv.Itoa(j) {
|
|
||||||
rqb = append(rqb, byte(b))
|
|
||||||
}
|
|
||||||
j++
|
|
||||||
} else {
|
|
||||||
rqb = append(rqb, b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return string(rqb)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Experimental implementation of Rebind which uses a bytes.Buffer. The code is
|
|
||||||
// much simpler and should be more resistant to odd unicode, but it is twice as
|
|
||||||
// slow. Kept here for benchmarking purposes and to possibly replace Rebind if
|
|
||||||
// problems arise with its somewhat naive handling of unicode.
|
|
||||||
|
|
||||||
func rebindBuff(bindType int, query string) string {
|
|
||||||
if bindType != DOLLAR {
|
|
||||||
return query
|
|
||||||
}
|
|
||||||
|
|
||||||
b := make([]byte, 0, len(query))
|
|
||||||
rqb := bytes.NewBuffer(b)
|
|
||||||
j := 1
|
|
||||||
for _, r := range query {
|
|
||||||
if r == '?' {
|
|
||||||
rqb.WriteRune('$')
|
|
||||||
rqb.WriteString(strconv.Itoa(j))
|
|
||||||
j++
|
|
||||||
} else {
|
|
||||||
rqb.WriteRune(r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return rqb.String()
|
|
||||||
}
|
|
12
vendor/github.com/jmoiron/sqlx/doc.go
generated
vendored
12
vendor/github.com/jmoiron/sqlx/doc.go
generated
vendored
@ -1,12 +0,0 @@
|
|||||||
// Package sqlx provides general purpose extensions to database/sql.
|
|
||||||
//
|
|
||||||
// It is intended to seamlessly wrap database/sql and provide convenience
|
|
||||||
// methods which are useful in the development of database driven applications.
|
|
||||||
// None of the underlying database/sql methods are changed. Instead all extended
|
|
||||||
// behavior is implemented through new methods defined on wrapper types.
|
|
||||||
//
|
|
||||||
// Additions include scanning into structs, named query support, rebinding
|
|
||||||
// queries for different drivers, convenient shorthands for common error handling
|
|
||||||
// and more.
|
|
||||||
//
|
|
||||||
package sqlx
|
|
321
vendor/github.com/jmoiron/sqlx/named.go
generated
vendored
321
vendor/github.com/jmoiron/sqlx/named.go
generated
vendored
@ -1,321 +0,0 @@
|
|||||||
package sqlx
|
|
||||||
|
|
||||||
// Named Query Support
|
|
||||||
//
|
|
||||||
// * BindMap - bind query bindvars to map/struct args
|
|
||||||
// * NamedExec, NamedQuery - named query w/ struct or map
|
|
||||||
// * NamedStmt - a pre-compiled named query which is a prepared statement
|
|
||||||
//
|
|
||||||
// Internal Interfaces:
|
|
||||||
//
|
|
||||||
// * compileNamedQuery - rebind a named query, returning a query and list of names
|
|
||||||
// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist
|
|
||||||
//
|
|
||||||
import (
|
|
||||||
"database/sql"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"unicode"
|
|
||||||
|
|
||||||
"github.com/jmoiron/sqlx/reflectx"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NamedStmt is a prepared statement that executes named queries. Prepare it
|
|
||||||
// how you would execute a NamedQuery, but pass in a struct or map when executing.
|
|
||||||
type NamedStmt struct {
|
|
||||||
Params []string
|
|
||||||
QueryString string
|
|
||||||
Stmt *Stmt
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the named statement.
|
|
||||||
func (n *NamedStmt) Close() error {
|
|
||||||
return n.Stmt.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Exec executes a named statement using the struct passed.
|
|
||||||
func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) {
|
|
||||||
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
|
|
||||||
if err != nil {
|
|
||||||
return *new(sql.Result), err
|
|
||||||
}
|
|
||||||
return n.Stmt.Exec(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Query executes a named statement using the struct argument, returning rows.
|
|
||||||
func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) {
|
|
||||||
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return n.Stmt.Query(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueryRow executes a named statement against the database. Because sqlx cannot
|
|
||||||
// create a *sql.Row with an error condition pre-set for binding errors, sqlx
|
|
||||||
// returns a *sqlx.Row instead.
|
|
||||||
func (n *NamedStmt) QueryRow(arg interface{}) *Row {
|
|
||||||
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
|
|
||||||
if err != nil {
|
|
||||||
return &Row{err: err}
|
|
||||||
}
|
|
||||||
return n.Stmt.QueryRowx(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustExec execs a NamedStmt, panicing on error
|
|
||||||
func (n *NamedStmt) MustExec(arg interface{}) sql.Result {
|
|
||||||
res, err := n.Exec(arg)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
// Queryx using this NamedStmt
|
|
||||||
func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) {
|
|
||||||
r, err := n.Query(arg)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &Rows{Rows: r, Mapper: n.Stmt.Mapper}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is
|
|
||||||
// an alias for QueryRow.
|
|
||||||
func (n *NamedStmt) QueryRowx(arg interface{}) *Row {
|
|
||||||
return n.QueryRow(arg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Select using this NamedStmt
|
|
||||||
func (n *NamedStmt) Select(dest interface{}, arg interface{}) error {
|
|
||||||
rows, err := n.Query(arg)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// if something happens here, we want to make sure the rows are Closed
|
|
||||||
defer rows.Close()
|
|
||||||
return scanAll(rows, dest, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get using this NamedStmt
|
|
||||||
func (n *NamedStmt) Get(dest interface{}, arg interface{}) error {
|
|
||||||
r := n.QueryRowx(arg)
|
|
||||||
return r.scanAny(dest, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// A union interface of preparer and binder, required to be able to prepare
|
|
||||||
// named statements (as the bindtype must be determined).
|
|
||||||
type namedPreparer interface {
|
|
||||||
Preparer
|
|
||||||
binder
|
|
||||||
}
|
|
||||||
|
|
||||||
func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) {
|
|
||||||
bindType := BindType(p.DriverName())
|
|
||||||
q, args, err := compileNamedQuery([]byte(query), bindType)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
stmt, err := Preparex(p, q)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &NamedStmt{
|
|
||||||
QueryString: q,
|
|
||||||
Params: args,
|
|
||||||
Stmt: stmt,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
|
|
||||||
if maparg, ok := arg.(map[string]interface{}); ok {
|
|
||||||
return bindMapArgs(names, maparg)
|
|
||||||
}
|
|
||||||
return bindArgs(names, arg, m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// private interface to generate a list of interfaces from a given struct
|
|
||||||
// type, given a list of names to pull out of the struct. Used by public
|
|
||||||
// BindStruct interface.
|
|
||||||
func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
|
|
||||||
arglist := make([]interface{}, 0, len(names))
|
|
||||||
|
|
||||||
// grab the indirected value of arg
|
|
||||||
v := reflect.ValueOf(arg)
|
|
||||||
for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; {
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
fields := m.TraversalsByName(v.Type(), names)
|
|
||||||
for i, t := range fields {
|
|
||||||
if len(t) == 0 {
|
|
||||||
return arglist, fmt.Errorf("could not find name %s in %#v", names[i], arg)
|
|
||||||
}
|
|
||||||
val := reflectx.FieldByIndexesReadOnly(v, t)
|
|
||||||
arglist = append(arglist, val.Interface())
|
|
||||||
}
|
|
||||||
|
|
||||||
return arglist, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// like bindArgs, but for maps: returns the values of arg in the order
// given by names, or an error naming the first missing key.
func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) {
	out := make([]interface{}, 0, len(names))

	for _, key := range names {
		value, present := arg[key]
		if !present {
			return out, fmt.Errorf("could not find name %s in %#v", key, arg)
		}
		out = append(out, value)
	}
	return out, nil
}
|
|
||||||
|
|
||||||
// bindStruct binds a named parameter query with fields from a struct argument.
|
|
||||||
// The rules for binding field names to parameter names follow the same
|
|
||||||
// conventions as for StructScan, including obeying the `db` struct tags.
|
|
||||||
func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
|
|
||||||
bound, names, err := compileNamedQuery([]byte(query), bindType)
|
|
||||||
if err != nil {
|
|
||||||
return "", []interface{}{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
arglist, err := bindArgs(names, arg, m)
|
|
||||||
if err != nil {
|
|
||||||
return "", []interface{}{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return bound, arglist, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// bindMap binds a named parameter query with a map of arguments.
|
|
||||||
func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) {
|
|
||||||
bound, names, err := compileNamedQuery([]byte(query), bindType)
|
|
||||||
if err != nil {
|
|
||||||
return "", []interface{}{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
arglist, err := bindMapArgs(names, args)
|
|
||||||
return bound, arglist, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// -- Compilation of Named Queries
|
|
||||||
|
|
||||||
// Allow digits and letters in bind params; additionally runes are
// checked against underscores, meaning that bind params can be
// alphanumeric with underscores. Mind the difference between unicode
// digits and numbers, where '5' is a digit but '五' is not.
var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit}
|
|
||||||
|
|
||||||
// FIXME: this function isn't safe for unicode named params, as a failing test
// can testify. This is not a regression but a failure of the original code
// as well. It should be modified to range over runes in a string rather than
// bytes, even though this is less convenient and slower. Hopefully the
// addition of the prepared NamedStmt (which will only do this once) will make
// up for the slightly slower ad-hoc NamedExec/NamedQuery.

// compile a NamedQuery into an unbound query (using the '?' bindvar) and
// a list of names.
//
// The scan is a small byte-level state machine: inName tracks whether we are
// currently consuming a ':name' parameter, and '::' is treated as an escape
// that emits a literal ':'.
func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) {
	names = make([]string, 0, 10)
	rebound := make([]byte, 0, len(qs))

	inName := false
	last := len(qs) - 1
	currentVar := 1 // next positional number for DOLLAR bindvars
	name := make([]byte, 0, 10)

	for i, b := range qs {
		// a ':' while we're in a name is an error
		if b == ':' {
			// if this is the second ':' in a '::' escape sequence, append a ':'
			if inName && i > 0 && qs[i-1] == ':' {
				rebound = append(rebound, ':')
				inName = false
				continue
			} else if inName {
				err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i))
				return query, names, err
			}
			inName = true
			name = []byte{}
			// if we're in a name, and this is an allowed character, continue
		} else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_') && i != last {
			// append the byte to the name if we are in a name and not on the last byte
			name = append(name, b)
			// if we're in a name and it's not an allowed character, the name is done
		} else if inName {
			inName = false
			// if this is the final byte of the string and it is part of the name, then
			// make sure to add it to the name
			if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) {
				name = append(name, b)
			}
			// add the string representation to the names list
			names = append(names, string(name))
			// add a proper bindvar for the bindType
			switch bindType {
			// oracle only supports named type bind vars even for positional
			case NAMED:
				rebound = append(rebound, ':')
				rebound = append(rebound, name...)
			case QUESTION, UNKNOWN:
				rebound = append(rebound, '?')
			case DOLLAR:
				rebound = append(rebound, '$')
				for _, b := range strconv.Itoa(currentVar) {
					rebound = append(rebound, byte(b))
				}
				currentVar++
			}
			// add this byte to string unless it was not part of the name
			if i != last {
				rebound = append(rebound, b)
			} else if !unicode.IsOneOf(allowedBindRunes, rune(b)) {
				rebound = append(rebound, b)
			}
		} else {
			// this is a normal byte and should just go onto the rebound query
			rebound = append(rebound, b)
		}
	}

	return string(rebound), names, err
}
|
|
||||||
|
|
||||||
// BindNamed binds a struct or a map to a query with named parameters,
// using the package-level default mapper.
func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) {
	return bindNamedMapper(bindType, query, arg, mapper())
}
|
|
||||||
|
|
||||||
func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
|
|
||||||
if maparg, ok := arg.(map[string]interface{}); ok {
|
|
||||||
return bindMap(bindType, query, maparg)
|
|
||||||
}
|
|
||||||
return bindStruct(bindType, query, arg, m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NamedQuery binds a named query and then runs Query on the result using the
|
|
||||||
// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
|
|
||||||
// map[string]interface{} types.
|
|
||||||
func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) {
|
|
||||||
q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return e.Queryx(q, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NamedExec uses BindStruct to get a query executable by the driver and
|
|
||||||
// then runs Exec on the result. Returns an error from the binding
|
|
||||||
// or the query excution itself.
|
|
||||||
func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) {
|
|
||||||
q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return e.Exec(q, args...)
|
|
||||||
}
|
|
227
vendor/github.com/jmoiron/sqlx/named_test.go
generated
vendored
227
vendor/github.com/jmoiron/sqlx/named_test.go
generated
vendored
@ -1,227 +0,0 @@
|
|||||||
package sqlx
|
|
||||||
|
|
||||||
import (
|
|
||||||
"database/sql"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestCompileQuery table-drives compileNamedQuery: Q is the input, R the
// QUESTION rebind, D the DOLLAR rebind, N the NAMED rebind, and V the
// expected list of extracted parameter names.
func TestCompileQuery(t *testing.T) {
	table := []struct {
		Q, R, D, N string
		V          []string
	}{
		// basic test for named parameters, invalid char ',' terminating
		{
			Q: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`,
			R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`,
			D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`,
			N: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`,
			V: []string{"name", "age", "first", "last"},
		},
		// This query tests a named parameter ending the string as well as numbers
		{
			Q: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`,
			R: `SELECT * FROM a WHERE first_name=? AND last_name=?`,
			D: `SELECT * FROM a WHERE first_name=$1 AND last_name=$2`,
			N: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`,
			V: []string{"name1", "name2"},
		},
		// '::' escapes inside a quoted identifier
		{
			Q: `SELECT "::foo" FROM a WHERE first_name=:name1 AND last_name=:name2`,
			R: `SELECT ":foo" FROM a WHERE first_name=? AND last_name=?`,
			D: `SELECT ":foo" FROM a WHERE first_name=$1 AND last_name=$2`,
			N: `SELECT ":foo" FROM a WHERE first_name=:name1 AND last_name=:name2`,
			V: []string{"name1", "name2"},
		},
		// repeated '::' escapes mixed with real named parameters
		{
			Q: `SELECT 'a::b::c' || first_name, '::::ABC::_::' FROM person WHERE first_name=:first_name AND last_name=:last_name`,
			R: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=? AND last_name=?`,
			D: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=$1 AND last_name=$2`,
			N: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=:first_name AND last_name=:last_name`,
			V: []string{"first_name", "last_name"},
		},
		/* This unicode awareness test sadly fails, because of our byte-wise worldview.
		 * We could certainly iterate by Rune instead, though it's a great deal slower,
		 * it's probably the RightWay(tm)
		{
			Q: `INSERT INTO foo (a,b,c,d) VALUES (:あ, :b, :キコ, :名前)`,
			R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`,
			D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`,
			N: []string{"name", "age", "first", "last"},
		},
		*/
	}

	for _, test := range table {
		qr, names, err := compileNamedQuery([]byte(test.Q), QUESTION)
		if err != nil {
			t.Error(err)
		}
		if qr != test.R {
			t.Errorf("expected %s, got %s", test.R, qr)
		}
		if len(names) != len(test.V) {
			t.Errorf("expected %#v, got %#v", test.V, names)
		} else {
			for i, name := range names {
				if name != test.V[i] {
					t.Errorf("expected %dth name to be %s, got %s", i+1, test.V[i], name)
				}
			}
		}
		qd, _, _ := compileNamedQuery([]byte(test.Q), DOLLAR)
		if qd != test.D {
			t.Errorf("\nexpected: `%s`\ngot: `%s`", test.D, qd)
		}

		qq, _, _ := compileNamedQuery([]byte(test.Q), NAMED)
		if qq != test.N {
			t.Errorf("\nexpected: `%s`\ngot: `%s`\n(len: %d vs %d)", test.N, qq, len(test.N), len(qq))
		}
	}
}
|
|
||||||
|
|
||||||
// Test wraps a *testing.T so the helpers below can report an error only
// when the error value passed in is actually non-nil.
type Test struct {
	t *testing.T
}
|
|
||||||
|
|
||||||
// Error reports err on the wrapped testing.T when err is non-nil. If msg
// values are supplied they are reported instead of the error itself.
func (t Test) Error(err error, msg ...interface{}) {
	if err != nil {
		if len(msg) == 0 {
			t.t.Error(err)
		} else {
			t.t.Error(msg...)
		}
	}
}
|
|
||||||
|
|
||||||
// Errorf formats and reports a message on the wrapped testing.T when err is
// non-nil. Note that err itself is only used as a gate and does not appear
// in the formatted output.
func (t Test) Errorf(err error, format string, args ...interface{}) {
	if err != nil {
		t.t.Errorf(format, args...)
	}
}
|
|
||||||
|
|
||||||
// TestNamedQueries exercises NamedStmt end-to-end against each test schema:
// preparation failures, Close, Queryx/Select/Exec, and reusing a NamedStmt
// inside transactions through both rollback and commit.
func TestNamedQueries(t *testing.T) {
	RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
		loadDefaultFixture(db, t)
		test := Test{t}
		var ns *NamedStmt
		var err error

		// Check that invalid preparations fail
		ns, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first:name")
		if err == nil {
			t.Error("Expected an error with invalid prepared statement.")
		}

		ns, err = db.PrepareNamed("invalid sql")
		if err == nil {
			t.Error("Expected an error with invalid prepared statement.")
		}

		// Check closing works as anticipated
		ns, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first_name")
		test.Error(err)
		err = ns.Close()
		test.Error(err)

		ns, err = db.PrepareNamed(`
			SELECT first_name, last_name, email
			FROM person WHERE first_name=:first_name AND email=:email`)
		test.Error(err)

		// test Queryx w/ uses Query
		p := Person{FirstName: "Jason", LastName: "Moiron", Email: "jmoiron@jmoiron.net"}

		rows, err := ns.Queryx(p)
		test.Error(err)
		for rows.Next() {
			var p2 Person
			rows.StructScan(&p2)
			if p.FirstName != p2.FirstName {
				t.Errorf("got %s, expected %s", p.FirstName, p2.FirstName)
			}
			if p.LastName != p2.LastName {
				t.Errorf("got %s, expected %s", p.LastName, p2.LastName)
			}
			if p.Email != p2.Email {
				t.Errorf("got %s, expected %s", p.Email, p2.Email)
			}
		}

		// test Select
		people := make([]Person, 0, 5)
		err = ns.Select(&people, p)
		test.Error(err)

		if len(people) != 1 {
			t.Errorf("got %d results, expected %d", len(people), 1)
		}
		if p.FirstName != people[0].FirstName {
			t.Errorf("got %s, expected %s", p.FirstName, people[0].FirstName)
		}
		if p.LastName != people[0].LastName {
			t.Errorf("got %s, expected %s", p.LastName, people[0].LastName)
		}
		if p.Email != people[0].Email {
			t.Errorf("got %s, expected %s", p.Email, people[0].Email)
		}

		// test Exec
		ns, err = db.PrepareNamed(`
			INSERT INTO person (first_name, last_name, email)
			VALUES (:first_name, :last_name, :email)`)
		test.Error(err)

		js := Person{
			FirstName: "Julien",
			LastName:  "Savea",
			Email:     "jsavea@ab.co.nz",
		}
		_, err = ns.Exec(js)
		test.Error(err)

		// Make sure we can pull him out again
		p2 := Person{}
		db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), js.Email)
		if p2.Email != js.Email {
			t.Errorf("expected %s, got %s", js.Email, p2.Email)
		}

		// test Txn NamedStmts
		tx := db.MustBegin()
		txns := tx.NamedStmt(ns)

		// We're going to add Steven in this txn
		sl := Person{
			FirstName: "Steven",
			LastName:  "Luatua",
			Email:     "sluatua@ab.co.nz",
		}

		_, err = txns.Exec(sl)
		test.Error(err)
		// then rollback...
		tx.Rollback()
		// looking for Steven after a rollback should fail
		err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email)
		if err != sql.ErrNoRows {
			t.Errorf("expected no rows error, got %v", err)
		}

		// now do the same, but commit
		tx = db.MustBegin()
		txns = tx.NamedStmt(ns)
		_, err = txns.Exec(sl)
		test.Error(err)
		tx.Commit()

		// looking for Steven after a Commit should succeed
		err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email)
		test.Error(err)
		if p2.Email != sl.Email {
			t.Errorf("expected %s, got %s", sl.Email, p2.Email)
		}

	})
}
|
|
17
vendor/github.com/jmoiron/sqlx/reflectx/README.md
generated
vendored
17
vendor/github.com/jmoiron/sqlx/reflectx/README.md
generated
vendored
@ -1,17 +0,0 @@
|
|||||||
# reflectx
|
|
||||||
|
|
||||||
The sqlx package has special reflect needs. In particular, it needs to:
|
|
||||||
|
|
||||||
* be able to map a name to a field
|
|
||||||
* understand embedded structs
|
|
||||||
* understand mapping names to fields by a particular tag
|
|
||||||
* user specified name -> field mapping functions
|
|
||||||
|
|
||||||
These behaviors mimic the behaviors of the standard library marshallers and also the
behavior of standard Go accessors.
|
|
||||||
|
|
||||||
The first two are amply taken care of by `Reflect.Value.FieldByName`, and the third is
|
|
||||||
addressed by `Reflect.Value.FieldByNameFunc`, but these don't quite understand struct
|
|
||||||
tags in the ways that are vital to most marshalers, and they are slow.
|
|
||||||
|
|
||||||
This reflectx package extends reflect to achieve these goals.
|
|
250
vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
generated
vendored
250
vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
generated
vendored
@ -1,250 +0,0 @@
|
|||||||
// Package reflect implements extensions to the standard reflect lib suitable
|
|
||||||
// for implementing marshaling and unmarshaling packages. The main Mapper type
|
|
||||||
// allows for Go-compatible named attribute access, including accessing embedded
|
|
||||||
// struct attributes and the ability to use functions and struct tags to
|
|
||||||
// customize field names.
|
|
||||||
//
|
|
||||||
package reflectx
|
|
||||||
|
|
||||||
import (
	"reflect"
	"runtime"
	"sync"
)
|
|
||||||
|
|
||||||
// fieldMap maps a canonical field name to the field-index path used to
// traverse a struct (possibly through embedded structs) to reach the field.
type fieldMap map[string][]int

// Mapper is a general purpose mapper of names to struct fields. A Mapper
// behaves like most marshallers, optionally obeying a field tag for name
// mapping and a function to provide a basic mapping of fields to names.
type Mapper struct {
	cache   map[reflect.Type]fieldMap // per-type memoized mappings, guarded by mutex
	tagName string
	mapFunc func(string) string
	mutex   sync.Mutex
}
|
|
||||||
|
|
||||||
// NewMapper returns a new mapper which optionally obeys the field tag given
|
|
||||||
// by tagName. If tagName is the empty string, it is ignored.
|
|
||||||
func NewMapper(tagName string) *Mapper {
|
|
||||||
return &Mapper{
|
|
||||||
cache: make(map[reflect.Type]fieldMap),
|
|
||||||
tagName: tagName,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMapperFunc returns a new mapper which optionally obeys a field tag and
|
|
||||||
// a struct field name mapper func given by f. Tags will take precedence, but
|
|
||||||
// for any other field, the mapped name will be f(field.Name)
|
|
||||||
func NewMapperFunc(tagName string, f func(string) string) *Mapper {
|
|
||||||
return &Mapper{
|
|
||||||
cache: make(map[reflect.Type]fieldMap),
|
|
||||||
tagName: tagName,
|
|
||||||
mapFunc: f,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TypeMap returns a mapping of field strings to int slices representing
|
|
||||||
// the traversal down the struct to reach the field.
|
|
||||||
func (m *Mapper) TypeMap(t reflect.Type) fieldMap {
|
|
||||||
m.mutex.Lock()
|
|
||||||
mapping, ok := m.cache[t]
|
|
||||||
if !ok {
|
|
||||||
mapping = getMapping(t, m.tagName, m.mapFunc)
|
|
||||||
m.cache[t] = mapping
|
|
||||||
}
|
|
||||||
m.mutex.Unlock()
|
|
||||||
return mapping
|
|
||||||
}
|
|
||||||
|
|
||||||
// FieldMap returns the mapper's mapping of field names to reflect values. Panics
|
|
||||||
// if v's Kind is not Struct, or v is not Indirectable to a struct kind.
|
|
||||||
func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value {
|
|
||||||
v = reflect.Indirect(v)
|
|
||||||
mustBe(v, reflect.Struct)
|
|
||||||
|
|
||||||
r := map[string]reflect.Value{}
|
|
||||||
nm := m.TypeMap(v.Type())
|
|
||||||
for tagName, indexes := range nm {
|
|
||||||
r[tagName] = FieldByIndexes(v, indexes)
|
|
||||||
}
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
// FieldByName returns a field by the its mapped name as a reflect.Value.
|
|
||||||
// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind.
|
|
||||||
// Returns zero Value if the name is not found.
|
|
||||||
func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value {
|
|
||||||
v = reflect.Indirect(v)
|
|
||||||
mustBe(v, reflect.Struct)
|
|
||||||
|
|
||||||
nm := m.TypeMap(v.Type())
|
|
||||||
traversal, ok := nm[name]
|
|
||||||
if !ok {
|
|
||||||
return *new(reflect.Value)
|
|
||||||
}
|
|
||||||
return FieldByIndexes(v, traversal)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FieldsByName returns a slice of values corresponding to the slice of names
|
|
||||||
// for the value. Panics if v's Kind is not Struct or v is not Indirectable
|
|
||||||
// to a struct Kind. Returns zero Value for each name not found.
|
|
||||||
func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value {
|
|
||||||
v = reflect.Indirect(v)
|
|
||||||
mustBe(v, reflect.Struct)
|
|
||||||
|
|
||||||
nm := m.TypeMap(v.Type())
|
|
||||||
|
|
||||||
vals := make([]reflect.Value, 0, len(names))
|
|
||||||
for _, name := range names {
|
|
||||||
traversal, ok := nm[name]
|
|
||||||
if !ok {
|
|
||||||
vals = append(vals, *new(reflect.Value))
|
|
||||||
} else {
|
|
||||||
vals = append(vals, FieldByIndexes(v, traversal))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return vals
|
|
||||||
}
|
|
||||||
|
|
||||||
// Traversals by name returns a slice of int slices which represent the struct
|
|
||||||
// traversals for each mapped name. Panics if t is not a struct or Indirectable
|
|
||||||
// to a struct. Returns empty int slice for each name not found.
|
|
||||||
func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int {
|
|
||||||
t = Deref(t)
|
|
||||||
mustBe(t, reflect.Struct)
|
|
||||||
nm := m.TypeMap(t)
|
|
||||||
|
|
||||||
r := make([][]int, 0, len(names))
|
|
||||||
for _, name := range names {
|
|
||||||
traversal, ok := nm[name]
|
|
||||||
if !ok {
|
|
||||||
r = append(r, []int{})
|
|
||||||
} else {
|
|
||||||
r = append(r, traversal)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
// FieldByIndexes returns a value for a particular struct traversal.
|
|
||||||
func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value {
|
|
||||||
for _, i := range indexes {
|
|
||||||
v = reflect.Indirect(v).Field(i)
|
|
||||||
// if this is a pointer, it's possible it is nil
|
|
||||||
if v.Kind() == reflect.Ptr && v.IsNil() {
|
|
||||||
alloc := reflect.New(Deref(v.Type()))
|
|
||||||
v.Set(alloc)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// FieldByIndexesReadOnly returns a value for a particular struct traversal,
|
|
||||||
// but is not concerned with allocating nil pointers because the value is
|
|
||||||
// going to be used for reading and not setting.
|
|
||||||
func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value {
|
|
||||||
for _, i := range indexes {
|
|
||||||
v = reflect.Indirect(v).Field(i)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deref is Indirect for reflect.Types
|
|
||||||
func Deref(t reflect.Type) reflect.Type {
|
|
||||||
if t.Kind() == reflect.Ptr {
|
|
||||||
t = t.Elem()
|
|
||||||
}
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
// -- helpers & utilities --

// Kinder is the minimal interface required by mustBe: anything that can
// report its reflect.Kind. Both reflect.Value and reflect.Type satisfy it.
type Kinder interface {
	Kind() reflect.Kind
}
|
|
||||||
|
|
||||||
// mustBe checks a value against a kind, panicing with a reflect.ValueError
|
|
||||||
// if the kind isn't that which is required.
|
|
||||||
func mustBe(v Kinder, expected reflect.Kind) {
|
|
||||||
k := v.Kind()
|
|
||||||
if k != expected {
|
|
||||||
panic(&reflect.ValueError{Method: methodName(), Kind: k})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// methodName returns the name of the function two frames up the stack,
// i.e. the caller of whatever function called methodName.
func methodName() string {
	pc, _, _, _ := runtime.Caller(2)
	if fn := runtime.FuncForPC(pc); fn != nil {
		return fn.Name()
	}
	return "unknown method"
}
|
|
||||||
|
|
||||||
// typeQueue is a BFS work item for getMapping: a struct type t together
// with the field-index path p taken from the root type to reach it.
type typeQueue struct {
	t reflect.Type
	p []int
}
|
|
||||||
|
|
||||||
// A copying append that creates a new slice each time, so traversal paths
// stored in the field map never share a backing array with one another.
func apnd(is []int, i int) []int {
	x := make([]int, len(is)+1)
	// copy replaces the original hand-rolled element loop; it is clearer
	// and lowers to memmove.
	copy(x, is)
	x[len(x)-1] = i
	return x
}
|
|
||||||
|
|
||||||
// getMapping returns a mapping for the t type, using the tagName and the mapFunc
// to determine the canonical names of fields. It performs a breadth-first
// traversal over anonymous embedded structs, so shallower fields win over
// deeper ones with the same canonical name (Go-style shadowing).
func getMapping(t reflect.Type, tagName string, mapFunc func(string) string) fieldMap {
	queue := []typeQueue{}
	queue = append(queue, typeQueue{Deref(t), []int{}})
	m := fieldMap{}
	for len(queue) != 0 {
		// pop the first item off of the queue
		tq := queue[0]
		queue = queue[1:]
		// iterate through all of its fields
		for fieldPos := 0; fieldPos < tq.t.NumField(); fieldPos++ {
			f := tq.t.Field(fieldPos)

			// the tag wins; otherwise fall back to mapFunc or the raw name
			name := f.Tag.Get(tagName)
			if len(name) == 0 {
				if mapFunc != nil {
					name = mapFunc(f.Name)
				} else {
					name = f.Name
				}
			}

			// if the name is "-", disabled via a tag, skip it
			if name == "-" {
				continue
			}

			// skip unexported fields
			if len(f.PkgPath) != 0 {
				continue
			}

			// bfs search of anonymous embedded structs
			if f.Anonymous {
				queue = append(queue, typeQueue{Deref(f.Type), apnd(tq.p, fieldPos)})
				continue
			}

			// if the name is shadowed by an earlier identical name in the search, skip it
			if _, ok := m[name]; ok {
				continue
			}
			// add it to the map at the current position
			m[name] = apnd(tq.p, fieldPos)
		}
	}
	return m
}
|
|
216
vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go
generated
vendored
216
vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go
generated
vendored
@ -1,216 +0,0 @@
|
|||||||
package reflectx
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func ival(v reflect.Value) int {
|
|
||||||
return v.Interface().(int)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestBasic verifies FieldByName on a flat struct with no tags, using the
// default (identity) name mapping.
func TestBasic(t *testing.T) {
	type Foo struct {
		A int
		B int
		C int
	}

	f := Foo{1, 2, 3}
	fv := reflect.ValueOf(f)
	m := NewMapper("")

	v := m.FieldByName(fv, "A")
	if ival(v) != f.A {
		t.Errorf("Expecting %d, got %d", ival(v), f.A)
	}
	v = m.FieldByName(fv, "B")
	if ival(v) != f.B {
		t.Errorf("Expecting %d, got %d", f.B, ival(v))
	}
	v = m.FieldByName(fv, "C")
	if ival(v) != f.C {
		t.Errorf("Expecting %d, got %d", f.C, ival(v))
	}
}
|
|
||||||
|
|
||||||
// TestEmbedded verifies that embedded-struct fields resolve by name and that
// a shallower field shadows a deeper embedded one ("A" is Baz.A, not
// Baz.Bar.Foo.A, per the BFS in getMapping).
func TestEmbedded(t *testing.T) {
	type Foo struct {
		A int
	}

	type Bar struct {
		Foo
		B int
	}

	type Baz struct {
		A int
		Bar
	}

	m := NewMapper("")

	z := Baz{}
	z.A = 1
	z.B = 2
	z.Bar.Foo.A = 3
	zv := reflect.ValueOf(z)

	v := m.FieldByName(zv, "A")
	if ival(v) != z.A {
		t.Errorf("Expecting %d, got %d", ival(v), z.A)
	}
	v = m.FieldByName(zv, "B")
	if ival(v) != z.B {
		t.Errorf("Expecting %d, got %d", ival(v), z.B)
	}
}
|
|
||||||
|
|
||||||
// TestMapping verifies TypeMap key generation with a "db" tag and lowercase
// fallback mapping: tagged names, embedded-struct flattening, `db:"-"`
// exclusion, and that non-anonymous struct fields are not flattened.
func TestMapping(t *testing.T) {
	type Person struct {
		ID           int
		Name         string
		WearsGlasses bool `db:"wears_glasses"`
	}

	m := NewMapperFunc("db", strings.ToLower)
	p := Person{1, "Jason", true}
	mapping := m.TypeMap(reflect.TypeOf(p))

	for _, key := range []string{"id", "name", "wears_glasses"} {
		if _, ok := mapping[key]; !ok {
			t.Errorf("Expecting to find key %s in mapping but did not.", key)
		}
	}

	type SportsPerson struct {
		Weight int
		Age    int
		Person
	}
	s := SportsPerson{Weight: 100, Age: 30, Person: p}
	mapping = m.TypeMap(reflect.TypeOf(s))
	for _, key := range []string{"id", "name", "wears_glasses", "weight", "age"} {
		if _, ok := mapping[key]; !ok {
			t.Errorf("Expecting to find key %s in mapping but did not.", key)
		}

	}

	type RugbyPlayer struct {
		Position   int
		IsIntense  bool `db:"is_intense"`
		IsAllBlack bool `db:"-"`
		SportsPerson
	}
	r := RugbyPlayer{12, true, false, s}
	mapping = m.TypeMap(reflect.TypeOf(r))
	for _, key := range []string{"id", "name", "wears_glasses", "weight", "age", "position", "is_intense"} {
		if _, ok := mapping[key]; !ok {
			t.Errorf("Expecting to find key %s in mapping but did not.", key)
		}
	}

	if _, ok := mapping["isallblack"]; ok {
		t.Errorf("Expecting to ignore `IsAllBlack` field")
	}

	type EmbeddedLiteral struct {
		Embedded struct {
			Person   string
			Position int
		}
		IsIntense bool
	}

	e := EmbeddedLiteral{}
	mapping = m.TypeMap(reflect.TypeOf(e))
	//fmt.Printf("Mapping: %#v\n", mapping)

	//f := FieldByIndexes(reflect.ValueOf(e), mapping["isintense"])
	//fmt.Println(f, f.Interface())

	//tbn := m.TraversalsByName(reflect.TypeOf(e), []string{"isintense"})
	//fmt.Printf("%#v\n", tbn)

}
|
|
||||||
|
|
||||||
// E1..E4 form a four-level embedding chain used by the benchmarks below to
// measure field-access cost at different nesting depths.
type E1 struct {
	A int
}
type E2 struct {
	E1
	B int
}
type E3 struct {
	E2
	C int
}
type E4 struct {
	E3
	D int
}
|
|
||||||
|
|
||||||
// BenchmarkFieldNameL1 measures reflect's FieldByName for a depth-1 field.
func BenchmarkFieldNameL1(b *testing.B) {
	e4 := E4{D: 1}
	for i := 0; i < b.N; i++ {
		v := reflect.ValueOf(e4)
		f := v.FieldByName("D")
		if f.Interface().(int) != 1 {
			b.Fatal("Wrong value.")
		}
	}
}
|
|
||||||
|
|
||||||
func BenchmarkFieldNameL4(b *testing.B) {
|
|
||||||
e4 := E4{}
|
|
||||||
e4.A = 1
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
v := reflect.ValueOf(e4)
|
|
||||||
f := v.FieldByName("A")
|
|
||||||
if f.Interface().(int) != 1 {
|
|
||||||
b.Fatal("Wrong value.")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkFieldPosL1(b *testing.B) {
|
|
||||||
e4 := E4{D: 1}
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
v := reflect.ValueOf(e4)
|
|
||||||
f := v.Field(1)
|
|
||||||
if f.Interface().(int) != 1 {
|
|
||||||
b.Fatal("Wrong value.")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkFieldPosL4(b *testing.B) {
|
|
||||||
e4 := E4{}
|
|
||||||
e4.A = 1
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
v := reflect.ValueOf(e4)
|
|
||||||
f := v.Field(0)
|
|
||||||
f = f.Field(0)
|
|
||||||
f = f.Field(0)
|
|
||||||
f = f.Field(0)
|
|
||||||
if f.Interface().(int) != 1 {
|
|
||||||
b.Fatal("Wrong value.")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkFieldByIndexL4(b *testing.B) {
|
|
||||||
e4 := E4{}
|
|
||||||
e4.A = 1
|
|
||||||
idx := []int{0, 0, 0, 0}
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
v := reflect.ValueOf(e4)
|
|
||||||
f := FieldByIndexes(v, idx)
|
|
||||||
if f.Interface().(int) != 1 {
|
|
||||||
b.Fatal("Wrong value.")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
986
vendor/github.com/jmoiron/sqlx/sqlx.go
generated
vendored
986
vendor/github.com/jmoiron/sqlx/sqlx.go
generated
vendored
@ -1,986 +0,0 @@
|
|||||||
package sqlx
|
|
||||||
|
|
||||||
import (
|
|
||||||
"database/sql"
|
|
||||||
"database/sql/driver"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"io/ioutil"
|
|
||||||
"path/filepath"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/jmoiron/sqlx/reflectx"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Although the NameMapper is convenient, in practice it should not
|
|
||||||
// be relied on except for application code. If you are writing a library
|
|
||||||
// that uses sqlx, you should be aware that the name mappings you expect
|
|
||||||
// can be overridded by your user's application.
|
|
||||||
|
|
||||||
// NameMapper is used to map column names to struct field names. By default,
|
|
||||||
// it uses strings.ToLower to lowercase struct field names. It can be set
|
|
||||||
// to whatever you want, but it is encouraged to be set before sqlx is used
|
|
||||||
// as name-to-field mappings are cached after first use on a type.
|
|
||||||
var NameMapper = strings.ToLower
|
|
||||||
var origMapper = reflect.ValueOf(NameMapper)
|
|
||||||
|
|
||||||
// Rather than creating on init, this is created when necessary so that
|
|
||||||
// importers have time to customize the NameMapper.
|
|
||||||
var mpr *reflectx.Mapper
|
|
||||||
|
|
||||||
// mapper returns a valid mapper using the configured NameMapper func.
|
|
||||||
func mapper() *reflectx.Mapper {
|
|
||||||
if mpr == nil {
|
|
||||||
mpr = reflectx.NewMapperFunc("db", NameMapper)
|
|
||||||
} else if origMapper != reflect.ValueOf(NameMapper) {
|
|
||||||
// if NameMapper has changed, create a new mapper
|
|
||||||
mpr = reflectx.NewMapperFunc("db", NameMapper)
|
|
||||||
origMapper = reflect.ValueOf(NameMapper)
|
|
||||||
}
|
|
||||||
return mpr
|
|
||||||
}
|
|
||||||
|
|
||||||
// isScannable takes the reflect.Type and the actual dest value and returns
|
|
||||||
// whether or not it's Scannable. Something is scannable if:
|
|
||||||
// * it is not a struct
|
|
||||||
// * it implements sql.Scanner
|
|
||||||
// * it has no exported fields
|
|
||||||
func isScannable(t reflect.Type) bool {
|
|
||||||
if reflect.PtrTo(t).Implements(_scannerInterface) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if t.Kind() != reflect.Struct {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// it's not important that we use the right mapper for this particular object,
|
|
||||||
// we're only concerned on how many exported fields this struct has
|
|
||||||
m := mapper()
|
|
||||||
if len(m.TypeMap(t)) == 0 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// ColScanner is an interface used by MapScan and SliceScan
|
|
||||||
type ColScanner interface {
|
|
||||||
Columns() ([]string, error)
|
|
||||||
Scan(dest ...interface{}) error
|
|
||||||
Err() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// Queryer is an interface used by Get and Select
|
|
||||||
type Queryer interface {
|
|
||||||
Query(query string, args ...interface{}) (*sql.Rows, error)
|
|
||||||
Queryx(query string, args ...interface{}) (*Rows, error)
|
|
||||||
QueryRowx(query string, args ...interface{}) *Row
|
|
||||||
}
|
|
||||||
|
|
||||||
// Execer is an interface used by MustExec and LoadFile
|
|
||||||
type Execer interface {
|
|
||||||
Exec(query string, args ...interface{}) (sql.Result, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Binder is an interface for something which can bind queries (Tx, DB)
|
|
||||||
type binder interface {
|
|
||||||
DriverName() string
|
|
||||||
Rebind(string) string
|
|
||||||
BindNamed(string, interface{}) (string, []interface{}, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ext is a union interface which can bind, query, and exec, used by
|
|
||||||
// NamedQuery and NamedExec.
|
|
||||||
type Ext interface {
|
|
||||||
binder
|
|
||||||
Queryer
|
|
||||||
Execer
|
|
||||||
}
|
|
||||||
|
|
||||||
// Preparer is an interface used by Preparex.
|
|
||||||
type Preparer interface {
|
|
||||||
Prepare(query string) (*sql.Stmt, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// determine if any of our extensions are unsafe
|
|
||||||
func isUnsafe(i interface{}) bool {
|
|
||||||
switch i.(type) {
|
|
||||||
case Row:
|
|
||||||
return i.(Row).unsafe
|
|
||||||
case *Row:
|
|
||||||
return i.(*Row).unsafe
|
|
||||||
case Rows:
|
|
||||||
return i.(Rows).unsafe
|
|
||||||
case *Rows:
|
|
||||||
return i.(*Rows).unsafe
|
|
||||||
case Stmt:
|
|
||||||
return i.(Stmt).unsafe
|
|
||||||
case qStmt:
|
|
||||||
return i.(qStmt).Stmt.unsafe
|
|
||||||
case *qStmt:
|
|
||||||
return i.(*qStmt).Stmt.unsafe
|
|
||||||
case DB:
|
|
||||||
return i.(DB).unsafe
|
|
||||||
case *DB:
|
|
||||||
return i.(*DB).unsafe
|
|
||||||
case Tx:
|
|
||||||
return i.(Tx).unsafe
|
|
||||||
case *Tx:
|
|
||||||
return i.(*Tx).unsafe
|
|
||||||
case sql.Rows, *sql.Rows:
|
|
||||||
return false
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func mapperFor(i interface{}) *reflectx.Mapper {
|
|
||||||
switch i.(type) {
|
|
||||||
case DB:
|
|
||||||
return i.(DB).Mapper
|
|
||||||
case *DB:
|
|
||||||
return i.(*DB).Mapper
|
|
||||||
case Tx:
|
|
||||||
return i.(Tx).Mapper
|
|
||||||
case *Tx:
|
|
||||||
return i.(*Tx).Mapper
|
|
||||||
default:
|
|
||||||
return mapper()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
|
|
||||||
var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
|
|
||||||
|
|
||||||
// Row is a reimplementation of sql.Row in order to gain access to the underlying
|
|
||||||
// sql.Rows.Columns() data, necessary for StructScan.
|
|
||||||
type Row struct {
|
|
||||||
err error
|
|
||||||
unsafe bool
|
|
||||||
rows *sql.Rows
|
|
||||||
Mapper *reflectx.Mapper
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scan is a fixed implementation of sql.Row.Scan, which does not discard the
|
|
||||||
// underlying error from the internal rows object if it exists.
|
|
||||||
func (r *Row) Scan(dest ...interface{}) error {
|
|
||||||
if r.err != nil {
|
|
||||||
return r.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(bradfitz): for now we need to defensively clone all
|
|
||||||
// []byte that the driver returned (not permitting
|
|
||||||
// *RawBytes in Rows.Scan), since we're about to close
|
|
||||||
// the Rows in our defer, when we return from this function.
|
|
||||||
// the contract with the driver.Next(...) interface is that it
|
|
||||||
// can return slices into read-only temporary memory that's
|
|
||||||
// only valid until the next Scan/Close. But the TODO is that
|
|
||||||
// for a lot of drivers, this copy will be unnecessary. We
|
|
||||||
// should provide an optional interface for drivers to
|
|
||||||
// implement to say, "don't worry, the []bytes that I return
|
|
||||||
// from Next will not be modified again." (for instance, if
|
|
||||||
// they were obtained from the network anyway) But for now we
|
|
||||||
// don't care.
|
|
||||||
defer r.rows.Close()
|
|
||||||
for _, dp := range dest {
|
|
||||||
if _, ok := dp.(*sql.RawBytes); ok {
|
|
||||||
return errors.New("sql: RawBytes isn't allowed on Row.Scan")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !r.rows.Next() {
|
|
||||||
if err := r.rows.Err(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return sql.ErrNoRows
|
|
||||||
}
|
|
||||||
err := r.rows.Scan(dest...)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Make sure the query can be processed to completion with no errors.
|
|
||||||
if err := r.rows.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Columns returns the underlying sql.Rows.Columns(), or the deferred error usually
|
|
||||||
// returned by Row.Scan()
|
|
||||||
func (r *Row) Columns() ([]string, error) {
|
|
||||||
if r.err != nil {
|
|
||||||
return []string{}, r.err
|
|
||||||
}
|
|
||||||
return r.rows.Columns()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Err returns the error encountered while scanning.
|
|
||||||
func (r *Row) Err() error {
|
|
||||||
return r.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// DB is a wrapper around sql.DB which keeps track of the driverName upon Open,
|
|
||||||
// used mostly to automatically bind named queries using the right bindvars.
|
|
||||||
type DB struct {
|
|
||||||
*sql.DB
|
|
||||||
driverName string
|
|
||||||
unsafe bool
|
|
||||||
Mapper *reflectx.Mapper
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The
|
|
||||||
// driverName of the original database is required for named query support.
|
|
||||||
func NewDb(db *sql.DB, driverName string) *DB {
|
|
||||||
return &DB{DB: db, driverName: driverName, Mapper: mapper()}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DriverName returns the driverName passed to the Open function for this DB.
|
|
||||||
func (db *DB) DriverName() string {
|
|
||||||
return db.driverName
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open is the same as sql.Open, but returns an *sqlx.DB instead.
|
|
||||||
func Open(driverName, dataSourceName string) (*DB, error) {
|
|
||||||
db, err := sql.Open(driverName, dataSourceName)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustOpen is the same as sql.Open, but returns an *sqlx.DB instead and panics on error.
|
|
||||||
func MustOpen(driverName, dataSourceName string) *DB {
|
|
||||||
db, err := Open(driverName, dataSourceName)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return db
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapperFunc sets a new mapper for this db using the default sqlx struct tag
|
|
||||||
// and the provided mapper function.
|
|
||||||
func (db *DB) MapperFunc(mf func(string) string) {
|
|
||||||
db.Mapper = reflectx.NewMapperFunc("db", mf)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rebind transforms a query from QUESTION to the DB driver's bindvar type.
|
|
||||||
func (db *DB) Rebind(query string) string {
|
|
||||||
return Rebind(BindType(db.driverName), query)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unsafe returns a version of DB which will silently succeed to scan when
|
|
||||||
// columns in the SQL result have no fields in the destination struct.
|
|
||||||
// sqlx.Stmt and sqlx.Tx which are created from this DB will inherit its
|
|
||||||
// safety behavior.
|
|
||||||
func (db *DB) Unsafe() *DB {
|
|
||||||
return &DB{DB: db.DB, driverName: db.driverName, unsafe: true, Mapper: db.Mapper}
|
|
||||||
}
|
|
||||||
|
|
||||||
// BindNamed binds a query using the DB driver's bindvar type.
|
|
||||||
func (db *DB) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
|
|
||||||
return BindNamed(BindType(db.driverName), query, arg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NamedQuery using this DB.
|
|
||||||
func (db *DB) NamedQuery(query string, arg interface{}) (*Rows, error) {
|
|
||||||
return NamedQuery(db, query, arg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NamedExec using this DB.
|
|
||||||
func (db *DB) NamedExec(query string, arg interface{}) (sql.Result, error) {
|
|
||||||
return NamedExec(db, query, arg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Select using this DB.
|
|
||||||
func (db *DB) Select(dest interface{}, query string, args ...interface{}) error {
|
|
||||||
return Select(db, dest, query, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get using this DB.
|
|
||||||
func (db *DB) Get(dest interface{}, query string, args ...interface{}) error {
|
|
||||||
return Get(db, dest, query, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustBegin starts a transaction, and panics on error. Returns an *sqlx.Tx instead
|
|
||||||
// of an *sql.Tx.
|
|
||||||
func (db *DB) MustBegin() *Tx {
|
|
||||||
tx, err := db.Beginx()
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return tx
|
|
||||||
}
|
|
||||||
|
|
||||||
// Beginx begins a transaction and returns an *sqlx.Tx instead of an *sql.Tx.
|
|
||||||
func (db *DB) Beginx() (*Tx, error) {
|
|
||||||
tx, err := db.DB.Begin()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Queryx queries the database and returns an *sqlx.Rows.
|
|
||||||
func (db *DB) Queryx(query string, args ...interface{}) (*Rows, error) {
|
|
||||||
r, err := db.DB.Query(query, args...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueryRowx queries the database and returns an *sqlx.Row.
|
|
||||||
func (db *DB) QueryRowx(query string, args ...interface{}) *Row {
|
|
||||||
rows, err := db.DB.Query(query, args...)
|
|
||||||
return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper}
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustExec (panic) runs MustExec using this database.
|
|
||||||
func (db *DB) MustExec(query string, args ...interface{}) sql.Result {
|
|
||||||
return MustExec(db, query, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Preparex returns an sqlx.Stmt instead of a sql.Stmt
|
|
||||||
func (db *DB) Preparex(query string) (*Stmt, error) {
|
|
||||||
return Preparex(db, query)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrepareNamed returns an sqlx.NamedStmt
|
|
||||||
func (db *DB) PrepareNamed(query string) (*NamedStmt, error) {
|
|
||||||
return prepareNamed(db, query)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tx is an sqlx wrapper around sql.Tx with extra functionality
|
|
||||||
type Tx struct {
|
|
||||||
*sql.Tx
|
|
||||||
driverName string
|
|
||||||
unsafe bool
|
|
||||||
Mapper *reflectx.Mapper
|
|
||||||
}
|
|
||||||
|
|
||||||
// DriverName returns the driverName used by the DB which began this transaction.
|
|
||||||
func (tx *Tx) DriverName() string {
|
|
||||||
return tx.driverName
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rebind a query within a transaction's bindvar type.
|
|
||||||
func (tx *Tx) Rebind(query string) string {
|
|
||||||
return Rebind(BindType(tx.driverName), query)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unsafe returns a version of Tx which will silently succeed to scan when
|
|
||||||
// columns in the SQL result have no fields in the destination struct.
|
|
||||||
func (tx *Tx) Unsafe() *Tx {
|
|
||||||
return &Tx{Tx: tx.Tx, driverName: tx.driverName, unsafe: true, Mapper: tx.Mapper}
|
|
||||||
}
|
|
||||||
|
|
||||||
// BindNamed binds a query within a transaction's bindvar type.
|
|
||||||
func (tx *Tx) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
|
|
||||||
return BindNamed(BindType(tx.driverName), query, arg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NamedQuery within a transaction.
|
|
||||||
func (tx *Tx) NamedQuery(query string, arg interface{}) (*Rows, error) {
|
|
||||||
return NamedQuery(tx, query, arg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NamedExec a named query within a transaction.
|
|
||||||
func (tx *Tx) NamedExec(query string, arg interface{}) (sql.Result, error) {
|
|
||||||
return NamedExec(tx, query, arg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Select within a transaction.
|
|
||||||
func (tx *Tx) Select(dest interface{}, query string, args ...interface{}) error {
|
|
||||||
return Select(tx, dest, query, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Queryx within a transaction.
|
|
||||||
func (tx *Tx) Queryx(query string, args ...interface{}) (*Rows, error) {
|
|
||||||
r, err := tx.Tx.Query(query, args...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueryRowx within a transaction.
|
|
||||||
func (tx *Tx) QueryRowx(query string, args ...interface{}) *Row {
|
|
||||||
rows, err := tx.Tx.Query(query, args...)
|
|
||||||
return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get within a transaction.
|
|
||||||
func (tx *Tx) Get(dest interface{}, query string, args ...interface{}) error {
|
|
||||||
return Get(tx, dest, query, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustExec runs MustExec within a transaction.
|
|
||||||
func (tx *Tx) MustExec(query string, args ...interface{}) sql.Result {
|
|
||||||
return MustExec(tx, query, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Preparex a statement within a transaction.
|
|
||||||
func (tx *Tx) Preparex(query string) (*Stmt, error) {
|
|
||||||
return Preparex(tx, query)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stmtx returns a version of the prepared statement which runs within a transaction. Provided
|
|
||||||
// stmt can be either *sql.Stmt or *sqlx.Stmt.
|
|
||||||
func (tx *Tx) Stmtx(stmt interface{}) *Stmt {
|
|
||||||
var st sql.Stmt
|
|
||||||
var s *sql.Stmt
|
|
||||||
switch stmt.(type) {
|
|
||||||
case sql.Stmt:
|
|
||||||
st = stmt.(sql.Stmt)
|
|
||||||
s = &st
|
|
||||||
case Stmt:
|
|
||||||
s = stmt.(Stmt).Stmt
|
|
||||||
case *Stmt:
|
|
||||||
s = stmt.(*Stmt).Stmt
|
|
||||||
case *sql.Stmt:
|
|
||||||
s = stmt.(*sql.Stmt)
|
|
||||||
}
|
|
||||||
return &Stmt{Stmt: tx.Stmt(s), Mapper: tx.Mapper}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NamedStmt returns a version of the prepared statement which runs within a transaction.
|
|
||||||
func (tx *Tx) NamedStmt(stmt *NamedStmt) *NamedStmt {
|
|
||||||
return &NamedStmt{
|
|
||||||
QueryString: stmt.QueryString,
|
|
||||||
Params: stmt.Params,
|
|
||||||
Stmt: tx.Stmtx(stmt.Stmt),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrepareNamed returns an sqlx.NamedStmt
|
|
||||||
func (tx *Tx) PrepareNamed(query string) (*NamedStmt, error) {
|
|
||||||
return prepareNamed(tx, query)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stmt is an sqlx wrapper around sql.Stmt with extra functionality
|
|
||||||
type Stmt struct {
|
|
||||||
*sql.Stmt
|
|
||||||
unsafe bool
|
|
||||||
Mapper *reflectx.Mapper
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unsafe returns a version of Stmt which will silently succeed to scan when
|
|
||||||
// columns in the SQL result have no fields in the destination struct.
|
|
||||||
func (s *Stmt) Unsafe() *Stmt {
|
|
||||||
return &Stmt{Stmt: s.Stmt, unsafe: true, Mapper: s.Mapper}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Select using the prepared statement.
|
|
||||||
func (s *Stmt) Select(dest interface{}, args ...interface{}) error {
|
|
||||||
return Select(&qStmt{*s}, dest, "", args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get using the prepared statement.
|
|
||||||
func (s *Stmt) Get(dest interface{}, args ...interface{}) error {
|
|
||||||
return Get(&qStmt{*s}, dest, "", args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustExec (panic) using this statement. Note that the query portion of the error
|
|
||||||
// output will be blank, as Stmt does not expose its query.
|
|
||||||
func (s *Stmt) MustExec(args ...interface{}) sql.Result {
|
|
||||||
return MustExec(&qStmt{*s}, "", args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueryRowx using this statement.
|
|
||||||
func (s *Stmt) QueryRowx(args ...interface{}) *Row {
|
|
||||||
qs := &qStmt{*s}
|
|
||||||
return qs.QueryRowx("", args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Queryx using this statement.
|
|
||||||
func (s *Stmt) Queryx(args ...interface{}) (*Rows, error) {
|
|
||||||
qs := &qStmt{*s}
|
|
||||||
return qs.Queryx("", args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// qStmt is an unexposed wrapper which lets you use a Stmt as a Queryer & Execer by
|
|
||||||
// implementing those interfaces and ignoring the `query` argument.
|
|
||||||
type qStmt struct{ Stmt }
|
|
||||||
|
|
||||||
func (q *qStmt) Query(query string, args ...interface{}) (*sql.Rows, error) {
|
|
||||||
return q.Stmt.Query(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *qStmt) Queryx(query string, args ...interface{}) (*Rows, error) {
|
|
||||||
r, err := q.Stmt.Query(args...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *qStmt) QueryRowx(query string, args ...interface{}) *Row {
|
|
||||||
rows, err := q.Stmt.Query(args...)
|
|
||||||
return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *qStmt) Exec(query string, args ...interface{}) (sql.Result, error) {
|
|
||||||
return q.Stmt.Exec(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rows is a wrapper around sql.Rows which caches costly reflect operations
|
|
||||||
// during a looped StructScan
|
|
||||||
type Rows struct {
|
|
||||||
*sql.Rows
|
|
||||||
unsafe bool
|
|
||||||
Mapper *reflectx.Mapper
|
|
||||||
// these fields cache memory use for a rows during iteration w/ structScan
|
|
||||||
started bool
|
|
||||||
fields [][]int
|
|
||||||
values []interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SliceScan using this Rows.
|
|
||||||
func (r *Rows) SliceScan() ([]interface{}, error) {
|
|
||||||
return SliceScan(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapScan using this Rows.
|
|
||||||
func (r *Rows) MapScan(dest map[string]interface{}) error {
|
|
||||||
return MapScan(r, dest)
|
|
||||||
}
|
|
||||||
|
|
||||||
// StructScan is like sql.Rows.Scan, but scans a single Row into a single Struct.
|
|
||||||
// Use this and iterate over Rows manually when the memory load of Select() might be
|
|
||||||
// prohibitive. *Rows.StructScan caches the reflect work of matching up column
|
|
||||||
// positions to fields to avoid that overhead per scan, which means it is not safe
|
|
||||||
// to run StructScan on the same Rows instance with different struct types.
|
|
||||||
func (r *Rows) StructScan(dest interface{}) error {
|
|
||||||
v := reflect.ValueOf(dest)
|
|
||||||
|
|
||||||
if v.Kind() != reflect.Ptr {
|
|
||||||
return errors.New("must pass a pointer, not a value, to StructScan destination")
|
|
||||||
}
|
|
||||||
|
|
||||||
v = reflect.Indirect(v)
|
|
||||||
|
|
||||||
if !r.started {
|
|
||||||
columns, err := r.Columns()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
m := r.Mapper
|
|
||||||
|
|
||||||
r.fields = m.TraversalsByName(v.Type(), columns)
|
|
||||||
// if we are not unsafe and are missing fields, return an error
|
|
||||||
if f, err := missingFields(r.fields); err != nil && !r.unsafe {
|
|
||||||
return fmt.Errorf("missing destination name %s", columns[f])
|
|
||||||
}
|
|
||||||
r.values = make([]interface{}, len(columns))
|
|
||||||
r.started = true
|
|
||||||
}
|
|
||||||
|
|
||||||
err := fieldsByTraversal(v, r.fields, r.values, true)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// scan into the struct field pointers and append to our results
|
|
||||||
err = r.Scan(r.values...)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return r.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Connect to a database and verify with a ping.
|
|
||||||
func Connect(driverName, dataSourceName string) (*DB, error) {
|
|
||||||
db, err := Open(driverName, dataSourceName)
|
|
||||||
if err != nil {
|
|
||||||
return db, err
|
|
||||||
}
|
|
||||||
err = db.Ping()
|
|
||||||
return db, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustConnect connects to a database and panics on error.
|
|
||||||
func MustConnect(driverName, dataSourceName string) *DB {
|
|
||||||
db, err := Connect(driverName, dataSourceName)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return db
|
|
||||||
}
|
|
||||||
|
|
||||||
// Preparex prepares a statement.
|
|
||||||
func Preparex(p Preparer, query string) (*Stmt, error) {
|
|
||||||
s, err := p.Prepare(query)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Select executes a query using the provided Queryer, and StructScans each row
|
|
||||||
// into dest, which must be a slice. If the slice elements are scannable, then
|
|
||||||
// the result set must have only one column. Otherwise, StructScan is used.
|
|
||||||
// The *sql.Rows are closed automatically.
|
|
||||||
func Select(q Queryer, dest interface{}, query string, args ...interface{}) error {
|
|
||||||
rows, err := q.Queryx(query, args...)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// if something happens here, we want to make sure the rows are Closed
|
|
||||||
defer rows.Close()
|
|
||||||
return scanAll(rows, dest, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get does a QueryRow using the provided Queryer, and scans the resulting row
|
|
||||||
// to dest. If dest is scannable, the result must only have one column. Otherwise,
|
|
||||||
// StructScan is used. Get will return sql.ErrNoRows like row.Scan would.
|
|
||||||
func Get(q Queryer, dest interface{}, query string, args ...interface{}) error {
|
|
||||||
r := q.QueryRowx(query, args...)
|
|
||||||
return r.scanAny(dest, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadFile exec's every statement in a file (as a single call to Exec).
|
|
||||||
// LoadFile may return a nil *sql.Result if errors are encountered locating or
|
|
||||||
// reading the file at path. LoadFile reads the entire file into memory, so it
|
|
||||||
// is not suitable for loading large data dumps, but can be useful for initializing
|
|
||||||
// schemas or loading indexes.
|
|
||||||
//
|
|
||||||
// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3
|
|
||||||
// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting
|
|
||||||
// this by requiring something with DriverName() and then attempting to split the
|
|
||||||
// queries will be difficult to get right, and its current driver-specific behavior
|
|
||||||
// is deemed at least not complex in its incorrectness.
|
|
||||||
func LoadFile(e Execer, path string) (*sql.Result, error) {
|
|
||||||
realpath, err := filepath.Abs(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
contents, err := ioutil.ReadFile(realpath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
res, err := e.Exec(string(contents))
|
|
||||||
return &res, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustExec execs the query using e and panics if there was an error.
|
|
||||||
func MustExec(e Execer, query string, args ...interface{}) sql.Result {
|
|
||||||
res, err := e.Exec(query, args...)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
// SliceScan using this Rows.
|
|
||||||
func (r *Row) SliceScan() ([]interface{}, error) {
|
|
||||||
return SliceScan(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapScan using this Rows.
|
|
||||||
func (r *Row) MapScan(dest map[string]interface{}) error {
|
|
||||||
return MapScan(r, dest)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Row) scanAny(dest interface{}, structOnly bool) error {
|
|
||||||
if r.err != nil {
|
|
||||||
return r.err
|
|
||||||
}
|
|
||||||
defer r.rows.Close()
|
|
||||||
|
|
||||||
v := reflect.ValueOf(dest)
|
|
||||||
if v.Kind() != reflect.Ptr {
|
|
||||||
return errors.New("must pass a pointer, not a value, to StructScan destination")
|
|
||||||
}
|
|
||||||
if v.IsNil() {
|
|
||||||
return errors.New("nil pointer passed to StructScan destination")
|
|
||||||
}
|
|
||||||
|
|
||||||
base := reflectx.Deref(v.Type())
|
|
||||||
scannable := isScannable(base)
|
|
||||||
|
|
||||||
if structOnly && scannable {
|
|
||||||
return structOnlyError(base)
|
|
||||||
}
|
|
||||||
|
|
||||||
columns, err := r.Columns()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if scannable && len(columns) > 1 {
|
|
||||||
return fmt.Errorf("scannable dest type %s with >1 columns (%d) in result", base.Kind(), len(columns))
|
|
||||||
}
|
|
||||||
|
|
||||||
if scannable {
|
|
||||||
return r.Scan(dest)
|
|
||||||
}
|
|
||||||
|
|
||||||
m := r.Mapper
|
|
||||||
|
|
||||||
fields := m.TraversalsByName(v.Type(), columns)
|
|
||||||
// if we are not unsafe and are missing fields, return an error
|
|
||||||
if f, err := missingFields(fields); err != nil && !r.unsafe {
|
|
||||||
return fmt.Errorf("missing destination name %s", columns[f])
|
|
||||||
}
|
|
||||||
values := make([]interface{}, len(columns))
|
|
||||||
|
|
||||||
err = fieldsByTraversal(v, fields, values, true)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// scan into the struct field pointers and append to our results
|
|
||||||
return r.Scan(values...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// StructScan a single Row into dest.
|
|
||||||
func (r *Row) StructScan(dest interface{}) error {
|
|
||||||
return r.scanAny(dest, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SliceScan a row, returning a []interface{} with values similar to MapScan.
|
|
||||||
// This function is primarly intended for use where the number of columns
|
|
||||||
// is not known. Because you can pass an []interface{} directly to Scan,
|
|
||||||
// it's recommended that you do that as it will not have to allocate new
|
|
||||||
// slices per row.
|
|
||||||
func SliceScan(r ColScanner) ([]interface{}, error) {
|
|
||||||
// ignore r.started, since we needn't use reflect for anything.
|
|
||||||
columns, err := r.Columns()
|
|
||||||
if err != nil {
|
|
||||||
return []interface{}{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
values := make([]interface{}, len(columns))
|
|
||||||
for i := range values {
|
|
||||||
values[i] = new(interface{})
|
|
||||||
}
|
|
||||||
|
|
||||||
err = r.Scan(values...)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return values, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range columns {
|
|
||||||
values[i] = *(values[i].(*interface{}))
|
|
||||||
}
|
|
||||||
|
|
||||||
return values, r.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapScan scans a single Row into the dest map[string]interface{}.
|
|
||||||
// Use this to get results for SQL that might not be under your control
|
|
||||||
// (for instance, if you're building an interface for an SQL server that
|
|
||||||
// executes SQL from input). Please do not use this as a primary interface!
|
|
||||||
// This will modify the map sent to it in place, so reuse the same map with
|
|
||||||
// care. Columns which occur more than once in the result will overwrite
|
|
||||||
// eachother!
|
|
||||||
func MapScan(r ColScanner, dest map[string]interface{}) error {
|
|
||||||
// ignore r.started, since we needn't use reflect for anything.
|
|
||||||
columns, err := r.Columns()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
values := make([]interface{}, len(columns))
|
|
||||||
for i := range values {
|
|
||||||
values[i] = new(interface{})
|
|
||||||
}
|
|
||||||
|
|
||||||
err = r.Scan(values...)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, column := range columns {
|
|
||||||
dest[column] = *(values[i].(*interface{}))
|
|
||||||
}
|
|
||||||
|
|
||||||
return r.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
// rowsi abstracts the subset of row-cursor behavior that scanAll and
// StructScan need. Its method set matches *sql.Rows, so both sql.Rows and
// sqlx.Rows satisfy it (sqlx.Rows is detected separately via a type switch
// in scanAll to reuse its Mapper).
type rowsi interface {
	Close() error
	Columns() ([]string, error)
	Err() error
	Next() bool
	Scan(...interface{}) error
}
|
|
||||||
|
|
||||||
// structOnlyError returns an error appropriate for type when a non-scannable
|
|
||||||
// struct is expected but something else is given
|
|
||||||
func structOnlyError(t reflect.Type) error {
|
|
||||||
isStruct := t.Kind() == reflect.Struct
|
|
||||||
isScanner := reflect.PtrTo(t).Implements(_scannerInterface)
|
|
||||||
if !isStruct {
|
|
||||||
return fmt.Errorf("expected %s but got %s", reflect.Struct, t.Kind())
|
|
||||||
}
|
|
||||||
if isScanner {
|
|
||||||
return fmt.Errorf("structscan expects a struct dest but the provided struct type %s implements scanner", t.Name())
|
|
||||||
}
|
|
||||||
return fmt.Errorf("expected a struct, but struct %s has no exported fields", t.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
// scanAll scans all rows into a destination, which must be a slice of any
|
|
||||||
// type. If the destination slice type is a Struct, then StructScan will be
|
|
||||||
// used on each row. If the destination is some other kind of base type, then
|
|
||||||
// each row must only have one column which can scan into that type. This
|
|
||||||
// allows you to do something like:
|
|
||||||
//
|
|
||||||
// rows, _ := db.Query("select id from people;")
|
|
||||||
// var ids []int
|
|
||||||
// scanAll(rows, &ids, false)
|
|
||||||
//
|
|
||||||
// and ids will be a list of the id results. I realize that this is a desirable
|
|
||||||
// interface to expose to users, but for now it will only be exposed via changes
|
|
||||||
// to `Get` and `Select`. The reason that this has been implemented like this is
|
|
||||||
// this is the only way to not duplicate reflect work in the new API while
|
|
||||||
// maintaining backwards compatibility.
|
|
||||||
func scanAll(rows rowsi, dest interface{}, structOnly bool) error {
|
|
||||||
var v, vp reflect.Value
|
|
||||||
|
|
||||||
value := reflect.ValueOf(dest)
|
|
||||||
|
|
||||||
// json.Unmarshal returns errors for these
|
|
||||||
if value.Kind() != reflect.Ptr {
|
|
||||||
return errors.New("must pass a pointer, not a value, to StructScan destination")
|
|
||||||
}
|
|
||||||
if value.IsNil() {
|
|
||||||
return errors.New("nil pointer passed to StructScan destination")
|
|
||||||
}
|
|
||||||
direct := reflect.Indirect(value)
|
|
||||||
|
|
||||||
slice, err := baseType(value.Type(), reflect.Slice)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
isPtr := slice.Elem().Kind() == reflect.Ptr
|
|
||||||
base := reflectx.Deref(slice.Elem())
|
|
||||||
scannable := isScannable(base)
|
|
||||||
|
|
||||||
if structOnly && scannable {
|
|
||||||
return structOnlyError(base)
|
|
||||||
}
|
|
||||||
|
|
||||||
columns, err := rows.Columns()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// if it's a base type make sure it only has 1 column; if not return an error
|
|
||||||
if scannable && len(columns) > 1 {
|
|
||||||
return fmt.Errorf("non-struct dest type %s with >1 columns (%d)", base.Kind(), len(columns))
|
|
||||||
}
|
|
||||||
|
|
||||||
if !scannable {
|
|
||||||
var values []interface{}
|
|
||||||
var m *reflectx.Mapper
|
|
||||||
|
|
||||||
switch rows.(type) {
|
|
||||||
case *Rows:
|
|
||||||
m = rows.(*Rows).Mapper
|
|
||||||
default:
|
|
||||||
m = mapper()
|
|
||||||
}
|
|
||||||
|
|
||||||
fields := m.TraversalsByName(base, columns)
|
|
||||||
// if we are not unsafe and are missing fields, return an error
|
|
||||||
if f, err := missingFields(fields); err != nil && !isUnsafe(rows) {
|
|
||||||
return fmt.Errorf("missing destination name %s", columns[f])
|
|
||||||
}
|
|
||||||
values = make([]interface{}, len(columns))
|
|
||||||
|
|
||||||
for rows.Next() {
|
|
||||||
// create a new struct type (which returns PtrTo) and indirect it
|
|
||||||
vp = reflect.New(base)
|
|
||||||
v = reflect.Indirect(vp)
|
|
||||||
|
|
||||||
err = fieldsByTraversal(v, fields, values, true)
|
|
||||||
|
|
||||||
// scan into the struct field pointers and append to our results
|
|
||||||
err = rows.Scan(values...)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if isPtr {
|
|
||||||
direct.Set(reflect.Append(direct, vp))
|
|
||||||
} else {
|
|
||||||
direct.Set(reflect.Append(direct, v))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
for rows.Next() {
|
|
||||||
vp = reflect.New(base)
|
|
||||||
err = rows.Scan(vp.Interface())
|
|
||||||
// append
|
|
||||||
if isPtr {
|
|
||||||
direct.Set(reflect.Append(direct, vp))
|
|
||||||
} else {
|
|
||||||
direct.Set(reflect.Append(direct, reflect.Indirect(vp)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return rows.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
// FIXME: StructScan was the very first bit of API in sqlx, and now unfortunately
|
|
||||||
// it doesn't really feel like it's named properly. There is an incongruency
|
|
||||||
// between this and the way that StructScan (which might better be ScanStruct
|
|
||||||
// anyway) works on a rows object.
|
|
||||||
|
|
||||||
// StructScan all rows from an sql.Rows or an sqlx.Rows into the dest slice.
|
|
||||||
// StructScan will scan in the entire rows result, so if you need do not want to
|
|
||||||
// allocate structs for the entire result, use Queryx and see sqlx.Rows.StructScan.
|
|
||||||
// If rows is sqlx.Rows, it will use its mapper, otherwise it will use the default.
|
|
||||||
func StructScan(rows rowsi, dest interface{}) error {
|
|
||||||
return scanAll(rows, dest, true)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// reflect helpers
|
|
||||||
|
|
||||||
func baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) {
|
|
||||||
t = reflectx.Deref(t)
|
|
||||||
if t.Kind() != expected {
|
|
||||||
return nil, fmt.Errorf("expected %s but got %s", expected, t.Kind())
|
|
||||||
}
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// fieldsByName fills a values interface with fields from the passed value based
|
|
||||||
// on the traversals in int. If ptrs is true, return addresses instead of values.
|
|
||||||
// We write this instead of using FieldsByName to save allocations and map lookups
|
|
||||||
// when iterating over many rows. Empty traversals will get an interface pointer.
|
|
||||||
// Because of the necessity of requesting ptrs or values, it's considered a bit too
|
|
||||||
// specialized for inclusion in reflectx itself.
|
|
||||||
func fieldsByTraversal(v reflect.Value, traversals [][]int, values []interface{}, ptrs bool) error {
|
|
||||||
v = reflect.Indirect(v)
|
|
||||||
if v.Kind() != reflect.Struct {
|
|
||||||
return errors.New("argument not a struct")
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, traversal := range traversals {
|
|
||||||
if len(traversal) == 0 {
|
|
||||||
values[i] = new(interface{})
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
f := reflectx.FieldByIndexes(v, traversal)
|
|
||||||
if ptrs {
|
|
||||||
values[i] = f.Addr().Interface()
|
|
||||||
} else {
|
|
||||||
values[i] = f.Interface()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// missingFields reports the index of the first empty traversal — a column
// with no matching struct field — together with a non-nil error. When every
// traversal is populated it returns (0, nil).
func missingFields(traversals [][]int) (field int, err error) {
	for i := range traversals {
		if len(traversals[i]) == 0 {
			return i, errors.New("missing field")
		}
	}
	return 0, nil
}
|
|
1311
vendor/github.com/jmoiron/sqlx/sqlx_test.go
generated
vendored
1311
vendor/github.com/jmoiron/sqlx/sqlx_test.go
generated
vendored
File diff suppressed because it is too large
Load Diff
5
vendor/github.com/jmoiron/sqlx/types/README.md
generated
vendored
5
vendor/github.com/jmoiron/sqlx/types/README.md
generated
vendored
@ -1,5 +0,0 @@
|
|||||||
# types
|
|
||||||
|
|
||||||
The types package provides some useful types which implement the `sql.Scanner`
|
|
||||||
and `driver.Valuer` interfaces, suitable for use as scan and value targets with
|
|
||||||
database/sql.
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user