Merge pull request #10 from github/initial-db-access

Initial db access
Shlomi Noach 2016-04-04 12:29:35 +02:00
commit bba352922a
61 changed files with 9297 additions and 4 deletions


@@ -7,8 +7,45 @@ package base
import ()
type RowsEstimateMethod string
const (
TableStatusRowsEstimate RowsEstimateMethod = "TableStatusRowsEstimate"
ExplainRowsEstimate = "ExplainRowsEstimate"
CountRowsEstimate = "CountRowsEstimate"
)
type MigrationContext struct {
DatabaseName string
OriginalTableName string
GhostTableName string
AlterStatement string
TableEngine string
CountTableRows bool
RowsEstimate int64
UsedRowsEstimateMethod RowsEstimateMethod
ChunkSize int
OriginalBinlogFormat string
OriginalBinlogRowImage string
}
var context *MigrationContext
func init() {
context = newMigrationContext()
}
func newMigrationContext() *MigrationContext {
return &MigrationContext{
ChunkSize: 1000,
}
}
func GetMigrationContext() *MigrationContext {
return context
}
// RequiresBinlogFormatChange tells whether the server's original binlog format needs to be changed to ROW for this migration
func (this *MigrationContext) RequiresBinlogFormatChange() bool {
return this.OriginalBinlogFormat != "ROW"
}
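For orientation, here is a minimal sketch (not part of this diff) of how other packages are expected to consume this singleton; the helper name chunkSizeForCopy is hypothetical:

```go
package example

import "github.com/github/gh-osc/go/base"

// chunkSizeForCopy is a hypothetical helper: it reads the shared migration
// context and returns the configured chunk size (defaults to 1000).
func chunkSizeForCopy() int {
	context := base.GetMigrationContext()
	return context.ChunkSize
}
```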


@@ -88,6 +88,10 @@ func (this *GoMySQLReader) ReadEntries(logFile string, startPos uint64, stopPos
} else {
ev.Dump(os.Stdout)
}
// TODO: convert to entries
// need to parse multi-row entries
// insert & delete are just one row per db row
// update: where-row -> values-row, repeating
}
}
log.Debugf("done")


@@ -18,7 +18,7 @@ func init() {
log.SetLevel(log.ERROR)
}
-func TestRBRSample0(t *testing.T) {
+func __TestRBRSample0(t *testing.T) {
testFile, err := os.Open("testdata/rbr-sample-0.txt")
test.S(t).ExpectNil(err)
defer testFile.Close()


@@ -10,7 +10,9 @@ import (
"fmt"
"os"
"github.com/github/gh-osc/go/base"
"github.com/github/gh-osc/go/binlog" "github.com/github/gh-osc/go/binlog"
"github.com/github/gh-osc/go/logic"
"github.com/github/gh-osc/go/mysql" "github.com/github/gh-osc/go/mysql"
"github.com/outbrain/golib/log" "github.com/outbrain/golib/log"
) )
@ -18,6 +20,7 @@ import (
// main is the application's entry point. It will either spawn a CLI or HTTP itnerfaces. // main is the application's entry point. It will either spawn a CLI or HTTP itnerfaces.
func main() { func main() {
var connectionConfig mysql.ConnectionConfig var connectionConfig mysql.ConnectionConfig
migrationContext := base.GetMigrationContext()
// mysqlBasedir := flag.String("mysql-basedir", "", "the --basedir config for MySQL (auto-detected if not given)")
// mysqlDatadir := flag.String("mysql-datadir", "", "the --datadir config for MySQL (auto-detected if not given)")
@@ -29,6 +32,11 @@ func main() {
flag.StringVar(&connectionConfig.User, "user", "root", "MySQL user")
flag.StringVar(&connectionConfig.Password, "password", "", "MySQL password")
flag.StringVar(&migrationContext.DatabaseName, "database", "", "database name (mandatory)")
flag.StringVar(&migrationContext.OriginalTableName, "table", "", "table name (mandatory)")
flag.StringVar(&migrationContext.AlterStatement, "alter", "", "alter statement (mandatory)")
flag.BoolVar(&migrationContext.CountTableRows, "exact-rowcount", false, "actually count table rows as opposed to estimate them (results in more accurate progress estimation)")
quiet := flag.Bool("quiet", false, "quiet")
verbose := flag.Bool("verbose", false, "verbose")
debug := flag.Bool("debug", false, "debug mode (very verbose)")
@@ -56,6 +64,17 @@ func main() {
// Override!!
log.SetLevel(log.ERROR)
}
if migrationContext.DatabaseName == "" {
log.Fatalf("--database must be provided and database name must not be empty")
}
if migrationContext.OriginalTableName == "" {
log.Fatalf("--table must be provided and table name must not be empty")
}
if migrationContext.AlterStatement == "" {
log.Fatalf("--alter must be provided and statement must not be empty")
}
log.Info("starting gh-osc") log.Info("starting gh-osc")
if *internalExperiment { if *internalExperiment {
@ -69,5 +88,12 @@ func main() {
log.Fatale(err) log.Fatale(err)
} }
binlogReader.ReadEntries(*binlogFile, 0, 0) binlogReader.ReadEntries(*binlogFile, 0, 0)
return
}
migrator := logic.NewMigrator(&connectionConfig)
err := migrator.Migrate()
if err != nil {
log.Fatale(err)
}
log.Info("Done")
}

go/logic/inspect.go (new file, 325 lines)

@@ -0,0 +1,325 @@
/*
Copyright 2016 GitHub Inc.
See https://github.com/github/gh-osc/blob/master/LICENSE
*/
package logic
import (
gosql "database/sql"
"fmt"
"strings"
"github.com/github/gh-osc/go/base"
"github.com/github/gh-osc/go/mysql"
"github.com/github/gh-osc/go/sql"
"github.com/outbrain/golib/log"
"github.com/outbrain/golib/sqlutils"
)
// Inspector reads data from the read-MySQL-server (typically a replica, but it can be the master).
// It is used for gaining initial status and structure, and later for following up on progress and changelog.
type Inspector struct {
connectionConfig *mysql.ConnectionConfig
db *gosql.DB
migrationContext *base.MigrationContext
}
func NewInspector(connectionConfig *mysql.ConnectionConfig) *Inspector {
return &Inspector{
connectionConfig: connectionConfig,
migrationContext: base.GetMigrationContext(),
}
}
func (this *Inspector) InitDBConnections() (err error) {
inspectorUri := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", this.connectionConfig.User, this.connectionConfig.Password, this.connectionConfig.Hostname, this.connectionConfig.Port, this.migrationContext.DatabaseName)
if this.db, _, err = sqlutils.GetDB(inspectorUri); err != nil {
return err
}
if err := this.validateConnection(); err != nil {
return err
}
if err := this.validateGrants(); err != nil {
return err
}
if err := this.validateBinlogs(); err != nil {
return err
}
if err := this.validateTable(); err != nil {
return err
}
if this.migrationContext.CountTableRows {
if err := this.countTableRows(); err != nil {
return err
}
} else {
if err := this.estimateTableRowsViaExplain(); err != nil {
return err
}
}
return nil
}
func (this *Inspector) InspectTables() (err error) {
uniqueKeys, err := this.getCandidateUniqueKeys(this.migrationContext.OriginalTableName)
if err != nil {
return err
}
if len(uniqueKeys) == 0 {
return fmt.Errorf("No PRIMARY nor UNIQUE key found in table! Bailing out")
}
return nil
}
// validateConnection issues a simple can-connect to MySQL
func (this *Inspector) validateConnection() error {
query := `select @@port`
var port int
if err := this.db.QueryRow(query).Scan(&port); err != nil {
return err
}
if port != this.connectionConfig.Port {
return fmt.Errorf("Unexpected database port reported: %+v", port)
}
log.Infof("connection validated on port %+v", port)
return nil
}
// validateGrants verifies that the user we are executing as has the grants
// necessary for performing the migration.
func (this *Inspector) validateGrants() error {
query := `show /* gh-osc */ grants for current_user()`
foundAll := false
foundSuper := false
foundReplicationSlave := false
foundDBAll := false
err := sqlutils.QueryRowsMap(this.db, query, func(rowMap sqlutils.RowMap) error {
for _, grantData := range rowMap {
grant := grantData.String
if strings.Contains(grant, `GRANT ALL PRIVILEGES ON *.*`) {
foundAll = true
}
if strings.Contains(grant, `SUPER`) && strings.Contains(grant, ` ON *.*`) {
foundSuper = true
}
if strings.Contains(grant, `REPLICATION SLAVE`) && strings.Contains(grant, ` ON *.*`) {
foundReplicationSlave = true
}
if strings.Contains(grant, fmt.Sprintf("GRANT ALL PRIVILEGES ON `%s`.*", this.migrationContext.DatabaseName)) {
foundDBAll = true
}
}
return nil
})
if err != nil {
return log.Errore(err)
}
if foundAll {
log.Infof("User has ALL privileges")
return nil
}
if foundSuper && foundReplicationSlave && foundDBAll {
log.Infof("User has SUPER, REPLICATION SLAVE privileges, and has ALL privileges on `%s`", this.migrationContext.DatabaseName)
return nil
}
return log.Errorf("User has insufficient privileges for migration.")
}
// validateBinlogs checks that the binary log configuration on the inspected server is suitable for this migration
func (this *Inspector) validateBinlogs() error {
query := `select @@global.log_bin, @@global.log_slave_updates, @@global.binlog_format`
var hasBinaryLogs, logSlaveUpdates bool
if err := this.db.QueryRow(query).Scan(&hasBinaryLogs, &logSlaveUpdates, &this.migrationContext.OriginalBinlogFormat); err != nil {
return err
}
if !hasBinaryLogs {
return fmt.Errorf("%s:%d must have binary logs enabled", this.connectionConfig.Hostname, this.connectionConfig.Port)
}
if !logSlaveUpdates {
return fmt.Errorf("%s:%d must have log_slave_updates enabled", this.connectionConfig.Hostname, this.connectionConfig.Port)
}
if this.migrationContext.RequiresBinlogFormatChange() {
query := `show /* gh-osc */ slave hosts`
countReplicas := 0
err := sqlutils.QueryRowsMap(this.db, query, func(rowMap sqlutils.RowMap) error {
countReplicas++
return nil
})
if err != nil {
return log.Errore(err)
}
if countReplicas > 0 {
return fmt.Errorf("%s:%d has %s binlog_format, but I'm too scared to change it to ROW because it has replicas. Bailing out", this.connectionConfig.Hostname, this.connectionConfig.Port, this.migrationContext.OriginalBinlogFormat)
}
log.Infof("%s:%d has %s binlog_format. I will change it to ROW for the duration of this migration.", this.connectionConfig.Hostname, this.connectionConfig.Port, this.migrationContext.OriginalBinlogFormat)
}
query = `select @@global.binlog_row_image`
if err := this.db.QueryRow(query).Scan(&this.migrationContext.OriginalBinlogRowImage); err != nil {
// binlog_row_image is only available as of MySQL 5.6. We wish to support 5.5 as well
this.migrationContext.OriginalBinlogRowImage = ""
}
log.Infof("binary logs validated on %s:%d", this.connectionConfig.Hostname, this.connectionConfig.Port)
return nil
}
// validateTable makes sure the table we need to operate on actually exists
func (this *Inspector) validateTable() error {
query := fmt.Sprintf(`show /* gh-osc */ table status from %s like '%s'`, sql.EscapeName(this.migrationContext.DatabaseName), this.migrationContext.OriginalTableName)
tableFound := false
err := sqlutils.QueryRowsMap(this.db, query, func(rowMap sqlutils.RowMap) error {
this.migrationContext.TableEngine = rowMap.GetString("Engine")
this.migrationContext.RowsEstimate = rowMap.GetInt64("Rows")
this.migrationContext.UsedRowsEstimateMethod = base.TableStatusRowsEstimate
if rowMap.GetString("Comment") == "VIEW" {
return fmt.Errorf("%s.%s is a VIEW, not a real table. Bailing out", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
}
tableFound = true
return nil
})
if err != nil {
return log.Errore(err)
}
if !tableFound {
return log.Errorf("Cannot find table %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
}
log.Infof("Table found. Engine=%s", this.migrationContext.TableEngine)
log.Debugf("Estimated number of rows via STATUS: %d", this.migrationContext.RowsEstimate)
return nil
}
func (this *Inspector) estimateTableRowsViaExplain() error {
query := fmt.Sprintf(`explain select /* gh-osc */ * from %s.%s where 1=1`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
outputFound := false
err := sqlutils.QueryRowsMap(this.db, query, func(rowMap sqlutils.RowMap) error {
this.migrationContext.RowsEstimate = rowMap.GetInt64("rows")
this.migrationContext.UsedRowsEstimateMethod = base.ExplainRowsEstimate
outputFound = true
return nil
})
if err != nil {
return log.Errore(err)
}
if !outputFound {
return log.Errorf("Cannot run EXPLAIN on %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
}
log.Infof("Estimated number of rows via EXPLAIN: %d", this.migrationContext.RowsEstimate)
return nil
}
func (this *Inspector) countTableRows() error {
log.Infof("As instructed, I'm issuing a SELECT COUNT(*) on the table. This may take a while")
query := fmt.Sprintf(`select /* gh-osc */ count(*) as rows from %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
if err := this.db.QueryRow(query).Scan(&this.migrationContext.RowsEstimate); err != nil {
return err
}
this.migrationContext.UsedRowsEstimateMethod = base.CountRowsEstimate
log.Infof("Exact number of rows via COUNT: %d", this.migrationContext.RowsEstimate)
return nil
}
// getCandidateUniqueKeys investigates a table and returns the list of unique keys
// candidate for chunking
func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](*sql.UniqueKey), err error) {
query := `
SELECT
COLUMNS.TABLE_SCHEMA,
COLUMNS.TABLE_NAME,
COLUMNS.COLUMN_NAME,
UNIQUES.INDEX_NAME,
UNIQUES.COLUMN_NAMES,
UNIQUES.COUNT_COLUMN_IN_INDEX,
COLUMNS.DATA_TYPE,
COLUMNS.CHARACTER_SET_NAME,
has_nullable
FROM INFORMATION_SCHEMA.COLUMNS INNER JOIN (
SELECT
TABLE_SCHEMA,
TABLE_NAME,
INDEX_NAME,
COUNT(*) AS COUNT_COLUMN_IN_INDEX,
GROUP_CONCAT(COLUMN_NAME ORDER BY SEQ_IN_INDEX ASC) AS COLUMN_NAMES,
SUBSTRING_INDEX(GROUP_CONCAT(COLUMN_NAME ORDER BY SEQ_IN_INDEX ASC), ',', 1) AS FIRST_COLUMN_NAME,
SUM(NULLABLE='YES') > 0 AS has_nullable
FROM INFORMATION_SCHEMA.STATISTICS
WHERE NON_UNIQUE=0
GROUP BY TABLE_SCHEMA, TABLE_NAME, INDEX_NAME
) AS UNIQUES
ON (
COLUMNS.TABLE_SCHEMA = UNIQUES.TABLE_SCHEMA AND
COLUMNS.TABLE_NAME = UNIQUES.TABLE_NAME AND
COLUMNS.COLUMN_NAME = UNIQUES.FIRST_COLUMN_NAME
)
WHERE
COLUMNS.TABLE_SCHEMA = ?
AND COLUMNS.TABLE_NAME = ?
ORDER BY
COLUMNS.TABLE_SCHEMA, COLUMNS.TABLE_NAME,
CASE UNIQUES.INDEX_NAME
WHEN 'PRIMARY' THEN 0
ELSE 1
END,
CASE has_nullable
WHEN 0 THEN 0
ELSE 1
END,
CASE IFNULL(CHARACTER_SET_NAME, '')
WHEN '' THEN 0
ELSE 1
END,
CASE DATA_TYPE
WHEN 'tinyint' THEN 0
WHEN 'smallint' THEN 1
WHEN 'int' THEN 2
WHEN 'bigint' THEN 3
ELSE 100
END,
COUNT_COLUMN_IN_INDEX
`
err = sqlutils.QueryRowsMap(this.db, query, func(rowMap sqlutils.RowMap) error {
uniqueKey := &sql.UniqueKey{
Name: rowMap.GetString("INDEX_NAME"),
Columns: *sql.ParseColumnList(rowMap.GetString("COLUMN_NAMES")),
HasNullable: rowMap.GetBool("has_nullable"),
}
uniqueKeys = append(uniqueKeys, uniqueKey)
return nil
}, this.migrationContext.DatabaseName, tableName)
if err != nil {
return uniqueKeys, err
}
log.Debugf("Potential unique keys: %+v", uniqueKeys)
return uniqueKeys, nil
}
// getSharedUniqueKeys returns the unique keys shared by the original and ghost tables:
// candidates for chunking
func (this *Inspector) getSharedUniqueKeys() (uniqueKeys [](*sql.UniqueKey), err error) {
originalUniqueKeys, err := this.getCandidateUniqueKeys(this.migrationContext.OriginalTableName)
if err != nil {
return uniqueKeys, err
}
ghostUniqueKeys, err := this.getCandidateUniqueKeys(this.migrationContext.GhostTableName)
if err != nil {
return uniqueKeys, err
}
// We actually do NOT rely on key name, just on the set of columns. This is because maybe
// the ALTER is on the name itself...
for _, originalUniqueKey := range originalUniqueKeys {
for _, ghostUniqueKey := range ghostUniqueKeys {
if originalUniqueKey.Columns.Equals(&ghostUniqueKey.Columns) {
uniqueKeys = append(uniqueKeys, originalUniqueKey)
}
}
}
return uniqueKeys, nil
}

go/logic/migrator.go (new file, 33 lines)

@@ -0,0 +1,33 @@
/*
Copyright 2016 GitHub Inc.
See https://github.com/github/gh-osc/blob/master/LICENSE
*/
package logic
import (
"github.com/github/gh-osc/go/mysql"
)
// Migrator is the main schema migration flow manager.
type Migrator struct {
connectionConfig *mysql.ConnectionConfig
inspector *Inspector
}
func NewMigrator(connectionConfig *mysql.ConnectionConfig) *Migrator {
return &Migrator{
connectionConfig: connectionConfig,
inspector: NewInspector(connectionConfig),
}
}
func (this *Migrator) Migrate() error {
if err := this.inspector.InitDBConnections(); err != nil {
return err
}
if err := this.inspector.InspectTables(); err != nil {
return err
}
return nil
}


@@ -113,10 +113,26 @@ func BuildRangeComparison(columns []string, values []string, comparisonSign Valu
return result, nil
}
func BuildRangePreparedComparison(columns []string, comparisonSign ValueComparisonSign) (result string, err error) {
values := make([]string, len(columns), len(columns))
for i := range columns {
values[i] = "?"
}
return BuildRangeComparison(columns, values, comparisonSign)
}
func BuildRangeInsertQuery(databaseName, originalTableName, ghostTableName string, sharedColumns []string, uniqueKey string, uniqueKeyColumns, rangeStartValues, rangeEndValues []string) (string, error) {
if len(sharedColumns) == 0 {
return "", fmt.Errorf("Got 0 shared columns in BuildRangeInsertQuery")
}
databaseName = EscapeName(databaseName)
originalTableName = EscapeName(originalTableName)
ghostTableName = EscapeName(ghostTableName)
for i := range sharedColumns {
sharedColumns[i] = EscapeName(sharedColumns[i])
}
uniqueKey = EscapeName(uniqueKey)
sharedColumnsListing := strings.Join(sharedColumns, ", ")
rangeStartComparison, err := BuildRangeComparison(uniqueKeyColumns, rangeStartValues, GreaterThanOrEqualsComparisonSign)
if err != nil {
@@ -146,3 +162,49 @@ func BuildRangeInsertPreparedQuery(databaseName, originalTableName, ghostTableNa
}
return BuildRangeInsertQuery(databaseName, originalTableName, ghostTableName, sharedColumns, uniqueKey, uniqueKeyColumns, rangeStartValues, rangeEndValues)
}
func BuildUniqueKeyRangeEndPreparedQuery(databaseName, originalTableName string, uniqueKeyColumns []string, chunkSize int) (string, error) {
if len(uniqueKeyColumns) == 0 {
return "", fmt.Errorf("Got 0 shared columns in BuildRangeInsertQuery")
}
databaseName = EscapeName(databaseName)
originalTableName = EscapeName(originalTableName)
rangeStartComparison, err := BuildRangePreparedComparison(uniqueKeyColumns, GreaterThanComparisonSign)
if err != nil {
return "", err
}
rangeEndComparison, err := BuildRangePreparedComparison(uniqueKeyColumns, LessThanOrEqualsComparisonSign)
if err != nil {
return "", err
}
uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumns), len(uniqueKeyColumns))
uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumns), len(uniqueKeyColumns))
for i := range uniqueKeyColumns {
uniqueKeyColumns[i] = EscapeName(uniqueKeyColumns[i])
uniqueKeyColumnAscending[i] = fmt.Sprintf("%s asc", uniqueKeyColumns[i])
uniqueKeyColumnDescending[i] = fmt.Sprintf("%s desc", uniqueKeyColumns[i])
}
query := fmt.Sprintf(`
select /* gh-osc %s.%s */ %s
from (
select
%s
from
%s.%s
where %s and %s
order by
%s
limit %d
) select_osc_chunk
order by
%s
limit 1
`, databaseName, originalTableName, strings.Join(uniqueKeyColumns, ", "),
strings.Join(uniqueKeyColumns, ", "), databaseName, originalTableName,
rangeStartComparison, rangeEndComparison,
strings.Join(uniqueKeyColumnAscending, ", "), chunkSize,
strings.Join(uniqueKeyColumnDescending, ", "),
)
return query, nil
}


@@ -176,3 +176,32 @@ func TestBuildRangeInsertPreparedQuery(t *testing.T) {
test.S(t).ExpectEquals(normalizeQuery(query), normalizeQuery(expected))
}
}
func TestBuildUniqueKeyRangeEndPreparedQuery(t *testing.T) {
databaseName := "mydb"
originalTableName := "tbl"
chunkSize := 500
{
uniqueKeyColumns := []string{"name", "position"}
query, err := BuildUniqueKeyRangeEndPreparedQuery(databaseName, originalTableName, uniqueKeyColumns, chunkSize)
test.S(t).ExpectNil(err)
expected := `
select /* gh-osc mydb.tbl */ name, position
from (
select
name, position
from
mydb.tbl
where ((name > ?) or (((name = ?)) AND (position > ?))) and ((name < ?) or (((name = ?)) AND (position < ?)) or ((name = ?) and (position = ?)))
order by
name asc, position asc
limit 500
) select_osc_chunk
order by
name desc, position desc
limit 1
`
test.S(t).ExpectEquals(normalizeQuery(query), normalizeQuery(expected))
}
}

go/sql/types.go (new file, 45 lines)

@@ -0,0 +1,45 @@
/*
Copyright 2016 GitHub Inc.
See https://github.com/github/gh-osc/blob/master/LICENSE
*/
package sql
import (
"fmt"
"reflect"
"strings"
)
// ColumnList makes for a named list of columns
type ColumnList []string
// ParseColumnList parses a comma delimited list of column names
func ParseColumnList(columns string) *ColumnList {
result := ColumnList(strings.Split(columns, ","))
return &result
}
func (this *ColumnList) String() string {
return strings.Join(*this, ",")
}
func (this *ColumnList) Equals(other *ColumnList) bool {
return reflect.DeepEqual(*this, *other)
}
// UniqueKey is the combination of a key's name and columns
type UniqueKey struct {
Name string
Columns ColumnList
HasNullable bool
}
// IsPrimary checks if this unique key is primary
func (this *UniqueKey) IsPrimary() bool {
return this.Name == "PRIMARY"
}
func (this *UniqueKey) String() string {
return fmt.Sprintf("%s: %s; has nullable: %+v", this.Name, this.Columns, this.HasNullable)
}
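A minimal usage sketch (not part of this diff) of the types above, mirroring how getSharedUniqueKeys compares keys by column set rather than by name:

```go
package main

import (
	"fmt"

	"github.com/github/gh-osc/go/sql"
)

func main() {
	original := &sql.UniqueKey{Name: "PRIMARY", Columns: *sql.ParseColumnList("id")}
	ghost := &sql.UniqueKey{Name: "uidx_id", Columns: *sql.ParseColumnList("id")}

	// Keys are considered shared when their column sets match, regardless of key name.
	fmt.Println(original.Columns.Equals(&ghost.Columns)) // true
	fmt.Println(original.IsPrimary())                    // true
	fmt.Println(ghost.IsPrimary())                       // false
}
```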

vendor/github.com/go-sql-driver/mysql/.gitignore (generated, vendored; new file, 8 lines)

@@ -0,0 +1,8 @@
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
Icon?
ehthumbs.db
Thumbs.db

vendor/github.com/go-sql-driver/mysql/.travis.yml (generated, vendored; new file, 10 lines)

@@ -0,0 +1,10 @@
sudo: false
language: go
go:
- 1.2
- 1.3
- 1.4
- tip
before_script:
- mysql -e 'create database gotest;'

vendor/github.com/go-sql-driver/mysql/AUTHORS (generated, vendored; new file, 46 lines)

@@ -0,0 +1,46 @@
# This is the official list of Go-MySQL-Driver authors for copyright purposes.
# If you are submitting a patch, please add your name or the name of the
# organization which holds the copyright to this list in alphabetical order.
# Names should be added to this file as
# Name <email address>
# The email address is not required for organizations.
# Please keep the list sorted.
# Individual Persons
Aaron Hopkins <go-sql-driver at die.net>
Arne Hormann <arnehormann at gmail.com>
Carlos Nieto <jose.carlos at menteslibres.net>
Chris Moos <chris at tech9computers.com>
DisposaBoy <disposaboy at dby.me>
Frederick Mayle <frederickmayle at gmail.com>
Gustavo Kristic <gkristic at gmail.com>
Hanno Braun <mail at hannobraun.com>
Henri Yandell <flamefew at gmail.com>
Hirotaka Yamamoto <ymmt2005 at gmail.com>
INADA Naoki <songofacandy at gmail.com>
James Harr <james.harr at gmail.com>
Jian Zhen <zhenjl at gmail.com>
Joshua Prunier <joshua.prunier at gmail.com>
Julien Schmidt <go-sql-driver at julienschmidt.com>
Kamil Dziedzic <kamil at klecza.pl>
Leonardo YongUk Kim <dalinaum at gmail.com>
Lucas Liu <extrafliu at gmail.com>
Luke Scott <luke at webconnex.com>
Michael Woolnough <michael.woolnough at gmail.com>
Nicola Peduzzi <thenikso at gmail.com>
Runrioter Wung <runrioter at gmail.com>
Soroush Pour <me at soroushjp.com>
Stan Putrya <root.vagner at gmail.com>
Xiaobing Jiang <s7v7nislands at gmail.com>
Xiuming Chen <cc at cxm.cc>
Julien Lefevre <julien.lefevr at gmail.com>
# Organizations
Barracuda Networks, Inc.
Google Inc.
Stripe Inc.

vendor/github.com/go-sql-driver/mysql/CHANGELOG.md (generated, vendored; new file, 92 lines)

@@ -0,0 +1,92 @@
## HEAD
Changes:
- Go 1.1 is no longer supported
- Use decimals field from MySQL to format time types (#249)
- Buffer optimizations (#269)
- TLS ServerName defaults to the host (#283)
Bugfixes:
- Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
- Fixed handling of queries without columns and rows (#255)
- Fixed a panic when SetKeepAlive() failed (#298)
New Features:
- Support for returning table alias on Columns() (#289)
- Placeholder interpolation, can be activated with the DSN parameter `interpolateParams=true` (#309, #318)
## Version 1.2 (2014-06-03)
Changes:
- We switched back to a "rolling release". `go get` installs the current master branch again
- Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver
- Exported errors to allow easy checking from application code
- Enabled TCP Keepalives on TCP connections
- Optimized INFILE handling (better buffer size calculation, lazy init, ...)
- The DSN parser also checks for a missing separating slash
- Faster binary date / datetime to string formatting
- Also exported the MySQLWarning type
- mysqlConn.Close returns the first error encountered instead of ignoring all errors
- writePacket() automatically writes the packet size to the header
- readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets
New Features:
- `RegisterDial` allows the usage of a custom dial function to establish the network connection
- Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter
- Logging of critical errors is configurable with `SetLogger`
- Google CloudSQL support
Bugfixes:
- Allow more than 32 parameters in prepared statements
- Various old_password fixes
- Fixed TestConcurrent test to pass Go's race detection
- Fixed appendLengthEncodedInteger for large numbers
- Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo)
## Version 1.1 (2013-11-02)
Changes:
- Go-MySQL-Driver now requires Go 1.1
- Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore
- Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors
- `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")`
- DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'.
- Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries
- Optimized the buffer for reading
- stmt.Query now caches column metadata
- New Logo
- Changed the copyright header to include all contributors
- Improved the LOAD INFILE documentation
- The driver struct is now exported to make the driver directly accessible
- Refactored the driver tests
- Added more benchmarks and moved all to a separate file
- Other small refactoring
New Features:
- Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure
- Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs
- Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used
Bugfixes:
- Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
- Convert to DB timezone when inserting `time.Time`
- Splitted packets (more than 16MB) are now merged correctly
- Fixed false positive `io.EOF` errors when the data was fully read
- Avoid panics on reuse of closed connections
- Fixed empty string producing false nil values
- Fixed sign byte for positive TIME fields
## Version 1.0 (2013-05-14)
Initial Release

vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md (generated, vendored; new file, 40 lines)

@@ -0,0 +1,40 @@
# Contributing Guidelines
## Reporting Issues
Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed).
Please provide the following minimum information:
* Your Go-MySQL-Driver version (or git SHA)
* Your Go version (run `go version` in your console)
* A detailed issue description
* Error Log if present
* If possible, a short example
## Contributing Code
By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file.
Don't forget to add yourself to the AUTHORS file.
### Pull Requests Checklist
Please check the following points before submitting your pull request:
- [x] Code compiles correctly
- [x] Created tests, if possible
- [x] All tests pass
- [x] Extended the README / documentation, if necessary
- [x] Added yourself to the AUTHORS file
### Code Review
Everyone is invited to review and comment on pull requests.
If it looks fine to you, comment with "LGTM" (Looks good to me).
If changes are required, notify the reviewers with "PTAL" (Please take another look) after committing the fixes.
Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM".
## Development Ideas
If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page.

vendor/github.com/go-sql-driver/mysql/LICENSE (generated, vendored; new file, 373 lines)

@@ -0,0 +1,373 @@
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.

vendor/github.com/go-sql-driver/mysql/README.md (generated, vendored; new file, 386 lines)

@@ -0,0 +1,386 @@
# Go-MySQL-Driver
A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) package
![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
**Latest stable Release:** [Version 1.2 (June 03, 2014)](https://github.com/go-sql-driver/mysql/releases)
[![Build Status](https://travis-ci.org/go-sql-driver/mysql.png?branch=master)](https://travis-ci.org/go-sql-driver/mysql)
---------------------------------------
* [Features](#features)
* [Requirements](#requirements)
* [Installation](#installation)
* [Usage](#usage)
* [DSN (Data Source Name)](#dsn-data-source-name)
* [Password](#password)
* [Protocol](#protocol)
* [Address](#address)
* [Parameters](#parameters)
* [Examples](#examples)
* [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
* [time.Time support](#timetime-support)
* [Unicode support](#unicode-support)
* [Testing / Development](#testing--development)
* [License](#license)
---------------------------------------
## Features
* Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
* Native Go implementation. No C-bindings, just pure Go
* Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](http://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
* Automatic handling of broken connections
* Automatic Connection Pooling *(by database/sql package)*
* Supports queries larger than 16MB
* Full [`sql.RawBytes`](http://golang.org/pkg/database/sql/#RawBytes) support.
* Intelligent `LONG DATA` handling in prepared statements
* Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
* Optional `time.Time` parsing
* Optional placeholder interpolation
## Requirements
* Go 1.2 or higher
* MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
---------------------------------------
## Installation
Simply install the package to your [$GOPATH](http://code.google.com/p/go-wiki/wiki/GOPATH "GOPATH") with the [go tool](http://golang.org/cmd/go/ "go command") from shell:
```bash
$ go get github.com/go-sql-driver/mysql
```
Make sure [Git is installed](http://git-scm.com/downloads) on your machine and in your system's `PATH`.
## Usage
_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](http://golang.org/pkg/database/sql) API then.
Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
```go
import "database/sql"
import _ "github.com/go-sql-driver/mysql"
db, err := sql.Open("mysql", "user:password@/dbname")
```
[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
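For illustration, a minimal query loop might look as follows (a sketch; the `users` table and its columns are placeholders):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Placeholders are sent to the server as a prepared statement by default.
	rows, err := db.Query("SELECT id, name FROM users WHERE id > ?", 100)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var (
			id   int64
			name string
		)
		if err := rows.Scan(&id, &name); err != nil {
			log.Fatal(err)
		}
		log.Printf("%d: %s", id, name)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```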
### DSN (Data Source Name)
The Data Source Name has a common format, like e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without type-prefix (optional parts marked by squared brackets):
```
[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]
```
A DSN in its fullest form:
```
username:password@protocol(address)/dbname?param=value
```
Except for the database name, all values are optional. So the minimal DSN is:
```
/dbname
```
If you do not want to preselect a database, leave `dbname` empty:
```
/
```
This has the same effect as an empty DSN string:
```
```
#### Password
Passwords can consist of any character. Escaping is **not** necessary.
#### Protocol
See [net.Dial](http://golang.org/pkg/net/#Dial) for more information which networks are available.
In general you should use a Unix domain socket if available and TCP otherwise for best performance.
#### Address
For TCP and UDP networks, addresses have the form `host:port`.
If `host` is a literal IPv6 address, it must be enclosed in square brackets.
The functions [net.JoinHostPort](http://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](http://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
#### Parameters
*Parameters are case-sensitive!*
Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`.
##### `allowAllFiles`
```
Type: bool
Valid Values: true, false
Default: false
```
`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files.
[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
##### `allowCleartextPasswords`
```
Type: bool
Valid Values: true, false
Default: false
```
`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
##### `allowOldPasswords`
```
Type: bool
Valid Values: true, false
Default: false
```
`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords).
##### `charset`
```
Type: string
Valid Values: <name>
Default: none
```
Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
Unless you need the fallback behavior, please use `collation` instead.
##### `collation`
```
Type: string
Valid Values: <name>
Default: utf8_general_ci
```
Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
A list of valid collations for a server is retrievable with `SHOW COLLATION`.
##### `clientFoundRows`
```
Type: bool
Valid Values: true, false
Default: false
```
`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
##### `columnsWithAlias`
```
Type: bool
Valid Values: true, false
Default: false
```
When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
```
SELECT u.id FROM users as u
```
will return `u.id` instead of just `id` if `columnsWithAlias=true`.
##### `interpolateParams`
```
Type: bool
Valid Values: true, false
Default: false
```
If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with the given parameters. This reduces the number of roundtrips: with `interpolateParams=false` the driver instead has to prepare a statement, execute it with the given parameters, and close the statement again.
*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
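For illustration, a sketch of enabling it via the DSN (credentials and the `users` table are placeholders):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// With interpolateParams=true, the ? placeholders below are expanded
	// client-side, so the statement runs in a single roundtrip.
	db, err := sql.Open("mysql", "user:password@/dbname?interpolateParams=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec("UPDATE users SET name = ? WHERE id = ?", "alice", 1); err != nil {
		log.Fatal(err)
	}
}
```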
##### `loc`
```
Type: string
Valid Values: <escaped name>
Default: UTC
```
Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](http://golang.org/pkg/time/#LoadLocation) for details.
Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
Please keep in mind, that param values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
##### `parseTime`
```
Type: bool
Valid Values: true, false
Default: false
```
`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`
##### `strict`
```
Type: bool
Valid Values: true, false
Default: false
```
`strict=true` enables the strict mode in which MySQL warnings are treated as errors.
By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes. See the [examples](#examples) for a DSN example.
##### `timeout`
```
Type: decimal number
Default: OS default
```
*Driver* side connection timeout. The value must be a string of decimal numbers, each with optional fraction and a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout).
##### `tls`
```
Type: bool / string
Valid Values: true, false, skip-verify, <name>
Default: false
```
`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
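For illustration, a sketch of registering a custom TLS config (the CA path and host are placeholders):

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"database/sql"
	"io/ioutil"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Build a cert pool from a CA certificate and register it under a name.
	rootCertPool := x509.NewCertPool()
	pem, err := ioutil.ReadFile("/path/to/ca-cert.pem")
	if err != nil {
		log.Fatal(err)
	}
	if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
		log.Fatal("failed to append CA certificate")
	}
	if err := mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: rootCertPool}); err != nil {
		log.Fatal(err)
	}

	// Reference the registered config by name in the DSN.
	db, err := sql.Open("mysql", "user:password@tcp(db.example.com:3306)/dbname?tls=custom")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```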
##### System Variables
All other parameters are interpreted as system variables:
* `autocommit`: `"SET autocommit=<value>"`
* [`time_zone`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `"SET time_zone=<value>"`
* [`tx_isolation`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `"SET tx_isolation=<value>"`
* `param`: `"SET <param>=<value>"`
*The values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!*
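For illustration, a sketch of setting `time_zone` through the DSN with proper escaping (connection values are placeholders):

```go
package main

import (
	"database/sql"
	"fmt"
	"log"
	"net/url"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// The value, including its quotes, must be url.QueryEscape'ed:
	// 'UTC' becomes %27UTC%27, yielding "SET time_zone='UTC'" on connect.
	dsn := fmt.Sprintf("user:password@/dbname?time_zone=%s", url.QueryEscape("'UTC'"))
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```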
#### Examples
```
user@unix(/path/to/socket)/dbname
```
```
root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
```
```
user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
```
Use the [strict mode](#strict) but ignore notes:
```
user:password@/dbname?strict=true&sql_notes=false
```
TCP via IPv6:
```
user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
```
TCP on a remote host, e.g. Amazon RDS:
```
id:password@tcp(your-amazonaws-uri.com:3306)/dbname
```
Google Cloud SQL on App Engine:
```
user@cloudsql(project-id:instance-name)/dbname
```
TCP using default port (3306) on localhost:
```
user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
```
Use the default protocol (tcp) and host (localhost:3306):
```
user:password@/dbname
```
No Database preselected:
```
user:password@/
```
### `LOAD DATA LOCAL INFILE` support
For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
```go
import "github.com/go-sql-driver/mysql"
```
Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended), or the whitelist check must be deactivated with the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
To use an `io.Reader`, a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)`; the handler returns an `io.Reader` or `io.ReadCloser`. The Reader is then available under the filepath `Reader::<name>`. Choose different names for different handlers and call `DeregisterReaderHandler` when a handler is no longer needed.
See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
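A hedged sketch combining both approaches; the file path, table name and reader contents are made up:

```go
package main

import (
	"database/sql"
	"io"
	"log"
	"strings"

	"github.com/go-sql-driver/mysql" // direct import: also registers the driver
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Whitelist a concrete file...
	mysql.RegisterLocalFile("/home/gopher/data.csv")
	if _, err := db.Exec("LOAD DATA LOCAL INFILE '/home/gopher/data.csv' INTO TABLE foo"); err != nil {
		log.Fatal(err)
	}

	// ...or feed the data from an io.Reader registered under a name.
	mysql.RegisterReaderHandler("data", func() io.Reader {
		return strings.NewReader("1\n2\n3\n")
	})
	defer mysql.DeregisterReaderHandler("data")
	if _, err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo"); err != nil {
		log.Fatal(err)
	}
}
```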
### `time.Time` support
The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte`, which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](http://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
Alternatively you can use the [`NullTime`](http://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
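A minimal sketch, assuming a nullable `DATETIME` column `deleted_at` in a made-up `example_table`:

```go
package main

import (
	"database/sql"
	"log"

	"github.com/go-sql-driver/mysql" // direct import: also registers the driver
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname?parseTime=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var nt mysql.NullTime
	// NullTime handles both NULL and non-NULL values.
	if err := db.QueryRow("SELECT deleted_at FROM example_table LIMIT 1").Scan(&nt); err != nil {
		log.Fatal(err)
	}
	if nt.Valid {
		log.Println("deleted at", nt.Time)
	} else {
		log.Println("never deleted (NULL)")
	}
}
```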
### Unicode support
Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default.
Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is no longer necessary. The [`collation`](#collation) parameter should be preferred for setting a collation / charset other than the default.
See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
## Testing / Development
To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
Go-MySQL-Driver is not feature-complete yet. Your help is very appreciated.
If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details.
---------------------------------------
## License
Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
Mozilla summarizes the license scope as follows:
> MPL: The copyleft applies to any files containing MPLed code.
That means:
* You can **use** the **unchanged** source code both in private and commercially
* When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0)
* You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**
Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license.
You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")

19
vendor/github.com/go-sql-driver/mysql/appengine.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// +build appengine
package mysql
import (
"appengine/cloudsql"
)
func init() {
RegisterDial("cloudsql", cloudsql.Dial)
}

136
vendor/github.com/go-sql-driver/mysql/buffer.go generated vendored Normal file
View File

@ -0,0 +1,136 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
import "io"
const defaultBufSize = 4096
// A buffer which is used for both reading and writing.
// This is possible since communication on each connection is synchronous.
// In other words, we can't write and read simultaneously on the same connection.
// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
// Also highly optimized for this particular use case.
type buffer struct {
buf []byte
rd io.Reader
idx int
length int
}
func newBuffer(rd io.Reader) buffer {
var b [defaultBufSize]byte
return buffer{
buf: b[:],
rd: rd,
}
}
// fill reads into the buffer until at least _need_ bytes are in it
func (b *buffer) fill(need int) error {
n := b.length
// move existing data to the beginning
if n > 0 && b.idx > 0 {
copy(b.buf[0:n], b.buf[b.idx:])
}
// grow buffer if necessary
// TODO: let the buffer shrink again at some point
// Maybe keep the org buf slice and swap back?
if need > len(b.buf) {
// Round up to the next multiple of the default size
newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
copy(newBuf, b.buf)
b.buf = newBuf
}
b.idx = 0
for {
nn, err := b.rd.Read(b.buf[n:])
n += nn
switch err {
case nil:
if n < need {
continue
}
b.length = n
return nil
case io.EOF:
if n >= need {
b.length = n
return nil
}
return io.ErrUnexpectedEOF
default:
return err
}
}
}
// returns next N bytes from buffer.
// The returned slice is only guaranteed to be valid until the next read
func (b *buffer) readNext(need int) ([]byte, error) {
if b.length < need {
// refill
if err := b.fill(need); err != nil {
return nil, err
}
}
offset := b.idx
b.idx += need
b.length -= need
return b.buf[offset:b.idx], nil
}
// returns a buffer with the requested size.
// If possible, a slice from the existing buffer is returned.
// Otherwise a bigger buffer is made.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeBuffer(length int) []byte {
if b.length > 0 {
return nil
}
// test (cheap) general case first
if length <= defaultBufSize || length <= cap(b.buf) {
return b.buf[:length]
}
if length < maxPacketSize {
b.buf = make([]byte, length)
return b.buf
}
return make([]byte, length)
}
// shortcut which can be used if the requested buffer is guaranteed to be
// smaller than defaultBufSize
// Only one buffer (total) can be used at a time.
func (b *buffer) takeSmallBuffer(length int) []byte {
if b.length == 0 {
return b.buf[:length]
}
return nil
}
// takeCompleteBuffer returns the complete existing buffer.
// This can be used if the necessary buffer size is unknown.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeCompleteBuffer() []byte {
if b.length == 0 {
return b.buf
}
return nil
}

250
vendor/github.com/go-sql-driver/mysql/collations.go generated vendored Normal file
View File

@ -0,0 +1,250 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
const defaultCollation byte = 33 // utf8_general_ci
// A list of available collations mapped to the internal ID.
// To update this map use the following MySQL query:
// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS
var collations = map[string]byte{
"big5_chinese_ci": 1,
"latin2_czech_cs": 2,
"dec8_swedish_ci": 3,
"cp850_general_ci": 4,
"latin1_german1_ci": 5,
"hp8_english_ci": 6,
"koi8r_general_ci": 7,
"latin1_swedish_ci": 8,
"latin2_general_ci": 9,
"swe7_swedish_ci": 10,
"ascii_general_ci": 11,
"ujis_japanese_ci": 12,
"sjis_japanese_ci": 13,
"cp1251_bulgarian_ci": 14,
"latin1_danish_ci": 15,
"hebrew_general_ci": 16,
"tis620_thai_ci": 18,
"euckr_korean_ci": 19,
"latin7_estonian_cs": 20,
"latin2_hungarian_ci": 21,
"koi8u_general_ci": 22,
"cp1251_ukrainian_ci": 23,
"gb2312_chinese_ci": 24,
"greek_general_ci": 25,
"cp1250_general_ci": 26,
"latin2_croatian_ci": 27,
"gbk_chinese_ci": 28,
"cp1257_lithuanian_ci": 29,
"latin5_turkish_ci": 30,
"latin1_german2_ci": 31,
"armscii8_general_ci": 32,
"utf8_general_ci": 33,
"cp1250_czech_cs": 34,
"ucs2_general_ci": 35,
"cp866_general_ci": 36,
"keybcs2_general_ci": 37,
"macce_general_ci": 38,
"macroman_general_ci": 39,
"cp852_general_ci": 40,
"latin7_general_ci": 41,
"latin7_general_cs": 42,
"macce_bin": 43,
"cp1250_croatian_ci": 44,
"utf8mb4_general_ci": 45,
"utf8mb4_bin": 46,
"latin1_bin": 47,
"latin1_general_ci": 48,
"latin1_general_cs": 49,
"cp1251_bin": 50,
"cp1251_general_ci": 51,
"cp1251_general_cs": 52,
"macroman_bin": 53,
"utf16_general_ci": 54,
"utf16_bin": 55,
"utf16le_general_ci": 56,
"cp1256_general_ci": 57,
"cp1257_bin": 58,
"cp1257_general_ci": 59,
"utf32_general_ci": 60,
"utf32_bin": 61,
"utf16le_bin": 62,
"binary": 63,
"armscii8_bin": 64,
"ascii_bin": 65,
"cp1250_bin": 66,
"cp1256_bin": 67,
"cp866_bin": 68,
"dec8_bin": 69,
"greek_bin": 70,
"hebrew_bin": 71,
"hp8_bin": 72,
"keybcs2_bin": 73,
"koi8r_bin": 74,
"koi8u_bin": 75,
"latin2_bin": 77,
"latin5_bin": 78,
"latin7_bin": 79,
"cp850_bin": 80,
"cp852_bin": 81,
"swe7_bin": 82,
"utf8_bin": 83,
"big5_bin": 84,
"euckr_bin": 85,
"gb2312_bin": 86,
"gbk_bin": 87,
"sjis_bin": 88,
"tis620_bin": 89,
"ucs2_bin": 90,
"ujis_bin": 91,
"geostd8_general_ci": 92,
"geostd8_bin": 93,
"latin1_spanish_ci": 94,
"cp932_japanese_ci": 95,
"cp932_bin": 96,
"eucjpms_japanese_ci": 97,
"eucjpms_bin": 98,
"cp1250_polish_ci": 99,
"utf16_unicode_ci": 101,
"utf16_icelandic_ci": 102,
"utf16_latvian_ci": 103,
"utf16_romanian_ci": 104,
"utf16_slovenian_ci": 105,
"utf16_polish_ci": 106,
"utf16_estonian_ci": 107,
"utf16_spanish_ci": 108,
"utf16_swedish_ci": 109,
"utf16_turkish_ci": 110,
"utf16_czech_ci": 111,
"utf16_danish_ci": 112,
"utf16_lithuanian_ci": 113,
"utf16_slovak_ci": 114,
"utf16_spanish2_ci": 115,
"utf16_roman_ci": 116,
"utf16_persian_ci": 117,
"utf16_esperanto_ci": 118,
"utf16_hungarian_ci": 119,
"utf16_sinhala_ci": 120,
"utf16_german2_ci": 121,
"utf16_croatian_ci": 122,
"utf16_unicode_520_ci": 123,
"utf16_vietnamese_ci": 124,
"ucs2_unicode_ci": 128,
"ucs2_icelandic_ci": 129,
"ucs2_latvian_ci": 130,
"ucs2_romanian_ci": 131,
"ucs2_slovenian_ci": 132,
"ucs2_polish_ci": 133,
"ucs2_estonian_ci": 134,
"ucs2_spanish_ci": 135,
"ucs2_swedish_ci": 136,
"ucs2_turkish_ci": 137,
"ucs2_czech_ci": 138,
"ucs2_danish_ci": 139,
"ucs2_lithuanian_ci": 140,
"ucs2_slovak_ci": 141,
"ucs2_spanish2_ci": 142,
"ucs2_roman_ci": 143,
"ucs2_persian_ci": 144,
"ucs2_esperanto_ci": 145,
"ucs2_hungarian_ci": 146,
"ucs2_sinhala_ci": 147,
"ucs2_german2_ci": 148,
"ucs2_croatian_ci": 149,
"ucs2_unicode_520_ci": 150,
"ucs2_vietnamese_ci": 151,
"ucs2_general_mysql500_ci": 159,
"utf32_unicode_ci": 160,
"utf32_icelandic_ci": 161,
"utf32_latvian_ci": 162,
"utf32_romanian_ci": 163,
"utf32_slovenian_ci": 164,
"utf32_polish_ci": 165,
"utf32_estonian_ci": 166,
"utf32_spanish_ci": 167,
"utf32_swedish_ci": 168,
"utf32_turkish_ci": 169,
"utf32_czech_ci": 170,
"utf32_danish_ci": 171,
"utf32_lithuanian_ci": 172,
"utf32_slovak_ci": 173,
"utf32_spanish2_ci": 174,
"utf32_roman_ci": 175,
"utf32_persian_ci": 176,
"utf32_esperanto_ci": 177,
"utf32_hungarian_ci": 178,
"utf32_sinhala_ci": 179,
"utf32_german2_ci": 180,
"utf32_croatian_ci": 181,
"utf32_unicode_520_ci": 182,
"utf32_vietnamese_ci": 183,
"utf8_unicode_ci": 192,
"utf8_icelandic_ci": 193,
"utf8_latvian_ci": 194,
"utf8_romanian_ci": 195,
"utf8_slovenian_ci": 196,
"utf8_polish_ci": 197,
"utf8_estonian_ci": 198,
"utf8_spanish_ci": 199,
"utf8_swedish_ci": 200,
"utf8_turkish_ci": 201,
"utf8_czech_ci": 202,
"utf8_danish_ci": 203,
"utf8_lithuanian_ci": 204,
"utf8_slovak_ci": 205,
"utf8_spanish2_ci": 206,
"utf8_roman_ci": 207,
"utf8_persian_ci": 208,
"utf8_esperanto_ci": 209,
"utf8_hungarian_ci": 210,
"utf8_sinhala_ci": 211,
"utf8_german2_ci": 212,
"utf8_croatian_ci": 213,
"utf8_unicode_520_ci": 214,
"utf8_vietnamese_ci": 215,
"utf8_general_mysql500_ci": 223,
"utf8mb4_unicode_ci": 224,
"utf8mb4_icelandic_ci": 225,
"utf8mb4_latvian_ci": 226,
"utf8mb4_romanian_ci": 227,
"utf8mb4_slovenian_ci": 228,
"utf8mb4_polish_ci": 229,
"utf8mb4_estonian_ci": 230,
"utf8mb4_spanish_ci": 231,
"utf8mb4_swedish_ci": 232,
"utf8mb4_turkish_ci": 233,
"utf8mb4_czech_ci": 234,
"utf8mb4_danish_ci": 235,
"utf8mb4_lithuanian_ci": 236,
"utf8mb4_slovak_ci": 237,
"utf8mb4_spanish2_ci": 238,
"utf8mb4_roman_ci": 239,
"utf8mb4_persian_ci": 240,
"utf8mb4_esperanto_ci": 241,
"utf8mb4_hungarian_ci": 242,
"utf8mb4_sinhala_ci": 243,
"utf8mb4_german2_ci": 244,
"utf8mb4_croatian_ci": 245,
"utf8mb4_unicode_520_ci": 246,
"utf8mb4_vietnamese_ci": 247,
}
// A blacklist of collations which are unsafe for interpolating parameters.
// These multibyte encodings may contain 0x5c (`\`) in their trailing bytes.
var unsafeCollations = map[byte]bool{
1: true, // big5_chinese_ci
13: true, // sjis_japanese_ci
28: true, // gbk_chinese_ci
84: true, // big5_bin
86: true, // gb2312_bin
87: true, // gbk_bin
88: true, // sjis_bin
95: true, // cp932_japanese_ci
96: true, // cp932_bin
}

403
vendor/github.com/go-sql-driver/mysql/connection.go generated vendored Normal file
View File

@ -0,0 +1,403 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
import (
"crypto/tls"
"database/sql/driver"
"errors"
"net"
"strconv"
"strings"
"time"
)
type mysqlConn struct {
buf buffer
netConn net.Conn
affectedRows uint64
insertId uint64
cfg *config
maxPacketAllowed int
maxWriteSize int
flags clientFlag
status statusFlag
sequence uint8
parseTime bool
strict bool
}
type config struct {
user string
passwd string
net string
addr string
dbname string
params map[string]string
loc *time.Location
tls *tls.Config
timeout time.Duration
collation uint8
allowAllFiles bool
allowOldPasswords bool
allowCleartextPasswords bool
clientFoundRows bool
columnsWithAlias bool
interpolateParams bool
}
// Handles parameters set in DSN after the connection is established
func (mc *mysqlConn) handleParams() (err error) {
for param, val := range mc.cfg.params {
switch param {
// Charset
case "charset":
charsets := strings.Split(val, ",")
for i := range charsets {
// ignore errors here - a charset may not exist
err = mc.exec("SET NAMES " + charsets[i])
if err == nil {
break
}
}
if err != nil {
return
}
// time.Time parsing
case "parseTime":
var isBool bool
mc.parseTime, isBool = readBool(val)
if !isBool {
return errors.New("Invalid Bool value: " + val)
}
// Strict mode
case "strict":
var isBool bool
mc.strict, isBool = readBool(val)
if !isBool {
return errors.New("Invalid Bool value: " + val)
}
// Compression
case "compress":
err = errors.New("Compression not implemented yet")
return
// System Vars
default:
err = mc.exec("SET " + param + "=" + val + "")
if err != nil {
return
}
}
}
return
}
func (mc *mysqlConn) Begin() (driver.Tx, error) {
if mc.netConn == nil {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
err := mc.exec("START TRANSACTION")
if err == nil {
return &mysqlTx{mc}, err
}
return nil, err
}
func (mc *mysqlConn) Close() (err error) {
// Makes Close idempotent
if mc.netConn != nil {
err = mc.writeCommandPacket(comQuit)
if err == nil {
err = mc.netConn.Close()
} else {
mc.netConn.Close()
}
mc.netConn = nil
}
mc.cfg = nil
mc.buf.rd = nil
return
}
func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
if mc.netConn == nil {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := mc.writeCommandPacketStr(comStmtPrepare, query)
if err != nil {
return nil, err
}
stmt := &mysqlStmt{
mc: mc,
}
// Read Result
columnCount, err := stmt.readPrepareResultPacket()
if err == nil {
if stmt.paramCount > 0 {
if err = mc.readUntilEOF(); err != nil {
return nil, err
}
}
if columnCount > 0 {
err = mc.readUntilEOF()
}
}
return stmt, err
}
func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
buf := mc.buf.takeCompleteBuffer()
if buf == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
return "", driver.ErrBadConn
}
buf = buf[:0]
argPos := 0
for i := 0; i < len(query); i++ {
q := strings.IndexByte(query[i:], '?')
if q == -1 {
buf = append(buf, query[i:]...)
break
}
buf = append(buf, query[i:i+q]...)
i += q
arg := args[argPos]
argPos++
if arg == nil {
buf = append(buf, "NULL"...)
continue
}
switch v := arg.(type) {
case int64:
buf = strconv.AppendInt(buf, v, 10)
case float64:
buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
case bool:
if v {
buf = append(buf, '1')
} else {
buf = append(buf, '0')
}
case time.Time:
if v.IsZero() {
buf = append(buf, "'0000-00-00'"...)
} else {
v := v.In(mc.cfg.loc)
v = v.Add(time.Nanosecond * 500) // To round under microsecond
year := v.Year()
year100 := year / 100
year1 := year % 100
month := v.Month()
day := v.Day()
hour := v.Hour()
minute := v.Minute()
second := v.Second()
micro := v.Nanosecond() / 1000
buf = append(buf, []byte{
'\'',
digits10[year100], digits01[year100],
digits10[year1], digits01[year1],
'-',
digits10[month], digits01[month],
'-',
digits10[day], digits01[day],
' ',
digits10[hour], digits01[hour],
':',
digits10[minute], digits01[minute],
':',
digits10[second], digits01[second],
}...)
if micro != 0 {
micro10000 := micro / 10000
micro100 := micro / 100 % 100
micro1 := micro % 100
buf = append(buf, []byte{
'.',
digits10[micro10000], digits01[micro10000],
digits10[micro100], digits01[micro100],
digits10[micro1], digits01[micro1],
}...)
}
buf = append(buf, '\'')
}
case []byte:
if v == nil {
buf = append(buf, "NULL"...)
} else {
buf = append(buf, "_binary'"...)
if mc.status&statusNoBackslashEscapes == 0 {
buf = escapeBytesBackslash(buf, v)
} else {
buf = escapeBytesQuotes(buf, v)
}
buf = append(buf, '\'')
}
case string:
buf = append(buf, '\'')
if mc.status&statusNoBackslashEscapes == 0 {
buf = escapeStringBackslash(buf, v)
} else {
buf = escapeStringQuotes(buf, v)
}
buf = append(buf, '\'')
default:
return "", driver.ErrSkip
}
if len(buf)+4 > mc.maxPacketAllowed {
return "", driver.ErrSkip
}
}
if argPos != len(args) {
return "", driver.ErrSkip
}
return string(buf), nil
}
func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
if mc.netConn == nil {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
if len(args) != 0 {
if !mc.cfg.interpolateParams {
return nil, driver.ErrSkip
}
// try to interpolate the parameters to save extra roundtrips for preparing and closing a statement
prepared, err := mc.interpolateParams(query, args)
if err != nil {
return nil, err
}
query = prepared
args = nil
}
mc.affectedRows = 0
mc.insertId = 0
err := mc.exec(query)
if err == nil {
return &mysqlResult{
affectedRows: int64(mc.affectedRows),
insertId: int64(mc.insertId),
}, err
}
return nil, err
}
// Internal function to execute commands
func (mc *mysqlConn) exec(query string) error {
// Send command
err := mc.writeCommandPacketStr(comQuery, query)
if err != nil {
return err
}
// Read Result
resLen, err := mc.readResultSetHeaderPacket()
if err == nil && resLen > 0 {
if err = mc.readUntilEOF(); err != nil {
return err
}
err = mc.readUntilEOF()
}
return err
}
func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
if mc.netConn == nil {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
if len(args) != 0 {
if !mc.cfg.interpolateParams {
return nil, driver.ErrSkip
}
// try client-side prepare to reduce roundtrip
prepared, err := mc.interpolateParams(query, args)
if err != nil {
return nil, err
}
query = prepared
args = nil
}
// Send command
err := mc.writeCommandPacketStr(comQuery, query)
if err == nil {
// Read Result
var resLen int
resLen, err = mc.readResultSetHeaderPacket()
if err == nil {
rows := new(textRows)
rows.mc = mc
if resLen == 0 {
// no columns, no more data
return emptyRows{}, nil
}
// Columns
rows.columns, err = mc.readColumns(resLen)
return rows, err
}
}
return nil, err
}
// Gets the value of the given MySQL System Variable
// The returned byte slice is only valid until the next read
func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
// Send command
if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
return nil, err
}
// Read Result
resLen, err := mc.readResultSetHeaderPacket()
if err == nil {
rows := new(textRows)
rows.mc = mc
if resLen > 0 {
// Columns
if err := mc.readUntilEOF(); err != nil {
return nil, err
}
}
dest := make([]driver.Value, resLen)
if err = rows.readRow(dest); err == nil {
return dest[0].([]byte), mc.readUntilEOF()
}
}
return nil, err
}

162
vendor/github.com/go-sql-driver/mysql/const.go generated vendored Normal file
View File

@ -0,0 +1,162 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
const (
minProtocolVersion byte = 10
maxPacketSize = 1<<24 - 1
timeFormat = "2006-01-02 15:04:05.999999"
)
// MySQL constants documentation:
// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
const (
iOK byte = 0x00
iLocalInFile byte = 0xfb
iEOF byte = 0xfe
iERR byte = 0xff
)
// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags
type clientFlag uint32
const (
clientLongPassword clientFlag = 1 << iota
clientFoundRows
clientLongFlag
clientConnectWithDB
clientNoSchema
clientCompress
clientODBC
clientLocalFiles
clientIgnoreSpace
clientProtocol41
clientInteractive
clientSSL
clientIgnoreSIGPIPE
clientTransactions
clientReserved
clientSecureConn
clientMultiStatements
clientMultiResults
clientPSMultiResults
clientPluginAuth
clientConnectAttrs
clientPluginAuthLenEncClientData
clientCanHandleExpiredPasswords
clientSessionTrack
clientDeprecateEOF
)
const (
comQuit byte = iota + 1
comInitDB
comQuery
comFieldList
comCreateDB
comDropDB
comRefresh
comShutdown
comStatistics
comProcessInfo
comConnect
comProcessKill
comDebug
comPing
comTime
comDelayedInsert
comChangeUser
comBinlogDump
comTableDump
comConnectOut
comRegisterSlave
comStmtPrepare
comStmtExecute
comStmtSendLongData
comStmtClose
comStmtReset
comSetOption
comStmtFetch
)
// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
const (
fieldTypeDecimal byte = iota
fieldTypeTiny
fieldTypeShort
fieldTypeLong
fieldTypeFloat
fieldTypeDouble
fieldTypeNULL
fieldTypeTimestamp
fieldTypeLongLong
fieldTypeInt24
fieldTypeDate
fieldTypeTime
fieldTypeDateTime
fieldTypeYear
fieldTypeNewDate
fieldTypeVarChar
fieldTypeBit
)
const (
fieldTypeNewDecimal byte = iota + 0xf6
fieldTypeEnum
fieldTypeSet
fieldTypeTinyBLOB
fieldTypeMediumBLOB
fieldTypeLongBLOB
fieldTypeBLOB
fieldTypeVarString
fieldTypeString
fieldTypeGeometry
)
type fieldFlag uint16
const (
flagNotNULL fieldFlag = 1 << iota
flagPriKey
flagUniqueKey
flagMultipleKey
flagBLOB
flagUnsigned
flagZeroFill
flagBinary
flagEnum
flagAutoIncrement
flagTimestamp
flagSet
flagUnknown1
flagUnknown2
flagUnknown3
flagUnknown4
)
// http://dev.mysql.com/doc/internals/en/status-flags.html
type statusFlag uint16
const (
statusInTrans statusFlag = 1 << iota
statusInAutocommit
statusReserved // Not in documentation
statusMoreResultsExists
statusNoGoodIndexUsed
statusNoIndexUsed
statusCursorExists
statusLastRowSent
statusDbDropped
statusNoBackslashEscapes
statusMetadataChanged
statusQueryWasSlow
statusPsOutParams
statusInTransReadonly
statusSessionStateChanged
)

149
vendor/github.com/go-sql-driver/mysql/driver.go generated vendored Normal file
View File

@ -0,0 +1,149 @@
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// The driver should be used via the database/sql package:
//
// import "database/sql"
// import _ "github.com/go-sql-driver/mysql"
//
// db, err := sql.Open("mysql", "user:password@/dbname")
//
// See https://github.com/go-sql-driver/mysql#usage for details
package mysql
import (
"database/sql"
"database/sql/driver"
"net"
)
// This struct is exported to make the driver directly accessible.
// In general the driver is used via the database/sql package.
type MySQLDriver struct{}
// DialFunc is a function which can be used to establish the network connection.
// Custom dial functions must be registered with RegisterDial
type DialFunc func(addr string) (net.Conn, error)
var dials map[string]DialFunc
// RegisterDial registers a custom dial function. It can then be used by the
// network address mynet(addr), where mynet is the registered new network.
// addr is passed as a parameter to the dial function.
func RegisterDial(net string, dial DialFunc) {
if dials == nil {
dials = make(map[string]DialFunc)
}
dials[net] = dial
}
// Open new Connection.
// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how
// the DSN string is formatted
func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
var err error
// New mysqlConn
mc := &mysqlConn{
maxPacketAllowed: maxPacketSize,
maxWriteSize: maxPacketSize - 1,
}
mc.cfg, err = parseDSN(dsn)
if err != nil {
return nil, err
}
// Connect to Server
if dial, ok := dials[mc.cfg.net]; ok {
mc.netConn, err = dial(mc.cfg.addr)
} else {
nd := net.Dialer{Timeout: mc.cfg.timeout}
mc.netConn, err = nd.Dial(mc.cfg.net, mc.cfg.addr)
}
if err != nil {
return nil, err
}
// Enable TCP Keepalives on TCP connections
if tc, ok := mc.netConn.(*net.TCPConn); ok {
if err := tc.SetKeepAlive(true); err != nil {
// Don't send COM_QUIT before handshake.
mc.netConn.Close()
mc.netConn = nil
return nil, err
}
}
mc.buf = newBuffer(mc.netConn)
// Reading Handshake Initialization Packet
cipher, err := mc.readInitPacket()
if err != nil {
mc.Close()
return nil, err
}
// Send Client Authentication Packet
if err = mc.writeAuthPacket(cipher); err != nil {
mc.Close()
return nil, err
}
// Read Result Packet
err = mc.readResultOK()
if err != nil {
// Retry with old authentication method, if allowed
if mc.cfg != nil && mc.cfg.allowOldPasswords && err == ErrOldPassword {
if err = mc.writeOldAuthPacket(cipher); err != nil {
mc.Close()
return nil, err
}
if err = mc.readResultOK(); err != nil {
mc.Close()
return nil, err
}
} else if mc.cfg != nil && mc.cfg.allowCleartextPasswords && err == ErrCleartextPassword {
if err = mc.writeClearAuthPacket(); err != nil {
mc.Close()
return nil, err
}
if err = mc.readResultOK(); err != nil {
mc.Close()
return nil, err
}
} else {
mc.Close()
return nil, err
}
}
// Get max allowed packet size
maxap, err := mc.getSystemVar("max_allowed_packet")
if err != nil {
mc.Close()
return nil, err
}
mc.maxPacketAllowed = stringToInt(maxap) - 1
if mc.maxPacketAllowed < maxPacketSize {
mc.maxWriteSize = mc.maxPacketAllowed
}
// Handle DSN Params
err = mc.handleParams()
if err != nil {
mc.Close()
return nil, err
}
return mc, nil
}
func init() {
sql.Register("mysql", &MySQLDriver{})
}

131
vendor/github.com/go-sql-driver/mysql/errors.go generated vendored Normal file
View File

@ -0,0 +1,131 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
import (
"database/sql/driver"
"errors"
"fmt"
"io"
"log"
"os"
)
// Various errors the driver might return. Can change between driver versions.
var (
ErrInvalidConn = errors.New("Invalid Connection")
ErrMalformPkt = errors.New("Malformed Packet")
ErrNoTLS = errors.New("TLS encryption requested but server does not support TLS")
ErrOldPassword = errors.New("This user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
ErrCleartextPassword = errors.New("This user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN.")
ErrUnknownPlugin = errors.New("The authentication plugin is not supported.")
ErrOldProtocol = errors.New("MySQL-Server does not support required Protocol 41+")
ErrPktSync = errors.New("Commands out of sync. You can't run this command now")
ErrPktSyncMul = errors.New("Commands out of sync. Did you run multiple statements at once?")
ErrPktTooLarge = errors.New("Packet for query is too large. You can change this value on the server by adjusting the 'max_allowed_packet' variable.")
ErrBusyBuffer = errors.New("Busy buffer")
)
var errLog Logger = log.New(os.Stderr, "[MySQL] ", log.Ldate|log.Ltime|log.Lshortfile)
// Logger is used to log critical error messages.
type Logger interface {
Print(v ...interface{})
}
// SetLogger is used to set the logger for critical errors.
// The initial logger is os.Stderr.
func SetLogger(logger Logger) error {
if logger == nil {
return errors.New("logger is nil")
}
errLog = logger
return nil
}
// MySQLError is an error type which represents a single MySQL error
type MySQLError struct {
Number uint16
Message string
}
func (me *MySQLError) Error() string {
return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
}
// MySQLWarnings is an error type which represents a group of one or more MySQL
// warnings
type MySQLWarnings []MySQLWarning
func (mws MySQLWarnings) Error() string {
var msg string
for i, warning := range mws {
if i > 0 {
msg += "\r\n"
}
msg += fmt.Sprintf(
"%s %s: %s",
warning.Level,
warning.Code,
warning.Message,
)
}
return msg
}
// MySQLWarning is an error type which represents a single MySQL warning.
// Warnings are returned in groups only. See MySQLWarnings
type MySQLWarning struct {
Level string
Code string
Message string
}
func (mc *mysqlConn) getWarnings() (err error) {
rows, err := mc.Query("SHOW WARNINGS", nil)
if err != nil {
return
}
var warnings = MySQLWarnings{}
var values = make([]driver.Value, 3)
for {
err = rows.Next(values)
switch err {
case nil:
warning := MySQLWarning{}
if raw, ok := values[0].([]byte); ok {
warning.Level = string(raw)
} else {
warning.Level = fmt.Sprintf("%s", values[0])
}
if raw, ok := values[1].([]byte); ok {
warning.Code = string(raw)
} else {
warning.Code = fmt.Sprintf("%s", values[1])
}
if raw, ok := values[2].([]byte); ok {
warning.Message = string(raw)
} else {
warning.Message = fmt.Sprintf("%s", values[2])
}
warnings = append(warnings, warning)
case io.EOF:
return warnings
default:
rows.Close()
return
}
}
}

182
vendor/github.com/go-sql-driver/mysql/infile.go generated vendored Normal file
View File

@ -0,0 +1,182 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
import (
"fmt"
"io"
"os"
"strings"
"sync"
)
var (
fileRegister map[string]bool
fileRegisterLock sync.RWMutex
readerRegister map[string]func() io.Reader
readerRegisterLock sync.RWMutex
)
// RegisterLocalFile adds the given file to the file whitelist,
// so that it can be used by "LOAD DATA LOCAL INFILE <filepath>".
// Alternatively you can allow the use of all local files with
// the DSN parameter 'allowAllFiles=true'
//
// filePath := "/home/gopher/data.csv"
// mysql.RegisterLocalFile(filePath)
// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo")
// if err != nil {
// ...
//
func RegisterLocalFile(filePath string) {
fileRegisterLock.Lock()
// lazy map init
if fileRegister == nil {
fileRegister = make(map[string]bool)
}
fileRegister[strings.Trim(filePath, `"`)] = true
fileRegisterLock.Unlock()
}
// DeregisterLocalFile removes the given filepath from the whitelist.
func DeregisterLocalFile(filePath string) {
fileRegisterLock.Lock()
delete(fileRegister, strings.Trim(filePath, `"`))
fileRegisterLock.Unlock()
}
// RegisterReaderHandler registers a handler function which is used
// to receive an io.Reader.
// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::<name>".
// If the handler returns an io.ReadCloser, Close() is called when the
// request is finished.
//
// mysql.RegisterReaderHandler("data", func() io.Reader {
// var csvReader io.Reader // Some Reader that returns CSV data
// ... // Open Reader here
// return csvReader
// })
// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo")
// if err != nil {
// ...
//
func RegisterReaderHandler(name string, handler func() io.Reader) {
readerRegisterLock.Lock()
// lazy map init
if readerRegister == nil {
readerRegister = make(map[string]func() io.Reader)
}
readerRegister[name] = handler
readerRegisterLock.Unlock()
}
// DeregisterReaderHandler removes the ReaderHandler function with
// the given name from the registry.
func DeregisterReaderHandler(name string) {
readerRegisterLock.Lock()
delete(readerRegister, name)
readerRegisterLock.Unlock()
}
func deferredClose(err *error, closer io.Closer) {
closeErr := closer.Close()
if *err == nil {
*err = closeErr
}
}
func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
var rdr io.Reader
var data []byte
if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader
// The server might return an absolute path. See issue #355.
name = name[idx+8:]
readerRegisterLock.RLock()
handler, inMap := readerRegister[name]
readerRegisterLock.RUnlock()
if inMap {
rdr = handler()
if rdr != nil {
data = make([]byte, 4+mc.maxWriteSize)
if cl, ok := rdr.(io.Closer); ok {
defer deferredClose(&err, cl)
}
} else {
err = fmt.Errorf("Reader '%s' is <nil>", name)
}
} else {
err = fmt.Errorf("Reader '%s' is not registered", name)
}
} else { // File
name = strings.Trim(name, `"`)
fileRegisterLock.RLock()
fr := fileRegister[name]
fileRegisterLock.RUnlock()
if mc.cfg.allowAllFiles || fr {
var file *os.File
var fi os.FileInfo
if file, err = os.Open(name); err == nil {
defer deferredClose(&err, file)
// get file size
if fi, err = file.Stat(); err == nil {
rdr = file
if fileSize := int(fi.Size()); fileSize <= mc.maxWriteSize {
data = make([]byte, 4+fileSize)
} else if fileSize <= mc.maxPacketAllowed {
data = make([]byte, 4+mc.maxWriteSize)
} else {
err = fmt.Errorf("Local File '%s' too large: Size: %d, Max: %d", name, fileSize, mc.maxPacketAllowed)
}
}
}
} else {
err = fmt.Errorf("Local File '%s' is not registered. Use the DSN parameter 'allowAllFiles=true' to allow all files", name)
}
}
// send content packets
if err == nil {
var n int
for err == nil {
n, err = rdr.Read(data[4:])
if n > 0 {
if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
return ioErr
}
}
}
if err == io.EOF {
err = nil
}
}
// send empty packet (termination)
if data == nil {
data = make([]byte, 4)
}
if ioErr := mc.writePacket(data[:4]); ioErr != nil {
return ioErr
}
// read OK packet
if err == nil {
return mc.readResultOK()
} else {
mc.readPacket()
}
return err
}

1182
vendor/github.com/go-sql-driver/mysql/packets.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

22
vendor/github.com/go-sql-driver/mysql/result.go generated vendored Normal file
View File

@ -0,0 +1,22 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
type mysqlResult struct {
affectedRows int64
insertId int64
}
func (res *mysqlResult) LastInsertId() (int64, error) {
return res.insertId, nil
}
func (res *mysqlResult) RowsAffected() (int64, error) {
return res.affectedRows, nil
}

106
vendor/github.com/go-sql-driver/mysql/rows.go generated vendored Normal file
View File

@ -0,0 +1,106 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
import (
"database/sql/driver"
"io"
)
type mysqlField struct {
tableName string
name string
flags fieldFlag
fieldType byte
decimals byte
}
type mysqlRows struct {
mc *mysqlConn
columns []mysqlField
}
type binaryRows struct {
mysqlRows
}
type textRows struct {
mysqlRows
}
type emptyRows struct{}
func (rows *mysqlRows) Columns() []string {
columns := make([]string, len(rows.columns))
if rows.mc.cfg.columnsWithAlias {
for i := range columns {
if tableName := rows.columns[i].tableName; len(tableName) > 0 {
columns[i] = tableName + "." + rows.columns[i].name
} else {
columns[i] = rows.columns[i].name
}
}
} else {
for i := range columns {
columns[i] = rows.columns[i].name
}
}
return columns
}
func (rows *mysqlRows) Close() error {
mc := rows.mc
if mc == nil {
return nil
}
if mc.netConn == nil {
return ErrInvalidConn
}
// Remove unread packets from stream
err := mc.readUntilEOF()
rows.mc = nil
return err
}
func (rows *binaryRows) Next(dest []driver.Value) error {
if mc := rows.mc; mc != nil {
if mc.netConn == nil {
return ErrInvalidConn
}
// Fetch next row from stream
return rows.readRow(dest)
}
return io.EOF
}
func (rows *textRows) Next(dest []driver.Value) error {
if mc := rows.mc; mc != nil {
if mc.netConn == nil {
return ErrInvalidConn
}
// Fetch next row from stream
return rows.readRow(dest)
}
return io.EOF
}
func (rows emptyRows) Columns() []string {
return nil
}
func (rows emptyRows) Close() error {
return nil
}
func (rows emptyRows) Next(dest []driver.Value) error {
return io.EOF
}

150
vendor/github.com/go-sql-driver/mysql/statement.go generated vendored Normal file
View File

@ -0,0 +1,150 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
import (
"database/sql/driver"
"fmt"
"reflect"
"strconv"
)
type mysqlStmt struct {
mc *mysqlConn
id uint32
paramCount int
columns []mysqlField // cached from the first query
}
func (stmt *mysqlStmt) Close() error {
if stmt.mc == nil || stmt.mc.netConn == nil {
errLog.Print(ErrInvalidConn)
return driver.ErrBadConn
}
err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
stmt.mc = nil
return err
}
func (stmt *mysqlStmt) NumInput() int {
return stmt.paramCount
}
func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
return converter{}
}
func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
if stmt.mc.netConn == nil {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := stmt.writeExecutePacket(args)
if err != nil {
return nil, err
}
mc := stmt.mc
mc.affectedRows = 0
mc.insertId = 0
// Read Result
resLen, err := mc.readResultSetHeaderPacket()
if err == nil {
if resLen > 0 {
// Columns
err = mc.readUntilEOF()
if err != nil {
return nil, err
}
// Rows
err = mc.readUntilEOF()
}
if err == nil {
return &mysqlResult{
affectedRows: int64(mc.affectedRows),
insertId: int64(mc.insertId),
}, nil
}
}
return nil, err
}
func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
if stmt.mc.netConn == nil {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := stmt.writeExecutePacket(args)
if err != nil {
return nil, err
}
mc := stmt.mc
// Read Result
resLen, err := mc.readResultSetHeaderPacket()
if err != nil {
return nil, err
}
rows := new(binaryRows)
rows.mc = mc
if resLen > 0 {
// Columns
// If not cached, read them and cache them
if stmt.columns == nil {
rows.columns, err = mc.readColumns(resLen)
stmt.columns = rows.columns
} else {
rows.columns = stmt.columns
err = mc.readUntilEOF()
}
}
return rows, err
}
type converter struct{}
func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
if driver.IsValue(v) {
return v, nil
}
rv := reflect.ValueOf(v)
switch rv.Kind() {
case reflect.Ptr:
// indirect pointers
if rv.IsNil() {
return nil, nil
}
return c.ConvertValue(rv.Elem().Interface())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return rv.Int(), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
return int64(rv.Uint()), nil
case reflect.Uint64:
u64 := rv.Uint()
if u64 >= 1<<63 {
return strconv.FormatUint(u64, 10), nil
}
return int64(u64), nil
case reflect.Float32, reflect.Float64:
return rv.Float(), nil
}
return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
}

31
vendor/github.com/go-sql-driver/mysql/transaction.go generated vendored Normal file
View File

@ -0,0 +1,31 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
type mysqlTx struct {
mc *mysqlConn
}
func (tx *mysqlTx) Commit() (err error) {
if tx.mc == nil || tx.mc.netConn == nil {
return ErrInvalidConn
}
err = tx.mc.exec("COMMIT")
tx.mc = nil
return
}
func (tx *mysqlTx) Rollback() (err error) {
if tx.mc == nil || tx.mc.netConn == nil {
return ErrInvalidConn
}
err = tx.mc.exec("ROLLBACK")
tx.mc = nil
return
}

973
vendor/github.com/go-sql-driver/mysql/utils.go generated vendored Normal file
View File

@ -0,0 +1,973 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
import (
"crypto/sha1"
"crypto/tls"
"database/sql/driver"
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"net/url"
"strings"
"time"
)
var (
tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs
errInvalidDSNUnescaped = errors.New("Invalid DSN: Did you forget to escape a param value?")
errInvalidDSNAddr = errors.New("Invalid DSN: Network Address not terminated (missing closing brace)")
errInvalidDSNNoSlash = errors.New("Invalid DSN: Missing the slash separating the database name")
errInvalidDSNUnsafeCollation = errors.New("Invalid DSN: interpolateParams can be used with ascii, latin1, utf8 and utf8mb4 charset")
)
func init() {
tlsConfigRegister = make(map[string]*tls.Config)
}
// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
// Use the key as a value in the DSN where tls=value.
//
// rootCertPool := x509.NewCertPool()
// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
// if err != nil {
// log.Fatal(err)
// }
// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
// log.Fatal("Failed to append PEM.")
// }
// clientCert := make([]tls.Certificate, 0, 1)
// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem")
// if err != nil {
// log.Fatal(err)
// }
// clientCert = append(clientCert, certs)
// mysql.RegisterTLSConfig("custom", &tls.Config{
// RootCAs: rootCertPool,
// Certificates: clientCert,
// })
// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom")
//
func RegisterTLSConfig(key string, config *tls.Config) error {
if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" {
return fmt.Errorf("Key '%s' is reserved", key)
}
tlsConfigRegister[key] = config
return nil
}
// DeregisterTLSConfig removes the tls.Config associated with key.
func DeregisterTLSConfig(key string) {
delete(tlsConfigRegister, key)
}
// parseDSN parses the DSN string to a config
func parseDSN(dsn string) (cfg *config, err error) {
// New config with some default values
cfg = &config{
loc: time.UTC,
collation: defaultCollation,
}
// [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN]
// Find the last '/' (since the password or the net addr might contain a '/')
foundSlash := false
for i := len(dsn) - 1; i >= 0; i-- {
if dsn[i] == '/' {
foundSlash = true
var j, k int
// left part is empty if i <= 0
if i > 0 {
// [username[:password]@][protocol[(address)]]
// Find the last '@' in dsn[:i]
for j = i; j >= 0; j-- {
if dsn[j] == '@' {
// username[:password]
// Find the first ':' in dsn[:j]
for k = 0; k < j; k++ {
if dsn[k] == ':' {
cfg.passwd = dsn[k+1 : j]
break
}
}
cfg.user = dsn[:k]
break
}
}
// [protocol[(address)]]
// Find the first '(' in dsn[j+1:i]
for k = j + 1; k < i; k++ {
if dsn[k] == '(' {
// dsn[i-1] must be == ')' if an address is specified
if dsn[i-1] != ')' {
if strings.ContainsRune(dsn[k+1:i], ')') {
return nil, errInvalidDSNUnescaped
}
return nil, errInvalidDSNAddr
}
cfg.addr = dsn[k+1 : i-1]
break
}
}
cfg.net = dsn[j+1 : k]
}
// dbname[?param1=value1&...&paramN=valueN]
// Find the first '?' in dsn[i+1:]
for j = i + 1; j < len(dsn); j++ {
if dsn[j] == '?' {
if err = parseDSNParams(cfg, dsn[j+1:]); err != nil {
return
}
break
}
}
cfg.dbname = dsn[i+1 : j]
break
}
}
if !foundSlash && len(dsn) > 0 {
return nil, errInvalidDSNNoSlash
}
if cfg.interpolateParams && unsafeCollations[cfg.collation] {
return nil, errInvalidDSNUnsafeCollation
}
// Set default network if empty
if cfg.net == "" {
cfg.net = "tcp"
}
// Set default address if empty
if cfg.addr == "" {
switch cfg.net {
case "tcp":
cfg.addr = "127.0.0.1:3306"
case "unix":
cfg.addr = "/tmp/mysql.sock"
default:
return nil, errors.New("Default addr for network '" + cfg.net + "' unknown")
}
}
return
}
// parseDSNParams parses the DSN "query string"
// Values must be url.QueryEscape'ed
func parseDSNParams(cfg *config, params string) (err error) {
for _, v := range strings.Split(params, "&") {
param := strings.SplitN(v, "=", 2)
if len(param) != 2 {
continue
}
// cfg params
switch value := param[1]; param[0] {
// Enable client side placeholder substitution
case "interpolateParams":
var isBool bool
cfg.interpolateParams, isBool = readBool(value)
if !isBool {
return fmt.Errorf("Invalid Bool value: %s", value)
}
// Disable INFILE whitelist / enable all files
case "allowAllFiles":
var isBool bool
cfg.allowAllFiles, isBool = readBool(value)
if !isBool {
return fmt.Errorf("Invalid Bool value: %s", value)
}
// Use cleartext authentication mode (MySQL 5.5.10+)
case "allowCleartextPasswords":
var isBool bool
cfg.allowCleartextPasswords, isBool = readBool(value)
if !isBool {
return fmt.Errorf("Invalid Bool value: %s", value)
}
// Use old authentication mode (pre MySQL 4.1)
case "allowOldPasswords":
var isBool bool
cfg.allowOldPasswords, isBool = readBool(value)
if !isBool {
return fmt.Errorf("Invalid Bool value: %s", value)
}
// Switch "rowsAffected" mode
case "clientFoundRows":
var isBool bool
cfg.clientFoundRows, isBool = readBool(value)
if !isBool {
return fmt.Errorf("Invalid Bool value: %s", value)
}
// Collation
case "collation":
collation, ok := collations[value]
if !ok {
// Note possibility for false negatives:
// could be triggered although the collation is valid if the
// collations map does not contain entries the server supports.
err = errors.New("unknown collation")
return
}
cfg.collation = collation
break
case "columnsWithAlias":
var isBool bool
cfg.columnsWithAlias, isBool = readBool(value)
if !isBool {
return fmt.Errorf("Invalid Bool value: %s", value)
}
// Time Location
case "loc":
if value, err = url.QueryUnescape(value); err != nil {
return
}
cfg.loc, err = time.LoadLocation(value)
if err != nil {
return
}
// Dial Timeout
case "timeout":
cfg.timeout, err = time.ParseDuration(value)
if err != nil {
return
}
// TLS-Encryption
case "tls":
boolValue, isBool := readBool(value)
if isBool {
if boolValue {
cfg.tls = &tls.Config{}
}
} else {
if strings.ToLower(value) == "skip-verify" {
cfg.tls = &tls.Config{InsecureSkipVerify: true}
} else if tlsConfig, ok := tlsConfigRegister[value]; ok {
if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify {
host, _, err := net.SplitHostPort(cfg.addr)
if err == nil {
tlsConfig.ServerName = host
}
}
cfg.tls = tlsConfig
} else {
return fmt.Errorf("Invalid value / unknown config name: %s", value)
}
}
default:
// lazy init
if cfg.params == nil {
cfg.params = make(map[string]string)
}
if cfg.params[param[0]], err = url.QueryUnescape(value); err != nil {
return
}
}
}
return
}
// Returns the bool value of the input.
// The 2nd return value indicates if the input was a valid bool value
func readBool(input string) (value bool, valid bool) {
switch input {
case "1", "true", "TRUE", "True":
return true, true
case "0", "false", "FALSE", "False":
return false, true
}
// Not a valid bool value
return
}
/******************************************************************************
* Authentication *
******************************************************************************/
// Encrypt password using 4.1+ method
func scramblePassword(scramble, password []byte) []byte {
if len(password) == 0 {
return nil
}
// stage1Hash = SHA1(password)
crypt := sha1.New()
crypt.Write(password)
stage1 := crypt.Sum(nil)
// scrambleHash = SHA1(scramble + SHA1(stage1Hash))
// inner Hash
crypt.Reset()
crypt.Write(stage1)
hash := crypt.Sum(nil)
// outer Hash
crypt.Reset()
crypt.Write(scramble)
crypt.Write(hash)
scramble = crypt.Sum(nil)
// token = scrambleHash XOR stage1Hash
for i := range scramble {
scramble[i] ^= stage1[i]
}
return scramble
}
// Encrypt password using pre 4.1 (old password) method
// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
type myRnd struct {
seed1, seed2 uint32
}
const myRndMaxVal = 0x3FFFFFFF
// Pseudo random number generator
func newMyRnd(seed1, seed2 uint32) *myRnd {
return &myRnd{
seed1: seed1 % myRndMaxVal,
seed2: seed2 % myRndMaxVal,
}
}
// Tested to be equivalent to MariaDB's floating point variant
// http://play.golang.org/p/QHvhd4qved
// http://play.golang.org/p/RG0q4ElWDx
func (r *myRnd) NextByte() byte {
r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
return byte(uint64(r.seed1) * 31 / myRndMaxVal)
}
// Generate binary hash from byte string using insecure pre 4.1 method
func pwHash(password []byte) (result [2]uint32) {
var add uint32 = 7
var tmp uint32
result[0] = 1345345333
result[1] = 0x12345671
for _, c := range password {
// skip spaces and tabs in password
if c == ' ' || c == '\t' {
continue
}
tmp = uint32(c)
result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
result[1] += (result[1] << 8) ^ result[0]
add += tmp
}
// Remove sign bit (1<<31)-1
result[0] &= 0x7FFFFFFF
result[1] &= 0x7FFFFFFF
return
}
// Encrypt password using insecure pre 4.1 method
func scrambleOldPassword(scramble, password []byte) []byte {
if len(password) == 0 {
return nil
}
scramble = scramble[:8]
hashPw := pwHash(password)
hashSc := pwHash(scramble)
r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
var out [8]byte
for i := range out {
out[i] = r.NextByte() + 64
}
mask := r.NextByte()
for i := range out {
out[i] ^= mask
}
return out[:]
}
/******************************************************************************
* Time related utils *
******************************************************************************/
// NullTime represents a time.Time that may be NULL.
// NullTime implements the Scanner interface so
// it can be used as a scan destination:
//
// var nt NullTime
// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
// ...
// if nt.Valid {
// // use nt.Time
// } else {
// // NULL value
// }
//
// This NullTime implementation is not driver-specific
type NullTime struct {
Time time.Time
Valid bool // Valid is true if Time is not NULL
}
// Scan implements the Scanner interface.
// The value type must be time.Time or string / []byte (formatted time-string),
// otherwise Scan fails.
func (nt *NullTime) Scan(value interface{}) (err error) {
if value == nil {
nt.Time, nt.Valid = time.Time{}, false
return
}
switch v := value.(type) {
case time.Time:
nt.Time, nt.Valid = v, true
return
case []byte:
nt.Time, err = parseDateTime(string(v), time.UTC)
nt.Valid = (err == nil)
return
case string:
nt.Time, err = parseDateTime(v, time.UTC)
nt.Valid = (err == nil)
return
}
nt.Valid = false
return fmt.Errorf("Can't convert %T to time.Time", value)
}
// Value implements the driver Valuer interface.
func (nt NullTime) Value() (driver.Value, error) {
if !nt.Valid {
return nil, nil
}
return nt.Time, nil
}
func parseDateTime(str string, loc *time.Location) (t time.Time, err error) {
base := "0000-00-00 00:00:00.0000000"
switch len(str) {
case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM"
if str == base[:len(str)] {
return
}
t, err = time.Parse(timeFormat[:len(str)], str)
default:
err = fmt.Errorf("Invalid Time-String: %s", str)
return
}
// Adjust location
if err == nil && loc != time.UTC {
y, mo, d := t.Date()
h, mi, s := t.Clock()
t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil
}
return
}
func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) {
switch num {
case 0:
return time.Time{}, nil
case 4:
return time.Date(
int(binary.LittleEndian.Uint16(data[:2])), // year
time.Month(data[2]), // month
int(data[3]), // day
0, 0, 0, 0,
loc,
), nil
case 7:
return time.Date(
int(binary.LittleEndian.Uint16(data[:2])), // year
time.Month(data[2]), // month
int(data[3]), // day
int(data[4]), // hour
int(data[5]), // minutes
int(data[6]), // seconds
0,
loc,
), nil
case 11:
return time.Date(
int(binary.LittleEndian.Uint16(data[:2])), // year
time.Month(data[2]), // month
int(data[3]), // day
int(data[4]), // hour
int(data[5]), // minutes
int(data[6]), // seconds
int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds
loc,
), nil
}
return nil, fmt.Errorf("Invalid DATETIME-packet length %d", num)
}
// zeroDateTime is used in formatBinaryDateTime to avoid an allocation
// if the DATE or DATETIME has the zero value.
// It must never be changed.
// The current behavior depends on database/sql copying the result.
var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value, error) {
// length expects the deterministic length of the zero value,
// negative time and 100+ hours are automatically added if needed
if len(src) == 0 {
if justTime {
return zeroDateTime[11 : 11+length], nil
}
return zeroDateTime[:length], nil
}
var dst []byte // return value
var pt, p1, p2, p3 byte // current digit pair
var zOffs byte // offset of value in zeroDateTime
if justTime {
switch length {
case
8, // time (can be up to 10 when negative and 100+ hours)
10, 11, 12, 13, 14, 15: // time with fractional seconds
default:
return nil, fmt.Errorf("illegal TIME length %d", length)
}
switch len(src) {
case 8, 12:
default:
return nil, fmt.Errorf("Invalid TIME-packet length %d", len(src))
}
// +2 to enable negative time and 100+ hours
dst = make([]byte, 0, length+2)
if src[0] == 1 {
dst = append(dst, '-')
}
if src[1] != 0 {
hour := uint16(src[1])*24 + uint16(src[5])
pt = byte(hour / 100)
p1 = byte(hour - 100*uint16(pt))
dst = append(dst, digits01[pt])
} else {
p1 = src[5]
}
zOffs = 11
src = src[6:]
} else {
switch length {
case 10, 19, 21, 22, 23, 24, 25, 26:
default:
t := "DATE"
if length > 10 {
t += "TIME"
}
return nil, fmt.Errorf("illegal %s length %d", t, length)
}
switch len(src) {
case 4, 7, 11:
default:
t := "DATE"
if length > 10 {
t += "TIME"
}
return nil, fmt.Errorf("illegal %s-packet length %d", t, len(src))
}
dst = make([]byte, 0, length)
// start with the date
year := binary.LittleEndian.Uint16(src[:2])
pt = byte(year / 100)
p1 = byte(year - 100*uint16(pt))
p2, p3 = src[2], src[3]
dst = append(dst,
digits10[pt], digits01[pt],
digits10[p1], digits01[p1], '-',
digits10[p2], digits01[p2], '-',
digits10[p3], digits01[p3],
)
if length == 10 {
return dst, nil
}
if len(src) == 4 {
return append(dst, zeroDateTime[10:length]...), nil
}
dst = append(dst, ' ')
p1 = src[4] // hour
src = src[5:]
}
// p1 is 2-digit hour, src is after hour
p2, p3 = src[0], src[1]
dst = append(dst,
digits10[p1], digits01[p1], ':',
digits10[p2], digits01[p2], ':',
digits10[p3], digits01[p3],
)
if length <= byte(len(dst)) {
return dst, nil
}
src = src[2:]
if len(src) == 0 {
return append(dst, zeroDateTime[19:zOffs+length]...), nil
}
microsecs := binary.LittleEndian.Uint32(src[:4])
p1 = byte(microsecs / 10000)
microsecs -= 10000 * uint32(p1)
p2 = byte(microsecs / 100)
microsecs -= 100 * uint32(p2)
p3 = byte(microsecs)
switch decimals := zOffs + length - 20; decimals {
default:
return append(dst, '.',
digits10[p1], digits01[p1],
digits10[p2], digits01[p2],
digits10[p3], digits01[p3],
), nil
case 1:
return append(dst, '.',
digits10[p1],
), nil
case 2:
return append(dst, '.',
digits10[p1], digits01[p1],
), nil
case 3:
return append(dst, '.',
digits10[p1], digits01[p1],
digits10[p2],
), nil
case 4:
return append(dst, '.',
digits10[p1], digits01[p1],
digits10[p2], digits01[p2],
), nil
case 5:
return append(dst, '.',
digits10[p1], digits01[p1],
digits10[p2], digits01[p2],
digits10[p3],
), nil
}
}
/******************************************************************************
* Convert from and to bytes *
******************************************************************************/
func uint64ToBytes(n uint64) []byte {
return []byte{
byte(n),
byte(n >> 8),
byte(n >> 16),
byte(n >> 24),
byte(n >> 32),
byte(n >> 40),
byte(n >> 48),
byte(n >> 56),
}
}
func uint64ToString(n uint64) []byte {
var a [20]byte
i := 20
// U+0030 = 0
// ...
// U+0039 = 9
var q uint64
for n >= 10 {
i--
q = n / 10
a[i] = uint8(n-q*10) + 0x30
n = q
}
i--
a[i] = uint8(n) + 0x30
return a[i:]
}
// treats string value as unsigned integer representation
func stringToInt(b []byte) int {
val := 0
for i := range b {
val *= 10
val += int(b[i] - 0x30)
}
return val
}
// returns the string read as a byte slice, whether the value is NULL,
// the number of bytes read and an error, in case the string is longer than
// the input slice
func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
// Get length
num, isNull, n := readLengthEncodedInteger(b)
if num < 1 {
return b[n:n], isNull, n, nil
}
n += int(num)
// Check data length
if len(b) >= n {
return b[n-int(num) : n], false, n, nil
}
return nil, false, n, io.EOF
}
// returns the number of bytes skipped and an error, in case the string is
// longer than the input slice
func skipLengthEncodedString(b []byte) (int, error) {
// Get length
num, _, n := readLengthEncodedInteger(b)
if num < 1 {
return n, nil
}
n += int(num)
// Check data length
if len(b) >= n {
return n, nil
}
return n, io.EOF
}
// returns the number read, whether the value is NULL and the number of bytes read
func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
// See issue #349
if len(b) == 0 {
return 0, true, 1
}
switch b[0] {
// 251: NULL
case 0xfb:
return 0, true, 1
// 252: value of following 2
case 0xfc:
return uint64(b[1]) | uint64(b[2])<<8, false, 3
// 253: value of following 3
case 0xfd:
return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
// 254: value of following 8
case 0xfe:
return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
uint64(b[7])<<48 | uint64(b[8])<<56,
false, 9
}
// 0-250: value of first byte
return uint64(b[0]), false, 1
}
// encodes a uint64 value and appends it to the given bytes slice
func appendLengthEncodedInteger(b []byte, n uint64) []byte {
switch {
case n <= 250:
return append(b, byte(n))
case n <= 0xffff:
return append(b, 0xfc, byte(n), byte(n>>8))
case n <= 0xffffff:
return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
}
return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
}
// reserveBuffer checks cap(buf) and expands the buffer to len(buf) + appendSize.
// If cap(buf) is not enough, it reallocates a new buffer.
func reserveBuffer(buf []byte, appendSize int) []byte {
newSize := len(buf) + appendSize
if cap(buf) < newSize {
// Grow buffer exponentially
newBuf := make([]byte, len(buf)*2+appendSize)
copy(newBuf, buf)
buf = newBuf
}
return buf[:newSize]
}
// escapeBytesBackslash escapes []byte with backslashes (\)
// This escapes the contents of a string (provided as []byte) by adding backslashes before special
// characters, and turning others into specific escape sequences, such as
// turning newlines into \n and null bytes into \0.
// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932
func escapeBytesBackslash(buf, v []byte) []byte {
pos := len(buf)
buf = reserveBuffer(buf, len(v)*2)
for _, c := range v {
switch c {
case '\x00':
buf[pos] = '\\'
buf[pos+1] = '0'
pos += 2
case '\n':
buf[pos] = '\\'
buf[pos+1] = 'n'
pos += 2
case '\r':
buf[pos] = '\\'
buf[pos+1] = 'r'
pos += 2
case '\x1a':
buf[pos] = '\\'
buf[pos+1] = 'Z'
pos += 2
case '\'':
buf[pos] = '\\'
buf[pos+1] = '\''
pos += 2
case '"':
buf[pos] = '\\'
buf[pos+1] = '"'
pos += 2
case '\\':
buf[pos] = '\\'
buf[pos+1] = '\\'
pos += 2
default:
buf[pos] = c
pos += 1
}
}
return buf[:pos]
}
// escapeStringBackslash is similar to escapeBytesBackslash but for string.
func escapeStringBackslash(buf []byte, v string) []byte {
pos := len(buf)
buf = reserveBuffer(buf, len(v)*2)
for i := 0; i < len(v); i++ {
c := v[i]
switch c {
case '\x00':
buf[pos] = '\\'
buf[pos+1] = '0'
pos += 2
case '\n':
buf[pos] = '\\'
buf[pos+1] = 'n'
pos += 2
case '\r':
buf[pos] = '\\'
buf[pos+1] = 'r'
pos += 2
case '\x1a':
buf[pos] = '\\'
buf[pos+1] = 'Z'
pos += 2
case '\'':
buf[pos] = '\\'
buf[pos+1] = '\''
pos += 2
case '"':
buf[pos] = '\\'
buf[pos+1] = '"'
pos += 2
case '\\':
buf[pos] = '\\'
buf[pos+1] = '\\'
pos += 2
default:
buf[pos] = c
pos += 1
}
}
return buf[:pos]
}
// escapeBytesQuotes escapes apostrophes in []byte by doubling them up.
// This escapes the contents of a string by doubling up any apostrophes that
// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in
// effect on the server.
// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038
func escapeBytesQuotes(buf, v []byte) []byte {
pos := len(buf)
buf = reserveBuffer(buf, len(v)*2)
for _, c := range v {
if c == '\'' {
buf[pos] = '\''
buf[pos+1] = '\''
pos += 2
} else {
buf[pos] = c
pos++
}
}
return buf[:pos]
}
// escapeStringQuotes is similar to escapeBytesQuotes but for string.
func escapeStringQuotes(buf []byte, v string) []byte {
pos := len(buf)
buf = reserveBuffer(buf, len(v)*2)
for i := 0; i < len(v); i++ {
c := v[i]
if c == '\'' {
buf[pos] = '\''
buf[pos+1] = '\''
pos += 2
} else {
buf[pos] = c
pos++
}
}
return buf[:pos]
}
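As a rough sketch of how the helpers above behave (illustrative only, not part of the vendored file; because these functions are unexported, such a check would have to live in the same mysql package, e.g. in a _test.go file), the length-encoded integer writer and reader round-trip a value, and escapeStringQuotes doubles apostrophes:

package mysql

import (
	"bytes"
	"testing"
)

func TestLengthEncodedIntegerRoundTrip(t *testing.T) {
	// appendLengthEncodedInteger picks the 1-, 3-, 4- or 9-byte wire form;
	// readLengthEncodedInteger must decode the same value and byte count.
	for _, n := range []uint64{0, 250, 251, 0xffff, 0x10000, 0xffffff, 0x1000000, 1 << 40} {
		buf := appendLengthEncodedInteger(nil, n)
		got, isNull, read := readLengthEncodedInteger(buf)
		if isNull || got != n || read != len(buf) {
			t.Errorf("round-trip failed for %d: got %d (null=%v, read=%d of %d)", n, got, isNull, read, len(buf))
		}
	}

	// escapeStringQuotes doubles apostrophes (used when NO_BACKSLASH_ESCAPES is active).
	if esc := escapeStringQuotes(nil, "it's"); !bytes.Equal(esc, []byte("it''s")) {
		t.Errorf("unexpected escape result: %q", esc)
	}
}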

258
vendor/github.com/outbrain/golib/log/log.go generated vendored Normal file
View File

@ -0,0 +1,258 @@
/*
Copyright 2014 Outbrain Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package log
import (
"errors"
"fmt"
"log/syslog"
"os"
"runtime/debug"
"time"
)
// LogLevel indicates the severity of a log entry
type LogLevel int
func (this LogLevel) String() string {
switch this {
case FATAL:
return "FATAL"
case CRITICAL:
return "CRITICAL"
case ERROR:
return "ERROR"
case WARNING:
return "WARNING"
case NOTICE:
return "NOTICE"
case INFO:
return "INFO"
case DEBUG:
return "DEBUG"
}
return "unknown"
}
func LogLevelFromString(logLevelName string) (LogLevel, error) {
switch logLevelName {
case "FATAL":
return FATAL, nil
case "CRITICAL":
return CRITICAL, nil
case "ERROR":
return ERROR, nil
case "WARNING":
return WARNING, nil
case "NOTICE":
return NOTICE, nil
case "INFO":
return INFO, nil
case "DEBUG":
return DEBUG, nil
}
return 0, fmt.Errorf("Unknown LogLevel name: %+v", logLevelName)
}
const (
FATAL LogLevel = iota
CRITICAL
ERROR
WARNING
NOTICE
INFO
DEBUG
)
const TimeFormat = "2006-01-02 15:04:05"
// globalLogLevel indicates the global level filter for all logs (only entries with a level equal to
// or more severe than this value will be logged)
var globalLogLevel LogLevel = DEBUG
var printStackTrace bool = false
// syslogWriter is optional, and defaults to nil (disabled)
var syslogLevel LogLevel = ERROR
var syslogWriter *syslog.Writer
// SetPrintStackTrace enables/disables dumping the stack upon error logging
func SetPrintStackTrace(shouldPrintStackTrace bool) {
printStackTrace = shouldPrintStackTrace
}
// SetLevel sets the global log level. Only entries with a level equal to or more severe than
// this value will be logged
func SetLevel(logLevel LogLevel) {
globalLogLevel = logLevel
}
// GetLevel returns current global log level
func GetLevel() LogLevel {
return globalLogLevel
}
// EnableSyslogWriter enables, if possible, writes to syslog. These will execute _in addition_ to normal logging
func EnableSyslogWriter(tag string) (err error) {
syslogWriter, err = syslog.New(syslog.LOG_ERR, tag)
if err != nil {
syslogWriter = nil
}
return err
}
// SetSyslogLevel sets the minimal syslog level. Only entries with a level equal to or more severe than
// this value will be written to syslog. However, this is also capped by the global log level; that is,
// messages below the global log level are discarded in any case.
func SetSyslogLevel(logLevel LogLevel) {
syslogLevel = logLevel
}
// logFormattedEntry nicely formats and emits a log entry
func logFormattedEntry(logLevel LogLevel, message string, args ...interface{}) string {
if logLevel > globalLogLevel {
return ""
}
msgArgs := fmt.Sprintf(message, args...)
entryString := fmt.Sprintf("%s %s %s", time.Now().Format(TimeFormat), logLevel, msgArgs)
fmt.Fprintln(os.Stderr, entryString)
if syslogWriter != nil {
go func() error {
if logLevel > syslogLevel {
return nil
}
switch logLevel {
case FATAL:
return syslogWriter.Emerg(msgArgs)
case CRITICAL:
return syslogWriter.Crit(msgArgs)
case ERROR:
return syslogWriter.Err(msgArgs)
case WARNING:
return syslogWriter.Warning(msgArgs)
case NOTICE:
return syslogWriter.Notice(msgArgs)
case INFO:
return syslogWriter.Info(msgArgs)
case DEBUG:
return syslogWriter.Debug(msgArgs)
}
return nil
}()
}
return entryString
}
// logEntry emits a formatted log entry
func logEntry(logLevel LogLevel, message string, args ...interface{}) string {
entryString := message
for _, s := range args {
entryString += fmt.Sprintf(" %s", s)
}
return logFormattedEntry(logLevel, entryString)
}
// logErrorEntry emits a log entry based on given error object
func logErrorEntry(logLevel LogLevel, err error) error {
if err == nil {
// No error
return nil
}
entryString := fmt.Sprintf("%+v", err)
logEntry(logLevel, entryString)
if printStackTrace {
debug.PrintStack()
}
return err
}
func Debug(message string, args ...interface{}) string {
return logEntry(DEBUG, message, args...)
}
func Debugf(message string, args ...interface{}) string {
return logFormattedEntry(DEBUG, message, args...)
}
func Info(message string, args ...interface{}) string {
return logEntry(INFO, message, args...)
}
func Infof(message string, args ...interface{}) string {
return logFormattedEntry(INFO, message, args...)
}
func Notice(message string, args ...interface{}) string {
return logEntry(NOTICE, message, args...)
}
func Noticef(message string, args ...interface{}) string {
return logFormattedEntry(NOTICE, message, args...)
}
func Warning(message string, args ...interface{}) error {
return errors.New(logEntry(WARNING, message, args...))
}
func Warningf(message string, args ...interface{}) error {
return errors.New(logFormattedEntry(WARNING, message, args...))
}
func Error(message string, args ...interface{}) error {
return errors.New(logEntry(ERROR, message, args...))
}
func Errorf(message string, args ...interface{}) error {
return errors.New(logFormattedEntry(ERROR, message, args...))
}
func Errore(err error) error {
return logErrorEntry(ERROR, err)
}
func Critical(message string, args ...interface{}) error {
return errors.New(logEntry(CRITICAL, message, args...))
}
func Criticalf(message string, args ...interface{}) error {
return errors.New(logFormattedEntry(CRITICAL, message, args...))
}
func Criticale(err error) error {
return logErrorEntry(CRITICAL, err)
}
// Fatal emits a FATAL level entry and exits the program
func Fatal(message string, args ...interface{}) error {
logEntry(FATAL, message, args...)
os.Exit(1)
return errors.New(logEntry(CRITICAL, message, args...))
}
// Fatalf emits a FATAL level entry and exits the program
func Fatalf(message string, args ...interface{}) error {
logFormattedEntry(FATAL, message, args...)
os.Exit(1)
return errors.New(logFormattedEntry(CRITICAL, message, args...))
}
// Fatale emits a FATAL level entry and exits the program
func Fatale(err error) error {
logErrorEntry(FATAL, err)
os.Exit(1)
return err
}
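A minimal usage sketch for this logging package (hypothetical caller code, not part of the vendored file; host and port values are placeholders): the global level filters entries, the *f variants format their message, and the Error*/Critical* helpers return an error value that can be propagated:

package main

import (
	"errors"

	"github.com/outbrain/golib/log"
)

func main() {
	// Only entries at INFO severity or more severe are emitted to stderr.
	log.SetLevel(log.INFO)

	log.Infof("connected to %s:%d", "some-host", 3306) // printed
	log.Debugf("recalculating chunk boundaries")       // filtered out at INFO level

	// The Error*/Critical* helpers log and also return an error value,
	// so the result can be handed straight back to the caller.
	err := log.Errore(errors.New("unsupported binlog_format"))
	_ = err
}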

333
vendor/github.com/outbrain/golib/sqlutils/sqlutils.go generated vendored Normal file
View File

@ -0,0 +1,333 @@
/*
Copyright 2014 Outbrain Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sqlutils
import (
"database/sql"
"encoding/json"
"errors"
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/outbrain/golib/log"
"strconv"
"strings"
"sync"
)
// RowMap represents one row in a result set. Its objective is to allow
// for easy, typed getters by column name.
type RowMap map[string]CellData
// Cell data is the result of a single (atomic) column in a single row
type CellData sql.NullString
func (this *CellData) MarshalJSON() ([]byte, error) {
if this.Valid {
return json.Marshal(this.String)
} else {
return json.Marshal(nil)
}
}
func (this *CellData) NullString() *sql.NullString {
return (*sql.NullString)(this)
}
// RowData is the result of a single row, in positioned array format
type RowData []CellData
// MarshalJSON will marshal this map as JSON
func (this *RowData) MarshalJSON() ([]byte, error) {
cells := make([](*CellData), len(*this), len(*this))
for i, val := range *this {
d := CellData(val)
cells[i] = &d
}
return json.Marshal(cells)
}
// ResultData is an ordered row set of RowData
type ResultData []RowData
var EmptyResultData = ResultData{}
func (this *RowMap) GetString(key string) string {
return (*this)[key].String
}
// GetStringD returns a string from the map, or a default value if the key does not exist
func (this *RowMap) GetStringD(key string, def string) string {
if cell, ok := (*this)[key]; ok {
return cell.String
}
return def
}
func (this *RowMap) GetInt64(key string) int64 {
res, _ := strconv.ParseInt(this.GetString(key), 10, 0)
return res
}
func (this *RowMap) GetNullInt64(key string) sql.NullInt64 {
i, err := strconv.ParseInt(this.GetString(key), 10, 0)
if err == nil {
return sql.NullInt64{Int64: i, Valid: true}
} else {
return sql.NullInt64{Valid: false}
}
}
func (this *RowMap) GetInt(key string) int {
res, _ := strconv.Atoi(this.GetString(key))
return res
}
func (this *RowMap) GetIntD(key string, def int) int {
res, err := strconv.Atoi(this.GetString(key))
if err != nil {
return def
}
return res
}
func (this *RowMap) GetUint(key string) uint {
res, _ := strconv.Atoi(this.GetString(key))
return uint(res)
}
func (this *RowMap) GetUintD(key string, def uint) uint {
res, err := strconv.Atoi(this.GetString(key))
if err != nil {
return def
}
return uint(res)
}
func (this *RowMap) GetBool(key string) bool {
return this.GetInt(key) != 0
}
// knownDBs is a DB cache by uri
var knownDBs map[string]*sql.DB = make(map[string]*sql.DB)
var knownDBsMutex = &sync.Mutex{}
// GetDB returns a DB instance based on uri.
// bool result indicates whether the DB was returned from cache; err is non-nil if opening a new connection failed.
func GetDB(mysql_uri string) (*sql.DB, bool, error) {
knownDBsMutex.Lock()
defer func() {
knownDBsMutex.Unlock()
}()
var exists bool
if _, exists = knownDBs[mysql_uri]; !exists {
if db, err := sql.Open("mysql", mysql_uri); err == nil {
knownDBs[mysql_uri] = db
} else {
return db, exists, err
}
}
return knownDBs[mysql_uri], exists, nil
}
// RowToArray is a convenience function, typically not called directly, which maps a
// single read database row into an array of NullString-backed cells
func RowToArray(rows *sql.Rows, columns []string) []CellData {
buff := make([]interface{}, len(columns))
data := make([]CellData, len(columns))
for i, _ := range buff {
buff[i] = data[i].NullString()
}
rows.Scan(buff...)
return data
}
// ScanRowsToArrays is a convenience function, typically not called directly, which maps rows
// already read from the database into arrays of NullString
func ScanRowsToArrays(rows *sql.Rows, on_row func([]CellData) error) error {
columns, _ := rows.Columns()
for rows.Next() {
arr := RowToArray(rows, columns)
err := on_row(arr)
if err != nil {
return err
}
}
return nil
}
func rowToMap(row []CellData, columns []string) map[string]CellData {
m := make(map[string]CellData)
for k, data_col := range row {
m[columns[k]] = data_col
}
return m
}
// ScanRowsToMaps is a convenience function, typically not called directly, which maps rows
// already read from the database into RowMap entries.
func ScanRowsToMaps(rows *sql.Rows, on_row func(RowMap) error) error {
columns, _ := rows.Columns()
err := ScanRowsToArrays(rows, func(arr []CellData) error {
m := rowToMap(arr, columns)
err := on_row(m)
if err != nil {
return err
}
return nil
})
return err
}
// QueryRowsMap is a convenience function allowing querying a result set while providing a callback
// function activated per read row.
func QueryRowsMap(db *sql.DB, query string, on_row func(RowMap) error, args ...interface{}) error {
var err error
defer func() {
if derr := recover(); derr != nil {
err = errors.New(fmt.Sprintf("QueryRowsMap unexpected error: %+v", derr))
}
}()
rows, err := db.Query(query, args...)
if err != nil && err != sql.ErrNoRows {
return log.Errore(err)
}
// only defer Close once we know the query succeeded and rows is non-nil
defer rows.Close()
err = ScanRowsToMaps(rows, on_row)
return err
}
// queryResultData returns a raw array of rows for a given query, optionally reading and returning column names
func queryResultData(db *sql.DB, query string, retrieveColumns bool, args ...interface{}) (ResultData, []string, error) {
var err error
defer func() {
if derr := recover(); derr != nil {
err = errors.New(fmt.Sprintf("QueryRowsMap unexpected error: %+v", derr))
}
}()
columns := []string{}
rows, err := db.Query(query, args...)
if err != nil && err != sql.ErrNoRows {
return EmptyResultData, columns, log.Errore(err)
}
// only defer Close once we know the query succeeded and rows is non-nil
defer rows.Close()
if retrieveColumns {
// Don't pay if you don't want to
columns, _ = rows.Columns()
}
resultData := ResultData{}
err = ScanRowsToArrays(rows, func(rowData []CellData) error {
resultData = append(resultData, rowData)
return nil
})
return resultData, columns, err
}
// QueryResultData returns a raw array of rows
func QueryResultData(db *sql.DB, query string, args ...interface{}) (ResultData, error) {
resultData, _, err := queryResultData(db, query, false, args...)
return resultData, err
}
// QueryResultDataNamed returns a raw array of rows, with column names
func QueryResultDataNamed(db *sql.DB, query string, args ...interface{}) (ResultData, []string, error) {
return queryResultData(db, query, true, args...)
}
// QueryRowsMapBuffered reads data from the database into a buffer, and only then applies the given function per row.
// This allows the application to take its time with processing the data, albeit consuming as much memory as required by
// the result set.
func QueryRowsMapBuffered(db *sql.DB, query string, on_row func(RowMap) error, args ...interface{}) error {
resultData, columns, err := queryResultData(db, query, true, args...)
if err != nil {
// Already logged
return err
}
for _, row := range resultData {
err = on_row(rowToMap(row, columns))
if err != nil {
return err
}
}
return nil
}
// ExecNoPrepare executes given query using given args on given DB, without using prepared statements.
func ExecNoPrepare(db *sql.DB, query string, args ...interface{}) (sql.Result, error) {
var err error
defer func() {
if derr := recover(); derr != nil {
err = errors.New(fmt.Sprintf("ExecNoPrepare unexpected error: %+v", derr))
}
}()
var res sql.Result
res, err = db.Exec(query, args...)
if err != nil {
log.Errore(err)
}
return res, err
}
// execInternal executes the given query using the given args on the given DB. It will safely prepare,
// execute and close the statement.
func execInternal(silent bool, db *sql.DB, query string, args ...interface{}) (sql.Result, error) {
var err error
defer func() {
if derr := recover(); derr != nil {
err = errors.New(fmt.Sprintf("execInternal unexpected error: %+v", derr))
}
}()
stmt, err := db.Prepare(query)
if err != nil {
return nil, err
}
defer stmt.Close()
var res sql.Result
res, err = stmt.Exec(args...)
if err != nil && !silent {
log.Errore(err)
}
return res, err
}
// Exec executes the given query using the given args on the given DB. It will safely prepare, execute and close
// the statement.
func Exec(db *sql.DB, query string, args ...interface{}) (sql.Result, error) {
return execInternal(false, db, query, args...)
}
// ExecSilently acts like Exec but does not report any error
func ExecSilently(db *sql.DB, query string, args ...interface{}) (sql.Result, error) {
return execInternal(true, db, query, args...)
}
func InClauseStringValues(terms []string) string {
quoted := []string{}
for _, s := range terms {
quoted = append(quoted, fmt.Sprintf("'%s'", strings.Replace(s, ",", "''", -1)))
}
return strings.Join(quoted, ", ")
}
// Convert variable length arguments into arguments array
func Args(args ...interface{}) []interface{} {
return args
}
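A short, hypothetical caller sketch for the helpers above (the connection URI, schema name and query are placeholders): GetDB caches *sql.DB handles by URI, and QueryRowsMap streams each result row into the callback as a RowMap with typed, by-column-name getters:

package main

import (
	"fmt"

	"github.com/outbrain/golib/log"
	"github.com/outbrain/golib/sqlutils"
)

func main() {
	// Placeholder URI; GetDB caches the *sql.DB handle per URI.
	uri := "user:password@tcp(127.0.0.1:3306)/information_schema"
	db, fromCache, err := sqlutils.GetDB(uri)
	if err != nil {
		log.Fatale(err)
	}
	fmt.Println("reused cached connection:", fromCache)

	// Each result row is delivered to the callback as a RowMap with typed getters.
	query := `select TABLE_NAME as name, TABLE_ROWS as num_rows from information_schema.tables where TABLE_SCHEMA = ?`
	err = sqlutils.QueryRowsMap(db, query, func(m sqlutils.RowMap) error {
		fmt.Printf("%s: ~%d rows\n", m.GetString("name"), m.GetInt64("num_rows"))
		return nil
	}, "some_schema")
	if err != nil {
		log.Fatale(err)
	}
}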

28
vendor/gopkg.in/gcfg.v1/LICENSE generated vendored Normal file
View File

@ -0,0 +1,28 @@
Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go
Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

4
vendor/gopkg.in/gcfg.v1/README generated vendored Normal file
View File

@ -0,0 +1,4 @@
Gcfg reads INI-style configuration files into Go structs;
supports user-defined types and subsections.
Package docs: https://godoc.org/gopkg.in/gcfg.v1

118
vendor/gopkg.in/gcfg.v1/doc.go generated vendored Normal file
View File

@ -0,0 +1,118 @@
// Package gcfg reads "INI-style" text-based configuration files with
// "name=value" pairs grouped into sections (gcfg files).
//
// This package is still a work in progress; see the sections below for planned
// changes.
//
// Syntax
//
// The syntax is based on that used by git config:
// http://git-scm.com/docs/git-config#_syntax .
// There are some (planned) differences compared to the git config format:
// - improve data portability:
// - must be encoded in UTF-8 (for now) and must not contain the 0 byte
// - include and "path" type is not supported
// (path type may be implementable as a user-defined type)
// - internationalization
// - section and variable names can contain unicode letters, unicode digits
// (as defined in http://golang.org/ref/spec#Characters ) and hyphens
// (U+002D), starting with a unicode letter
// - disallow potentially ambiguous or misleading definitions:
// - `[sec.sub]` format is not allowed (deprecated in gitconfig)
// - `[sec ""]` is not allowed
// - use `[sec]` for section name "sec" and empty subsection name
// - (planned) within a single file, definitions must be contiguous for each:
// - section: '[secA]' -> '[secB]' -> '[secA]' is an error
// - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error
// - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error
//
// Data structure
//
// The functions in this package read values into a user-defined struct.
// Each section corresponds to a struct field in the config struct, and each
// variable in a section corresponds to a data field in the section struct.
// The mapping of each section or variable name to fields is done either based
// on the "gcfg" struct tag or by matching the name of the section or variable,
// ignoring case. In the latter case, hyphens '-' in section and variable names
// correspond to underscores '_' in field names.
// Fields must be exported; to use a section or variable name starting with a
// letter that is neither upper- or lower-case, prefix the field name with 'X'.
// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .)
//
// For sections with subsections, the corresponding field in config must be a
// map, rather than a struct, with string keys and pointer-to-struct values.
// Values for subsection variables are stored in the map with the subsection
// name used as the map key.
// (Note that unlike section and variable names, subsection names are case
// sensitive.)
// When using a map, and there is a section with the same section name but
// without a subsection name, its values are stored with the empty string used
// as the key.
//
// The functions in this package panic if config is not a pointer to a struct,
// or when a field is not of a suitable type (either a struct or a map with
// string keys and pointer-to-struct values).
//
// Parsing of values
//
// The section structs in the config struct may contain single-valued or
// multi-valued variables. Variables of unnamed slice type (that is, a type
// starting with `[]`) are treated as multi-value; all others (including named
// slice types) are treated as single-valued variables.
//
// Single-valued variables are handled based on the type as follows.
// Unnamed pointer types (that is, types starting with `*`) are dereferenced,
// and if necessary, a new instance is allocated.
//
// For types implementing the encoding.TextUnmarshaler interface, the
// UnmarshalText method is used to set the value. Implementing this method is
// the recommended way for parsing user-defined types.
//
// For fields of string kind, the value string is assigned to the field, after
// unquoting and unescaping as needed.
// For fields of bool kind, the field is set to true if the value is "true",
// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or
// "0", ignoring case. In addition, single-valued bool fields can be specified
// with a "blank" value (variable name without equals sign and value); in such
// case the value is set to true.
//
// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as
// decimal or hexadecimal (if having '0x' prefix). (This is to prevent
// unintuitively handling zero-padded numbers as octal.) Other types having
// [u]int* as the underlying type, such as os.FileMode and uintptr allow
// decimal, hexadecimal, or octal values.
// Parsing mode for integer types can be overridden using the struct tag option
// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters
// (each standing for decimal, hexadecimal, and octal, respectively.)
//
// All other types are parsed using fmt.Sscanf with the "%v" verb.
//
// For multi-valued variables, each individual value is parsed as above and
// appended to the slice. If the first value is specified as a "blank" value
// (variable name without equals sign and value), a new slice is allocated;
// that is any values previously set in the slice will be ignored.
//
// The types subpackage provides helpers for parsing "enum-like" and integer
// types.
//
// TODO
//
// The following is a list of changes under consideration:
// - documentation
// - self-contained syntax documentation
// - more practical examples
// - move TODOs to issue tracker (eventually)
// - syntax
// - reconsider valid escape sequences
// (gitconfig doesn't support \r in value, \t in subsection name, etc.)
// - reading / parsing gcfg files
// - define internal representation structure
// - support multiple inputs (readers, strings, files)
// - support declaring encoding (?)
// - support varying fields sets for subsections (?)
// - writing gcfg files
// - error handling
// - make error context accessible programmatically?
// - limit input size?
//
package gcfg // import "gopkg.in/gcfg.v1"

132
vendor/gopkg.in/gcfg.v1/example_test.go generated vendored Normal file
View File

@ -0,0 +1,132 @@
package gcfg_test
import (
"fmt"
"log"
)
import "gopkg.in/gcfg.v1"
func ExampleReadStringInto() {
cfgStr := `; Comment line
[section]
name=value # comment`
cfg := struct {
Section struct {
Name string
}
}{}
err := gcfg.ReadStringInto(&cfg, cfgStr)
if err != nil {
log.Fatalf("Failed to parse gcfg data: %s", err)
}
fmt.Println(cfg.Section.Name)
// Output: value
}
func ExampleReadStringInto_bool() {
cfgStr := `; Comment line
[section]
switch=on`
cfg := struct {
Section struct {
Switch bool
}
}{}
err := gcfg.ReadStringInto(&cfg, cfgStr)
if err != nil {
log.Fatalf("Failed to parse gcfg data: %s", err)
}
fmt.Println(cfg.Section.Switch)
// Output: true
}
func ExampleReadStringInto_hyphens() {
cfgStr := `; Comment line
[section-name]
variable-name=value # comment`
cfg := struct {
Section_Name struct {
Variable_Name string
}
}{}
err := gcfg.ReadStringInto(&cfg, cfgStr)
if err != nil {
log.Fatalf("Failed to parse gcfg data: %s", err)
}
fmt.Println(cfg.Section_Name.Variable_Name)
// Output: value
}
func ExampleReadStringInto_tags() {
cfgStr := `; Comment line
[section]
var-name=value # comment`
cfg := struct {
Section struct {
FieldName string `gcfg:"var-name"`
}
}{}
err := gcfg.ReadStringInto(&cfg, cfgStr)
if err != nil {
log.Fatalf("Failed to parse gcfg data: %s", err)
}
fmt.Println(cfg.Section.FieldName)
// Output: value
}
func ExampleReadStringInto_subsections() {
cfgStr := `; Comment line
[profile "A"]
color = white
[profile "B"]
color = black
`
cfg := struct {
Profile map[string]*struct {
Color string
}
}{}
err := gcfg.ReadStringInto(&cfg, cfgStr)
if err != nil {
log.Fatalf("Failed to parse gcfg data: %s", err)
}
fmt.Printf("%s %s\n", cfg.Profile["A"].Color, cfg.Profile["B"].Color)
// Output: white black
}
func ExampleReadStringInto_multivalue() {
cfgStr := `; Comment line
[section]
multi=value1
multi=value2`
cfg := struct {
Section struct {
Multi []string
}
}{}
err := gcfg.ReadStringInto(&cfg, cfgStr)
if err != nil {
log.Fatalf("Failed to parse gcfg data: %s", err)
}
fmt.Println(cfg.Section.Multi)
// Output: [value1 value2]
}
func ExampleReadStringInto_unicode() {
cfgStr := `; Comment line
[甲]
乙=丙 # comment`
cfg := struct {
X甲 struct {
X乙 string
}
}{}
err := gcfg.ReadStringInto(&cfg, cfgStr)
if err != nil {
log.Fatalf("Failed to parse gcfg data: %s", err)
}
fmt.Println(cfg.X甲.X乙)
// Output: 丙
}

7
vendor/gopkg.in/gcfg.v1/go1_0.go generated vendored Normal file
View File

@ -0,0 +1,7 @@
// +build !go1.2
package gcfg
type textUnmarshaler interface {
UnmarshalText(text []byte) error
}

9
vendor/gopkg.in/gcfg.v1/go1_2.go generated vendored Normal file
View File

@ -0,0 +1,9 @@
// +build go1.2
package gcfg
import (
"encoding"
)
type textUnmarshaler encoding.TextUnmarshaler

63
vendor/gopkg.in/gcfg.v1/issues_test.go generated vendored Normal file
View File

@ -0,0 +1,63 @@
package gcfg
import (
"fmt"
"math/big"
"strings"
"testing"
)
type Config1 struct {
Section struct {
Int int
BigInt big.Int
}
}
var testsIssue1 = []struct {
cfg string
typename string
}{
{"[section]\nint=X", "int"},
{"[section]\nint=", "int"},
{"[section]\nint=1A", "int"},
{"[section]\nbigint=X", "big.Int"},
{"[section]\nbigint=", "big.Int"},
{"[section]\nbigint=1A", "big.Int"},
}
// Value parse error should:
// - include plain type name
// - not include reflect internals
func TestIssue1(t *testing.T) {
for i, tt := range testsIssue1 {
var c Config1
err := ReadStringInto(&c, tt.cfg)
switch {
case err == nil:
t.Errorf("%d fail: got ok; wanted error", i)
case !strings.Contains(err.Error(), tt.typename):
t.Errorf("%d fail: error message doesn't contain type name %q: %v",
i, tt.typename, err)
case strings.Contains(err.Error(), "reflect"):
t.Errorf("%d fail: error message includes reflect internals: %v",
i, err)
default:
t.Logf("%d pass: %v", i, err)
}
}
}
type confIssue2 struct{ Main struct{ Foo string } }
var testsIssue2 = []readtest{
{"[main]\n;\nfoo = bar\n", &confIssue2{struct{ Foo string }{"bar"}}, true},
{"[main]\r\n;\r\nfoo = bar\r\n", &confIssue2{struct{ Foo string }{"bar"}}, true},
}
func TestIssue2(t *testing.T) {
for i, tt := range testsIssue2 {
id := fmt.Sprintf("issue2:%d", i)
testRead(t, id, tt)
}
}

188
vendor/gopkg.in/gcfg.v1/read.go generated vendored Normal file
View File

@ -0,0 +1,188 @@
package gcfg
import (
"fmt"
"io"
"io/ioutil"
"os"
"strings"
)
import (
"gopkg.in/gcfg.v1/scanner"
"gopkg.in/gcfg.v1/token"
)
var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t'}
// no error: invalid literals should be caught by scanner
func unquote(s string) string {
u, q, esc := make([]rune, 0, len(s)), false, false
for _, c := range s {
if esc {
uc, ok := unescape[c]
switch {
case ok:
u = append(u, uc)
fallthrough
case !q && c == '\n':
esc = false
continue
}
panic("invalid escape sequence")
}
switch c {
case '"':
q = !q
case '\\':
esc = true
default:
u = append(u, c)
}
}
if q {
panic("missing end quote")
}
if esc {
panic("invalid escape sequence")
}
return string(u)
}
func readInto(config interface{}, fset *token.FileSet, file *token.File, src []byte) error {
var s scanner.Scanner
var errs scanner.ErrorList
s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0)
sect, sectsub := "", ""
pos, tok, lit := s.Scan()
errfn := func(msg string) error {
return fmt.Errorf("%s: %s", fset.Position(pos), msg)
}
for {
if errs.Len() > 0 {
return errs.Err()
}
switch tok {
case token.EOF:
return nil
case token.EOL, token.COMMENT:
pos, tok, lit = s.Scan()
case token.LBRACK:
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
return errs.Err()
}
if tok != token.IDENT {
return errfn("expected section name")
}
sect, sectsub = lit, ""
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
return errs.Err()
}
if tok == token.STRING {
sectsub = unquote(lit)
if sectsub == "" {
return errfn("empty subsection name")
}
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
return errs.Err()
}
}
if tok != token.RBRACK {
if sectsub == "" {
return errfn("expected subsection name or right bracket")
}
return errfn("expected right bracket")
}
pos, tok, lit = s.Scan()
if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
return errfn("expected EOL, EOF, or comment")
}
// If a section/subsection header was found, ensure a
// container object is created, even if there are no
// variables further down.
err := set(config, sect, sectsub, "", true, "")
if err != nil {
return err
}
case token.IDENT:
if sect == "" {
return errfn("expected section header")
}
n := lit
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
return errs.Err()
}
blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, ""
if !blank {
if tok != token.ASSIGN {
return errfn("expected '='")
}
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
return errs.Err()
}
if tok != token.STRING {
return errfn("expected value")
}
v = unquote(lit)
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
return errs.Err()
}
if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
return errfn("expected EOL, EOF, or comment")
}
}
err := set(config, sect, sectsub, n, blank, v)
if err != nil {
return err
}
default:
if sect == "" {
return errfn("expected section header")
}
return errfn("expected section header or variable declaration")
}
}
panic("never reached")
}
// ReadInto reads gcfg formatted data from reader and sets the values into the
// corresponding fields in config.
func ReadInto(config interface{}, reader io.Reader) error {
src, err := ioutil.ReadAll(reader)
if err != nil {
return err
}
fset := token.NewFileSet()
file := fset.AddFile("", fset.Base(), len(src))
return readInto(config, fset, file, src)
}
// ReadStringInto reads gcfg formatted data from str and sets the values into
// the corresponding fields in config.
func ReadStringInto(config interface{}, str string) error {
r := strings.NewReader(str)
return ReadInto(config, r)
}
// ReadFileInto reads gcfg formatted data from the file filename and sets the
// values into the corresponding fields in config.
func ReadFileInto(config interface{}, filename string) error {
f, err := os.Open(filename)
if err != nil {
return err
}
defer f.Close()
src, err := ioutil.ReadAll(f)
if err != nil {
return err
}
fset := token.NewFileSet()
file := fset.AddFile(filename, fset.Base(), len(src))
return readInto(config, fset, file, src)
}

338
vendor/gopkg.in/gcfg.v1/read_test.go generated vendored Normal file
View File

@ -0,0 +1,338 @@
package gcfg
import (
"fmt"
"math/big"
"os"
"reflect"
"testing"
)
const (
// 64 spaces
sp64 = " "
// 512 spaces
sp512 = sp64 + sp64 + sp64 + sp64 + sp64 + sp64 + sp64 + sp64
// 4096 spaces
sp4096 = sp512 + sp512 + sp512 + sp512 + sp512 + sp512 + sp512 + sp512
)
type cBasic struct {
Section cBasicS1
Hyphen_In_Section cBasicS2
unexported cBasicS1
Exported cBasicS3
TagName cBasicS1 `gcfg:"tag-name"`
}
type cBasicS1 struct {
Name string
Int int
PName *string
}
type cBasicS2 struct {
Hyphen_In_Name string
}
type cBasicS3 struct {
unexported string
}
type nonMulti []string
type unmarshalable string
func (u *unmarshalable) UnmarshalText(text []byte) error {
s := string(text)
if s == "error" {
return fmt.Errorf("%s", s)
}
*u = unmarshalable(s)
return nil
}
var _ textUnmarshaler = new(unmarshalable)
type cUni struct {
X甲 cUniS1
XSection cUniS2
}
type cUniS1 struct {
X乙 string
}
type cUniS2 struct {
XName string
}
type cMulti struct {
M1 cMultiS1
M2 cMultiS2
M3 cMultiS3
}
type cMultiS1 struct{ Multi []string }
type cMultiS2 struct{ NonMulti nonMulti }
type cMultiS3 struct{ PMulti *[]string }
type cSubs struct{ Sub map[string]*cSubsS1 }
type cSubsS1 struct{ Name string }
type cBool struct{ Section cBoolS1 }
type cBoolS1 struct{ Bool bool }
type cTxUnm struct{ Section cTxUnmS1 }
type cTxUnmS1 struct{ Name unmarshalable }
type cNum struct {
N1 cNumS1
N2 cNumS2
N3 cNumS3
}
type cNumS1 struct {
Int int
IntDHO int `gcfg:",int=dho"`
Big *big.Int
}
type cNumS2 struct {
MultiInt []int
MultiBig []*big.Int
}
type cNumS3 struct{ FileMode os.FileMode }
type readtest struct {
gcfg string
exp interface{}
ok bool
}
func newString(s string) *string { return &s }
func newStringSlice(s ...string) *[]string { return &s }
var readtests = []struct {
group string
tests []readtest
}{{"scanning", []readtest{
{"[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
// hyphen in name
{"[hyphen-in-section]\nhyphen-in-name=value", &cBasic{Hyphen_In_Section: cBasicS2{Hyphen_In_Name: "value"}}, true},
// quoted string value
{"[section]\nname=\"\"", &cBasic{Section: cBasicS1{Name: ""}}, true},
{"[section]\nname=\" \"", &cBasic{Section: cBasicS1{Name: " "}}, true},
{"[section]\nname=\"value\"", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"[section]\nname=\" value \"", &cBasic{Section: cBasicS1{Name: " value "}}, true},
{"\n[section]\nname=\"va ; lue\"", &cBasic{Section: cBasicS1{Name: "va ; lue"}}, true},
{"[section]\nname=\"val\" \"ue\"", &cBasic{Section: cBasicS1{Name: "val ue"}}, true},
{"[section]\nname=\"value", &cBasic{}, false},
// escape sequences
{"[section]\nname=\"va\\\\lue\"", &cBasic{Section: cBasicS1{Name: "va\\lue"}}, true},
{"[section]\nname=\"va\\\"lue\"", &cBasic{Section: cBasicS1{Name: "va\"lue"}}, true},
{"[section]\nname=\"va\\nlue\"", &cBasic{Section: cBasicS1{Name: "va\nlue"}}, true},
{"[section]\nname=\"va\\tlue\"", &cBasic{Section: cBasicS1{Name: "va\tlue"}}, true},
{"\n[section]\nname=\\", &cBasic{}, false},
{"\n[section]\nname=\\a", &cBasic{}, false},
{"\n[section]\nname=\"val\\a\"", &cBasic{}, false},
{"\n[section]\nname=val\\", &cBasic{}, false},
{"\n[sub \"A\\\n\"]\nname=value", &cSubs{}, false},
{"\n[sub \"A\\\t\"]\nname=value", &cSubs{}, false},
// broken line
{"[section]\nname=value \\\n value", &cBasic{Section: cBasicS1{Name: "value value"}}, true},
{"[section]\nname=\"value \\\n value\"", &cBasic{}, false},
}}, {"scanning:whitespace", []readtest{
{" \n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{" [section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"\t[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"[ section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"[section ]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"[section]\n name=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"[section]\nname =value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"[section]\nname= value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"[section]\nname=value ", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"[section]\r\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"[section]\r\nname=value\r\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{";cmnt\r\n[section]\r\nname=value\r\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
// long lines
{sp4096 + "[section]\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"[" + sp4096 + "section]\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"[section" + sp4096 + "]\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"[section]" + sp4096 + "\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"[section]\n" + sp4096 + "name=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"[section]\nname" + sp4096 + "=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"[section]\nname=" + sp4096 + "value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"[section]\nname=value\n" + sp4096, &cBasic{Section: cBasicS1{Name: "value"}}, true},
}}, {"scanning:comments", []readtest{
{"; cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"# cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{" ; cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"\t; cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"\n[section]; cmnt\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"\n[section] ; cmnt\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"\n[section]\nname=value; cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"\n[section]\nname=value ; cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"\n[section]\nname=\"value\" ; cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"\n[section]\nname=value ; \"cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"\n[section]\nname=\"va ; lue\" ; cmnt", &cBasic{Section: cBasicS1{Name: "va ; lue"}}, true},
{"\n[section]\nname=; cmnt", &cBasic{Section: cBasicS1{Name: ""}}, true},
}}, {"scanning:subsections", []readtest{
{"\n[sub \"A\"]\nname=value", &cSubs{map[string]*cSubsS1{"A": &cSubsS1{"value"}}}, true},
{"\n[sub \"b\"]\nname=value", &cSubs{map[string]*cSubsS1{"b": &cSubsS1{"value"}}}, true},
{"\n[sub \"A\\\\\"]\nname=value", &cSubs{map[string]*cSubsS1{"A\\": &cSubsS1{"value"}}}, true},
{"\n[sub \"A\\\"\"]\nname=value", &cSubs{map[string]*cSubsS1{"A\"": &cSubsS1{"value"}}}, true},
}}, {"syntax", []readtest{
// invalid line
{"\n[section]\n=", &cBasic{}, false},
// no section
{"name=value", &cBasic{}, false},
// empty section
{"\n[]\nname=value", &cBasic{}, false},
// empty subsection
{"\n[sub \"\"]\nname=value", &cSubs{}, false},
}}, {"setting", []readtest{
{"[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
// pointer
{"[section]", &cBasic{Section: cBasicS1{PName: nil}}, true},
{"[section]\npname=value", &cBasic{Section: cBasicS1{PName: newString("value")}}, true},
{"[m3]", &cMulti{M3: cMultiS3{PMulti: nil}}, true},
{"[m3]\npmulti", &cMulti{M3: cMultiS3{PMulti: newStringSlice()}}, true},
{"[m3]\npmulti=value", &cMulti{M3: cMultiS3{PMulti: newStringSlice("value")}}, true},
{"[m3]\npmulti=value1\npmulti=value2", &cMulti{M3: cMultiS3{PMulti: newStringSlice("value1", "value2")}}, true},
// section name not matched
{"\n[nonexistent]\nname=value", &cBasic{}, false},
// subsection name not matched
{"\n[section \"nonexistent\"]\nname=value", &cBasic{}, false},
// variable name not matched
{"\n[section]\nnonexistent=value", &cBasic{}, false},
// hyphen in name
{"[hyphen-in-section]\nhyphen-in-name=value", &cBasic{Hyphen_In_Section: cBasicS2{Hyphen_In_Name: "value"}}, true},
// ignore unexported fields
{"[unexported]\nname=value", &cBasic{}, false},
{"[exported]\nunexported=value", &cBasic{}, false},
// 'X' prefix for non-upper/lower-case letters
{"[甲]\n乙=丙", &cUni{X甲: cUniS1{X乙: "丙"}}, true},
//{"[section]\nxname=value", &cBasic{XSection: cBasicS4{XName: "value"}}, false},
//{"[xsection]\nname=value", &cBasic{XSection: cBasicS4{XName: "value"}}, false},
// name specified as struct tag
{"[tag-name]\nname=value", &cBasic{TagName: cBasicS1{Name: "value"}}, true},
// empty subsections
{"\n[sub \"A\"]\n[sub \"B\"]", &cSubs{map[string]*cSubsS1{"A": &cSubsS1{}, "B": &cSubsS1{}}}, true},
}}, {"multivalue", []readtest{
// unnamed slice type: treat as multi-value
{"\n[m1]", &cMulti{M1: cMultiS1{}}, true},
{"\n[m1]\nmulti=value", &cMulti{M1: cMultiS1{[]string{"value"}}}, true},
{"\n[m1]\nmulti=value1\nmulti=value2", &cMulti{M1: cMultiS1{[]string{"value1", "value2"}}}, true},
// "blank" empties multi-valued slice -- here same result as above
{"\n[m1]\nmulti\nmulti=value1\nmulti=value2", &cMulti{M1: cMultiS1{[]string{"value1", "value2"}}}, true},
// named slice type: do not treat as multi-value
{"\n[m2]", &cMulti{}, true},
{"\n[m2]\nmulti=value", &cMulti{}, false},
{"\n[m2]\nmulti=value1\nmulti=value2", &cMulti{}, false},
}}, {"type:string", []readtest{
{"[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
{"[section]\nname=", &cBasic{Section: cBasicS1{Name: ""}}, true},
}}, {"type:bool", []readtest{
// explicit values
{"[section]\nbool=true", &cBool{cBoolS1{true}}, true},
{"[section]\nbool=yes", &cBool{cBoolS1{true}}, true},
{"[section]\nbool=on", &cBool{cBoolS1{true}}, true},
{"[section]\nbool=1", &cBool{cBoolS1{true}}, true},
{"[section]\nbool=tRuE", &cBool{cBoolS1{true}}, true},
{"[section]\nbool=false", &cBool{cBoolS1{false}}, true},
{"[section]\nbool=no", &cBool{cBoolS1{false}}, true},
{"[section]\nbool=off", &cBool{cBoolS1{false}}, true},
{"[section]\nbool=0", &cBool{cBoolS1{false}}, true},
{"[section]\nbool=NO", &cBool{cBoolS1{false}}, true},
// "blank" value handled as true
{"[section]\nbool", &cBool{cBoolS1{true}}, true},
// bool parse errors
{"[section]\nbool=maybe", &cBool{}, false},
{"[section]\nbool=t", &cBool{}, false},
{"[section]\nbool=truer", &cBool{}, false},
{"[section]\nbool=2", &cBool{}, false},
{"[section]\nbool=-1", &cBool{}, false},
}}, {"type:numeric", []readtest{
{"[section]\nint=0", &cBasic{Section: cBasicS1{Int: 0}}, true},
{"[section]\nint=1", &cBasic{Section: cBasicS1{Int: 1}}, true},
{"[section]\nint=-1", &cBasic{Section: cBasicS1{Int: -1}}, true},
{"[section]\nint=0.2", &cBasic{}, false},
{"[section]\nint=1e3", &cBasic{}, false},
// primitive [u]int(|8|16|32|64) and big.Int is parsed as dec or hex (not octal)
{"[n1]\nint=010", &cNum{N1: cNumS1{Int: 10}}, true},
{"[n1]\nint=0x10", &cNum{N1: cNumS1{Int: 0x10}}, true},
{"[n1]\nbig=1", &cNum{N1: cNumS1{Big: big.NewInt(1)}}, true},
{"[n1]\nbig=0x10", &cNum{N1: cNumS1{Big: big.NewInt(0x10)}}, true},
{"[n1]\nbig=010", &cNum{N1: cNumS1{Big: big.NewInt(10)}}, true},
{"[n2]\nmultiint=010", &cNum{N2: cNumS2{MultiInt: []int{10}}}, true},
{"[n2]\nmultibig=010", &cNum{N2: cNumS2{MultiBig: []*big.Int{big.NewInt(10)}}}, true},
// set parse mode for int types via struct tag
{"[n1]\nintdho=010", &cNum{N1: cNumS1{IntDHO: 010}}, true},
// octal allowed for named type
{"[n3]\nfilemode=0777", &cNum{N3: cNumS3{FileMode: 0777}}, true},
}}, {"type:textUnmarshaler", []readtest{
{"[section]\nname=value", &cTxUnm{Section: cTxUnmS1{Name: "value"}}, true},
{"[section]\nname=error", &cTxUnm{}, false},
}},
}
func TestReadStringInto(t *testing.T) {
for _, tg := range readtests {
for i, tt := range tg.tests {
id := fmt.Sprintf("%s:%d", tg.group, i)
testRead(t, id, tt)
}
}
}
func TestReadStringIntoMultiBlankPreset(t *testing.T) {
tt := readtest{"\n[m1]\nmulti\nmulti=value1\nmulti=value2", &cMulti{M1: cMultiS1{[]string{"value1", "value2"}}}, true}
cfg := &cMulti{M1: cMultiS1{[]string{"preset1", "preset2"}}}
testReadInto(t, "multi:blank", tt, cfg)
}
func testRead(t *testing.T, id string, tt readtest) {
// get the type of the expected result
restyp := reflect.TypeOf(tt.exp).Elem()
// create a new instance to hold the actual result
res := reflect.New(restyp).Interface()
testReadInto(t, id, tt, res)
}
func testReadInto(t *testing.T, id string, tt readtest, res interface{}) {
err := ReadStringInto(res, tt.gcfg)
if tt.ok {
if err != nil {
t.Errorf("%s fail: got error %v, wanted ok", id, err)
return
} else if !reflect.DeepEqual(res, tt.exp) {
t.Errorf("%s fail: got value %#v, wanted value %#v", id, res, tt.exp)
return
}
if !testing.Short() {
t.Logf("%s pass: got value %#v", id, res)
}
} else { // !tt.ok
if err == nil {
t.Errorf("%s fail: got value %#v, wanted error", id, res)
return
}
if !testing.Short() {
t.Logf("%s pass: got error %v", id, err)
}
}
}
func TestReadFileInto(t *testing.T) {
res := &struct{ Section struct{ Name string } }{}
err := ReadFileInto(res, "testdata/gcfg_test.gcfg")
if err != nil {
t.Errorf(err.Error())
}
if "value" != res.Section.Name {
t.Errorf("got %q, wanted %q", res.Section.Name, "value")
}
}
func TestReadFileIntoUnicode(t *testing.T) {
res := &struct{ X甲 struct{ X乙 string } }{}
err := ReadFileInto(res, "testdata/gcfg_unicode_test.gcfg")
if err != nil {
t.Errorf(err.Error())
}
if "丙" != res.X甲.X乙 {
t.Errorf("got %q, wanted %q", res.X甲.X乙, "丙")
}
}

121
vendor/gopkg.in/gcfg.v1/scanner/errors.go generated vendored Normal file
View File

@ -0,0 +1,121 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package scanner
import (
"fmt"
"io"
"sort"
)
import (
"gopkg.in/gcfg.v1/token"
)
// In an ErrorList, an error is represented by an *Error.
// The position Pos, if valid, points to the beginning of
// the offending token, and the error condition is described
// by Msg.
//
type Error struct {
Pos token.Position
Msg string
}
// Error implements the error interface.
func (e Error) Error() string {
if e.Pos.Filename != "" || e.Pos.IsValid() {
// don't print "<unknown position>"
// TODO(gri) reconsider the semantics of Position.IsValid
return e.Pos.String() + ": " + e.Msg
}
return e.Msg
}
// ErrorList is a list of *Errors.
// The zero value for an ErrorList is an empty ErrorList ready to use.
//
type ErrorList []*Error
// Add adds an Error with given position and error message to an ErrorList.
func (p *ErrorList) Add(pos token.Position, msg string) {
*p = append(*p, &Error{pos, msg})
}
// Reset resets an ErrorList to no errors.
func (p *ErrorList) Reset() { *p = (*p)[0:0] }
// ErrorList implements the sort Interface.
func (p ErrorList) Len() int { return len(p) }
func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p ErrorList) Less(i, j int) bool {
e := &p[i].Pos
f := &p[j].Pos
if e.Filename < f.Filename {
return true
}
if e.Filename == f.Filename {
return e.Offset < f.Offset
}
return false
}
// Sort sorts an ErrorList. *Error entries are sorted by position,
// other errors are sorted by error message, and before any *Error
// entry.
//
func (p ErrorList) Sort() {
sort.Sort(p)
}
// RemoveMultiples sorts an ErrorList and removes all but the first error per line.
func (p *ErrorList) RemoveMultiples() {
sort.Sort(p)
var last token.Position // initial last.Line is != any legal error line
i := 0
for _, e := range *p {
if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line {
last = e.Pos
(*p)[i] = e
i++
}
}
(*p) = (*p)[0:i]
}
// An ErrorList implements the error interface.
func (p ErrorList) Error() string {
switch len(p) {
case 0:
return "no errors"
case 1:
return p[0].Error()
}
return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1)
}
// Err returns an error equivalent to this error list.
// If the list is empty, Err returns nil.
func (p ErrorList) Err() error {
if len(p) == 0 {
return nil
}
return p
}
// PrintError is a utility function that prints a list of errors to w,
// one error per line, if the err parameter is an ErrorList. Otherwise
// it prints the err string.
//
func PrintError(w io.Writer, err error) {
if list, ok := err.(ErrorList); ok {
for _, e := range list {
fmt.Fprintf(w, "%s\n", e)
}
} else if err != nil {
fmt.Fprintf(w, "%s\n", err)
}
}

46
vendor/gopkg.in/gcfg.v1/scanner/example_test.go generated vendored Normal file
View File

@ -0,0 +1,46 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package scanner_test
import (
"fmt"
)
import (
"gopkg.in/gcfg.v1/scanner"
"gopkg.in/gcfg.v1/token"
)
func ExampleScanner_Scan() {
// src is the input that we want to tokenize.
src := []byte(`[profile "A"]
color = blue ; Comment`)
// Initialize the scanner.
var s scanner.Scanner
fset := token.NewFileSet() // positions are relative to fset
file := fset.AddFile("", fset.Base(), len(src)) // register input "file"
s.Init(file, src, nil /* no error handler */, scanner.ScanComments)
// Repeated calls to Scan yield the token sequence found in the input.
for {
pos, tok, lit := s.Scan()
if tok == token.EOF {
break
}
fmt.Printf("%s\t%q\t%q\n", fset.Position(pos), tok, lit)
}
// output:
// 1:1 "[" ""
// 1:2 "IDENT" "profile"
// 1:10 "STRING" "\"A\""
// 1:13 "]" ""
// 1:14 "\n" ""
// 2:1 "IDENT" "color"
// 2:7 "=" ""
// 2:9 "STRING" "blue"
// 2:14 "COMMENT" "; Comment"
}

342
vendor/gopkg.in/gcfg.v1/scanner/scanner.go generated vendored Normal file
View File

@ -0,0 +1,342 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package scanner implements a scanner for gcfg configuration text.
// It takes a []byte as source which can then be tokenized
// through repeated calls to the Scan method.
//
// Note that the API for the scanner package may change to accommodate new
// features or implementation changes in gcfg.
//
package scanner
import (
"fmt"
"path/filepath"
"unicode"
"unicode/utf8"
)
import (
"gopkg.in/gcfg.v1/token"
)
// An ErrorHandler may be provided to Scanner.Init. If a syntax error is
// encountered and a handler was installed, the handler is called with a
// position and an error message. The position points to the beginning of
// the offending token.
//
type ErrorHandler func(pos token.Position, msg string)
// A Scanner holds the scanner's internal state while processing
// a given text. It can be allocated as part of another data
// structure but must be initialized via Init before use.
//
type Scanner struct {
// immutable state
file *token.File // source file handle
dir string // directory portion of file.Name()
src []byte // source
err ErrorHandler // error reporting; or nil
mode Mode // scanning mode
// scanning state
ch rune // current character
offset int // character offset
rdOffset int // reading offset (position after current character)
lineOffset int // current line offset
nextVal bool // next token is expected to be a value
// public state - ok to modify
ErrorCount int // number of errors encountered
}
// Read the next Unicode char into s.ch.
// s.ch < 0 means end-of-file.
//
func (s *Scanner) next() {
if s.rdOffset < len(s.src) {
s.offset = s.rdOffset
if s.ch == '\n' {
s.lineOffset = s.offset
s.file.AddLine(s.offset)
}
r, w := rune(s.src[s.rdOffset]), 1
switch {
case r == 0:
s.error(s.offset, "illegal character NUL")
case r >= 0x80:
// not ASCII
r, w = utf8.DecodeRune(s.src[s.rdOffset:])
if r == utf8.RuneError && w == 1 {
s.error(s.offset, "illegal UTF-8 encoding")
}
}
s.rdOffset += w
s.ch = r
} else {
s.offset = len(s.src)
if s.ch == '\n' {
s.lineOffset = s.offset
s.file.AddLine(s.offset)
}
s.ch = -1 // eof
}
}
// A mode value is a set of flags (or 0).
// They control scanner behavior.
//
type Mode uint
const (
ScanComments Mode = 1 << iota // return comments as COMMENT tokens
)
// Init prepares the scanner s to tokenize the text src by setting the
// scanner at the beginning of src. The scanner uses the file set file
// for position information and it adds line information for each line.
// It is ok to re-use the same file when re-scanning the same source, as
// line information which is already present is ignored. Init causes a
// panic if the file size does not match the src size.
//
// Calls to Scan will invoke the error handler err if they encounter a
// syntax error and err is not nil. Also, for each error encountered,
// the Scanner field ErrorCount is incremented by one. The mode parameter
// determines how comments are handled.
//
// Note that Init may call err if there is an error in the first character
// of the file.
//
func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
// Explicitly initialize all fields since a scanner may be reused.
if file.Size() != len(src) {
panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src)))
}
s.file = file
s.dir, _ = filepath.Split(file.Name())
s.src = src
s.err = err
s.mode = mode
s.ch = ' '
s.offset = 0
s.rdOffset = 0
s.lineOffset = 0
s.ErrorCount = 0
s.nextVal = false
s.next()
}
func (s *Scanner) error(offs int, msg string) {
if s.err != nil {
s.err(s.file.Position(s.file.Pos(offs)), msg)
}
s.ErrorCount++
}
func (s *Scanner) scanComment() string {
// initial [;#] already consumed
offs := s.offset - 1 // position of initial [;#]
for s.ch != '\n' && s.ch >= 0 {
s.next()
}
return string(s.src[offs:s.offset])
}
func isLetter(ch rune) bool {
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch)
}
func isDigit(ch rune) bool {
return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
}
func (s *Scanner) scanIdentifier() string {
offs := s.offset
for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' {
s.next()
}
return string(s.src[offs:s.offset])
}
func (s *Scanner) scanEscape(val bool) {
offs := s.offset
ch := s.ch
s.next() // always make progress
switch ch {
case '\\', '"':
// ok
case 'n', 't':
if val {
break // ok
}
fallthrough
default:
s.error(offs, "unknown escape sequence")
}
}
func (s *Scanner) scanString() string {
// '"' opening already consumed
offs := s.offset - 1
for s.ch != '"' {
ch := s.ch
s.next()
if ch == '\n' || ch < 0 {
s.error(offs, "string not terminated")
break
}
if ch == '\\' {
s.scanEscape(false)
}
}
s.next()
return string(s.src[offs:s.offset])
}
func stripCR(b []byte) []byte {
c := make([]byte, len(b))
i := 0
for _, ch := range b {
if ch != '\r' {
c[i] = ch
i++
}
}
return c[:i]
}
func (s *Scanner) scanValString() string {
offs := s.offset
hasCR := false
end := offs
inQuote := false
loop:
for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' {
ch := s.ch
s.next()
switch {
case inQuote && ch == '\\':
s.scanEscape(true)
case !inQuote && ch == '\\':
if s.ch == '\r' {
hasCR = true
s.next()
}
if s.ch != '\n' {
s.error(offs, "unquoted '\\' must be followed by new line")
break loop
}
s.next()
case ch == '"':
inQuote = !inQuote
case ch == '\r':
hasCR = true
case ch < 0 || inQuote && ch == '\n':
s.error(offs, "string not terminated")
break loop
}
if inQuote || !isWhiteSpace(ch) {
end = s.offset
}
}
lit := s.src[offs:end]
if hasCR {
lit = stripCR(lit)
}
return string(lit)
}
func isWhiteSpace(ch rune) bool {
return ch == ' ' || ch == '\t' || ch == '\r'
}
func (s *Scanner) skipWhitespace() {
for isWhiteSpace(s.ch) {
s.next()
}
}
// Scan scans the next token and returns the token position, the token,
// and its literal string if applicable. The source end is indicated by
// token.EOF.
//
// If the returned token is a literal (token.IDENT, token.STRING) or
// token.COMMENT, the literal string has the corresponding value.
//
// If the returned token is token.ILLEGAL, the literal string is the
// offending character.
//
// In all other cases, Scan returns an empty literal string.
//
// For more tolerant parsing, Scan will return a valid token if
// possible even if a syntax error was encountered. Thus, even
// if the resulting token sequence contains no illegal tokens,
// a client may not assume that no error occurred. Instead it
// must check the scanner's ErrorCount or the number of calls
// of the error handler, if there was one installed.
//
// Scan adds line information to the file added to the file
// set with Init. Token positions are relative to that file
// and thus relative to the file set.
//
func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
scanAgain:
s.skipWhitespace()
// current token start
pos = s.file.Pos(s.offset)
// determine token value
switch ch := s.ch; {
case s.nextVal:
lit = s.scanValString()
tok = token.STRING
s.nextVal = false
case isLetter(ch):
lit = s.scanIdentifier()
tok = token.IDENT
default:
s.next() // always make progress
switch ch {
case -1:
tok = token.EOF
case '\n':
tok = token.EOL
case '"':
tok = token.STRING
lit = s.scanString()
case '[':
tok = token.LBRACK
case ']':
tok = token.RBRACK
case ';', '#':
// comment
lit = s.scanComment()
if s.mode&ScanComments == 0 {
// skip comment
goto scanAgain
}
tok = token.COMMENT
case '=':
tok = token.ASSIGN
s.nextVal = true
default:
s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch))
tok = token.ILLEGAL
lit = string(ch)
}
}
return
}
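
Complementing the example file above, a short sketch of the error-handling path described in the Init and Scan comments (the input is made up): the handler is invoked for each syntax error while Scan keeps producing best-effort tokens, and ErrorCount is inspected afterwards.

package main

import (
	"fmt"

	"gopkg.in/gcfg.v1/scanner"
	"gopkg.in/gcfg.v1/token"
)

func main() {
	// '/' is not a legal identifier character, so the scanner reports it.
	src := []byte("[sect/ion]\nname = value\n")

	fset := token.NewFileSet()
	file := fset.AddFile("bad.gcfg", fset.Base(), len(src))

	eh := func(pos token.Position, msg string) {
		fmt.Printf("syntax error at %s: %s\n", pos, msg)
	}

	var s scanner.Scanner
	s.Init(file, src, eh, 0) // mode 0: comments are skipped

	for {
		if _, tok, _ := s.Scan(); tok == token.EOF {
			break
		}
	}
	fmt.Println("errors:", s.ErrorCount) // 1
}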

417
vendor/gopkg.in/gcfg.v1/scanner/scanner_test.go generated vendored Normal file
View File

@ -0,0 +1,417 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package scanner
import (
"os"
"strings"
"testing"
)
import (
"gopkg.in/gcfg.v1/token"
)
var fset = token.NewFileSet()
const /* class */ (
special = iota
literal
operator
)
func tokenclass(tok token.Token) int {
switch {
case tok.IsLiteral():
return literal
case tok.IsOperator():
return operator
}
return special
}
type elt struct {
tok token.Token
lit string
class int
pre string
suf string
}
var tokens = [...]elt{
// Special tokens
{token.COMMENT, "; a comment", special, "", "\n"},
{token.COMMENT, "# a comment", special, "", "\n"},
// Operators and delimiters
{token.ASSIGN, "=", operator, "", "value"},
{token.LBRACK, "[", operator, "", ""},
{token.RBRACK, "]", operator, "", ""},
{token.EOL, "\n", operator, "", ""},
// Identifiers
{token.IDENT, "foobar", literal, "", ""},
{token.IDENT, "a۰۱۸", literal, "", ""},
{token.IDENT, "foo६४", literal, "", ""},
{token.IDENT, "bar", literal, "", ""},
{token.IDENT, "foo-bar", literal, "", ""},
{token.IDENT, "foo", literal, ";\n", ""},
// String literals (subsection names)
{token.STRING, `"foobar"`, literal, "", ""},
{token.STRING, `"\""`, literal, "", ""},
// String literals (values)
{token.STRING, `"\n"`, literal, "=", ""},
{token.STRING, `"foobar"`, literal, "=", ""},
{token.STRING, `"foo\nbar"`, literal, "=", ""},
{token.STRING, `"foo\"bar"`, literal, "=", ""},
{token.STRING, `"foo\\bar"`, literal, "=", ""},
{token.STRING, `"foobar"`, literal, "=", ""},
{token.STRING, `"foobar"`, literal, "= ", ""},
{token.STRING, `"foobar"`, literal, "=", "\n"},
{token.STRING, `"foobar"`, literal, "=", ";"},
{token.STRING, `"foobar"`, literal, "=", " ;"},
{token.STRING, `"foobar"`, literal, "=", "#"},
{token.STRING, `"foobar"`, literal, "=", " #"},
{token.STRING, "foobar", literal, "=", ""},
{token.STRING, "foobar", literal, "= ", ""},
{token.STRING, "foobar", literal, "=", " "},
{token.STRING, `"foo" "bar"`, literal, "=", " "},
{token.STRING, "foo\\\nbar", literal, "=", ""},
{token.STRING, "foo\\\r\nbar", literal, "=", ""},
}
const whitespace = " \t \n\n\n" // to separate tokens
var source = func() []byte {
var src []byte
for _, t := range tokens {
src = append(src, t.pre...)
src = append(src, t.lit...)
src = append(src, t.suf...)
src = append(src, whitespace...)
}
return src
}()
func newlineCount(s string) int {
n := 0
for i := 0; i < len(s); i++ {
if s[i] == '\n' {
n++
}
}
return n
}
func checkPos(t *testing.T, lit string, p token.Pos, expected token.Position) {
pos := fset.Position(p)
if pos.Filename != expected.Filename {
t.Errorf("bad filename for %q: got %s, expected %s", lit, pos.Filename, expected.Filename)
}
if pos.Offset != expected.Offset {
t.Errorf("bad position for %q: got %d, expected %d", lit, pos.Offset, expected.Offset)
}
if pos.Line != expected.Line {
t.Errorf("bad line for %q: got %d, expected %d", lit, pos.Line, expected.Line)
}
if pos.Column != expected.Column {
t.Errorf("bad column for %q: got %d, expected %d", lit, pos.Column, expected.Column)
}
}
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
// make source
src_linecount := newlineCount(string(source))
whitespace_linecount := newlineCount(whitespace)
index := 0
// error handler
eh := func(_ token.Position, msg string) {
t.Errorf("%d: error handler called (msg = %s)", index, msg)
}
// verify scan
var s Scanner
s.Init(fset.AddFile("", fset.Base(), len(source)), source, eh, ScanComments)
// epos is the expected position
epos := token.Position{
Filename: "",
Offset: 0,
Line: 1,
Column: 1,
}
for {
pos, tok, lit := s.Scan()
if lit == "" {
// no literal value for non-literal tokens
lit = tok.String()
}
e := elt{token.EOF, "", special, "", ""}
if index < len(tokens) {
e = tokens[index]
}
if tok == token.EOF {
lit = "<EOF>"
epos.Line = src_linecount
epos.Column = 2
}
if e.pre != "" && strings.ContainsRune("=;#", rune(e.pre[0])) {
epos.Column = 1
checkPos(t, lit, pos, epos)
var etok token.Token
if e.pre[0] == '=' {
etok = token.ASSIGN
} else {
etok = token.COMMENT
}
if tok != etok {
t.Errorf("bad token for %q: got %q, expected %q", lit, tok, etok)
}
pos, tok, lit = s.Scan()
}
epos.Offset += len(e.pre)
if tok != token.EOF {
epos.Column = 1 + len(e.pre)
}
if e.pre != "" && e.pre[len(e.pre)-1] == '\n' {
epos.Offset--
epos.Column--
checkPos(t, lit, pos, epos)
if tok != token.EOL {
t.Errorf("bad token for %q: got %q, expected %q", lit, tok, token.EOL)
}
epos.Line++
epos.Offset++
epos.Column = 1
pos, tok, lit = s.Scan()
}
checkPos(t, lit, pos, epos)
if tok != e.tok {
t.Errorf("bad token for %q: got %q, expected %q", lit, tok, e.tok)
}
if e.tok.IsLiteral() {
// no CRs in value string literals
elit := e.lit
if strings.ContainsRune(e.pre, '=') {
elit = string(stripCR([]byte(elit)))
epos.Offset += len(e.lit) - len(lit) // correct position
}
if lit != elit {
t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, elit)
}
}
if tokenclass(tok) != e.class {
t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
}
epos.Offset += len(lit) + len(e.suf) + len(whitespace)
epos.Line += newlineCount(lit) + newlineCount(e.suf) + whitespace_linecount
index++
if tok == token.EOF {
break
}
if e.suf == "value" {
pos, tok, lit = s.Scan()
if tok != token.STRING {
t.Errorf("bad token for %q: got %q, expected %q", lit, tok, token.STRING)
}
} else if strings.ContainsRune(e.suf, ';') || strings.ContainsRune(e.suf, '#') {
pos, tok, lit = s.Scan()
if tok != token.COMMENT {
t.Errorf("bad token for %q: got %q, expected %q", lit, tok, token.COMMENT)
}
}
// skip EOLs
for i := 0; i < whitespace_linecount+newlineCount(e.suf); i++ {
pos, tok, lit = s.Scan()
if tok != token.EOL {
t.Errorf("bad token for %q: got %q, expected %q", lit, tok, token.EOL)
}
}
}
if s.ErrorCount != 0 {
t.Errorf("found %d errors", s.ErrorCount)
}
}
func TestScanValStringEOF(t *testing.T) {
var s Scanner
src := "= value"
f := fset.AddFile("src", fset.Base(), len(src))
s.Init(f, []byte(src), nil, 0)
s.Scan() // =
s.Scan() // value
_, tok, _ := s.Scan() // EOF
if tok != token.EOF {
t.Errorf("bad token: got %s, expected %s", tok, token.EOF)
}
if s.ErrorCount > 0 {
t.Error("scanning error")
}
}
// Verify that initializing the same scanner more than once works correctly.
func TestInit(t *testing.T) {
var s Scanner
// 1st init
src1 := "\nname = value"
f1 := fset.AddFile("src1", fset.Base(), len(src1))
s.Init(f1, []byte(src1), nil, 0)
if f1.Size() != len(src1) {
t.Errorf("bad file size: got %d, expected %d", f1.Size(), len(src1))
}
s.Scan() // \n
s.Scan() // name
_, tok, _ := s.Scan() // =
if tok != token.ASSIGN {
t.Errorf("bad token: got %s, expected %s", tok, token.ASSIGN)
}
// 2nd init
src2 := "[section]"
f2 := fset.AddFile("src2", fset.Base(), len(src2))
s.Init(f2, []byte(src2), nil, 0)
if f2.Size() != len(src2) {
t.Errorf("bad file size: got %d, expected %d", f2.Size(), len(src2))
}
_, tok, _ = s.Scan() // [
if tok != token.LBRACK {
t.Errorf("bad token: got %s, expected %s", tok, token.LBRACK)
}
if s.ErrorCount != 0 {
t.Errorf("found %d errors", s.ErrorCount)
}
}
func TestStdErrorHandler(t *testing.T) {
const src = "@\n" + // illegal character, cause an error
"@ @\n" // two errors on the same line
var list ErrorList
eh := func(pos token.Position, msg string) { list.Add(pos, msg) }
var s Scanner
s.Init(fset.AddFile("File1", fset.Base(), len(src)), []byte(src), eh, 0)
for {
if _, tok, _ := s.Scan(); tok == token.EOF {
break
}
}
if len(list) != s.ErrorCount {
t.Errorf("found %d errors, expected %d", len(list), s.ErrorCount)
}
if len(list) != 3 {
t.Errorf("found %d raw errors, expected 3", len(list))
PrintError(os.Stderr, list)
}
list.Sort()
if len(list) != 3 {
t.Errorf("found %d sorted errors, expected 3", len(list))
PrintError(os.Stderr, list)
}
list.RemoveMultiples()
if len(list) != 2 {
t.Errorf("found %d one-per-line errors, expected 2", len(list))
PrintError(os.Stderr, list)
}
}
type errorCollector struct {
cnt int // number of errors encountered
msg string // last error message encountered
pos token.Position // last error position encountered
}
func checkError(t *testing.T, src string, tok token.Token, pos int, err string) {
var s Scanner
var h errorCollector
eh := func(pos token.Position, msg string) {
h.cnt++
h.msg = msg
h.pos = pos
}
s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), eh, ScanComments)
if src[0] == '=' {
_, _, _ = s.Scan()
}
_, tok0, _ := s.Scan()
_, tok1, _ := s.Scan()
if tok0 != tok {
t.Errorf("%q: got %s, expected %s", src, tok0, tok)
}
if tok1 != token.EOF {
t.Errorf("%q: got %s, expected EOF", src, tok1)
}
cnt := 0
if err != "" {
cnt = 1
}
if h.cnt != cnt {
t.Errorf("%q: got cnt %d, expected %d", src, h.cnt, cnt)
}
if h.msg != err {
t.Errorf("%q: got msg %q, expected %q", src, h.msg, err)
}
if h.pos.Offset != pos {
t.Errorf("%q: got offset %d, expected %d", src, h.pos.Offset, pos)
}
}
var errors = []struct {
src string
tok token.Token
pos int
err string
}{
{"\a", token.ILLEGAL, 0, "illegal character U+0007"},
{"/", token.ILLEGAL, 0, "illegal character U+002F '/'"},
{"_", token.ILLEGAL, 0, "illegal character U+005F '_'"},
{`…`, token.ILLEGAL, 0, "illegal character U+2026 '…'"},
{`""`, token.STRING, 0, ""},
{`"`, token.STRING, 0, "string not terminated"},
{"\"\n", token.STRING, 0, "string not terminated"},
{`="`, token.STRING, 1, "string not terminated"},
{"=\"\n", token.STRING, 1, "string not terminated"},
{"=\\", token.STRING, 1, "unquoted '\\' must be followed by new line"},
{"=\\\r", token.STRING, 1, "unquoted '\\' must be followed by new line"},
{`"\z"`, token.STRING, 2, "unknown escape sequence"},
{`"\a"`, token.STRING, 2, "unknown escape sequence"},
{`"\b"`, token.STRING, 2, "unknown escape sequence"},
{`"\f"`, token.STRING, 2, "unknown escape sequence"},
{`"\r"`, token.STRING, 2, "unknown escape sequence"},
{`"\t"`, token.STRING, 2, "unknown escape sequence"},
{`"\v"`, token.STRING, 2, "unknown escape sequence"},
{`"\0"`, token.STRING, 2, "unknown escape sequence"},
}
func TestScanErrors(t *testing.T) {
for _, e := range errors {
checkError(t, e.src, e.tok, e.pos, e.err)
}
}
func BenchmarkScan(b *testing.B) {
b.StopTimer()
fset := token.NewFileSet()
file := fset.AddFile("", fset.Base(), len(source))
var s Scanner
b.StartTimer()
for i := b.N - 1; i >= 0; i-- {
s.Init(file, source, nil, ScanComments)
for {
_, tok, _ := s.Scan()
if tok == token.EOF {
break
}
}
}
}

293
vendor/gopkg.in/gcfg.v1/set.go generated vendored Normal file
View File

@ -0,0 +1,293 @@
package gcfg
import (
"fmt"
"math/big"
"reflect"
"strings"
"unicode"
"unicode/utf8"
"gopkg.in/gcfg.v1/types"
)
type tag struct {
ident string
intMode string
}
func newTag(ts string) tag {
t := tag{}
s := strings.Split(ts, ",")
t.ident = s[0]
for _, tse := range s[1:] {
if strings.HasPrefix(tse, "int=") {
t.intMode = tse[len("int="):]
}
}
return t
}
func fieldFold(v reflect.Value, name string) (reflect.Value, tag) {
var n string
r0, _ := utf8.DecodeRuneInString(name)
if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) {
n = "X"
}
n += strings.Replace(name, "-", "_", -1)
f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool {
if !v.FieldByName(fieldName).CanSet() {
return false
}
f, _ := v.Type().FieldByName(fieldName)
t := newTag(f.Tag.Get("gcfg"))
if t.ident != "" {
return strings.EqualFold(t.ident, name)
}
return strings.EqualFold(n, fieldName)
})
if !ok {
return reflect.Value{}, tag{}
}
return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg"))
}
type setter func(destp interface{}, blank bool, val string, t tag) error
var errUnsupportedType = fmt.Errorf("unsupported type")
var errBlankUnsupported = fmt.Errorf("blank value not supported for type")
var setters = []setter{
typeSetter, textUnmarshalerSetter, kindSetter, scanSetter,
}
func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error {
dtu, ok := d.(textUnmarshaler)
if !ok {
return errUnsupportedType
}
if blank {
return errBlankUnsupported
}
return dtu.UnmarshalText([]byte(val))
}
func boolSetter(d interface{}, blank bool, val string, t tag) error {
if blank {
reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true))
return nil
}
b, err := types.ParseBool(val)
if err == nil {
reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b))
}
return err
}
func intMode(mode string) types.IntMode {
var m types.IntMode
if strings.ContainsAny(mode, "dD") {
m |= types.Dec
}
if strings.ContainsAny(mode, "hH") {
m |= types.Hex
}
if strings.ContainsAny(mode, "oO") {
m |= types.Oct
}
return m
}
var typeModes = map[reflect.Type]types.IntMode{
reflect.TypeOf(int(0)): types.Dec | types.Hex,
reflect.TypeOf(int8(0)): types.Dec | types.Hex,
reflect.TypeOf(int16(0)): types.Dec | types.Hex,
reflect.TypeOf(int32(0)): types.Dec | types.Hex,
reflect.TypeOf(int64(0)): types.Dec | types.Hex,
reflect.TypeOf(uint(0)): types.Dec | types.Hex,
reflect.TypeOf(uint8(0)): types.Dec | types.Hex,
reflect.TypeOf(uint16(0)): types.Dec | types.Hex,
reflect.TypeOf(uint32(0)): types.Dec | types.Hex,
reflect.TypeOf(uint64(0)): types.Dec | types.Hex,
// use default mode (allow dec/hex/oct) for uintptr type
reflect.TypeOf(big.Int{}): types.Dec | types.Hex,
}
func intModeDefault(t reflect.Type) types.IntMode {
m, ok := typeModes[t]
if !ok {
m = types.Dec | types.Hex | types.Oct
}
return m
}
func intSetter(d interface{}, blank bool, val string, t tag) error {
if blank {
return errBlankUnsupported
}
mode := intMode(t.intMode)
if mode == 0 {
mode = intModeDefault(reflect.TypeOf(d).Elem())
}
return types.ParseInt(d, val, mode)
}
func stringSetter(d interface{}, blank bool, val string, t tag) error {
if blank {
return errBlankUnsupported
}
dsp, ok := d.(*string)
if !ok {
return errUnsupportedType
}
*dsp = val
return nil
}
var kindSetters = map[reflect.Kind]setter{
reflect.String: stringSetter,
reflect.Bool: boolSetter,
reflect.Int: intSetter,
reflect.Int8: intSetter,
reflect.Int16: intSetter,
reflect.Int32: intSetter,
reflect.Int64: intSetter,
reflect.Uint: intSetter,
reflect.Uint8: intSetter,
reflect.Uint16: intSetter,
reflect.Uint32: intSetter,
reflect.Uint64: intSetter,
reflect.Uintptr: intSetter,
}
var typeSetters = map[reflect.Type]setter{
reflect.TypeOf(big.Int{}): intSetter,
}
func typeSetter(d interface{}, blank bool, val string, tt tag) error {
t := reflect.ValueOf(d).Type().Elem()
setter, ok := typeSetters[t]
if !ok {
return errUnsupportedType
}
return setter(d, blank, val, tt)
}
func kindSetter(d interface{}, blank bool, val string, tt tag) error {
k := reflect.ValueOf(d).Type().Elem().Kind()
setter, ok := kindSetters[k]
if !ok {
return errUnsupportedType
}
return setter(d, blank, val, tt)
}
func scanSetter(d interface{}, blank bool, val string, tt tag) error {
if blank {
return errBlankUnsupported
}
return types.ScanFully(d, val, 'v')
}
func set(cfg interface{}, sect, sub, name string, blank bool, value string) error {
vPCfg := reflect.ValueOf(cfg)
if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct {
panic(fmt.Errorf("config must be a pointer to a struct"))
}
vCfg := vPCfg.Elem()
vSect, _ := fieldFold(vCfg, sect)
if !vSect.IsValid() {
return fmt.Errorf("invalid section: section %q", sect)
}
if vSect.Kind() == reflect.Map {
vst := vSect.Type()
if vst.Key().Kind() != reflect.String ||
vst.Elem().Kind() != reflect.Ptr ||
vst.Elem().Elem().Kind() != reflect.Struct {
panic(fmt.Errorf("map field for section must have string keys and "+
" pointer-to-struct values: section %q", sect))
}
if vSect.IsNil() {
vSect.Set(reflect.MakeMap(vst))
}
k := reflect.ValueOf(sub)
pv := vSect.MapIndex(k)
if !pv.IsValid() {
vType := vSect.Type().Elem().Elem()
pv = reflect.New(vType)
vSect.SetMapIndex(k, pv)
}
vSect = pv.Elem()
} else if vSect.Kind() != reflect.Struct {
panic(fmt.Errorf("field for section must be a map or a struct: "+
"section %q", sect))
} else if sub != "" {
return fmt.Errorf("invalid subsection: "+
"section %q subsection %q", sect, sub)
}
// Empty name is a special value, meaning that only the
// section/subsection object is to be created, with no values set.
if name == "" {
return nil
}
vVar, t := fieldFold(vSect, name)
if !vVar.IsValid() {
return fmt.Errorf("invalid variable: "+
"section %q subsection %q variable %q", sect, sub, name)
}
// vVal is either single-valued var, or newly allocated value within multi-valued var
var vVal reflect.Value
// multi-value if unnamed slice type
isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice ||
vVar.Type().Name() == "" && vVar.Kind() == reflect.Ptr && vVar.Type().Elem().Name() == "" && vVar.Type().Elem().Kind() == reflect.Slice
if isMulti && vVar.Kind() == reflect.Ptr {
if vVar.IsNil() {
vVar.Set(reflect.New(vVar.Type().Elem()))
}
vVar = vVar.Elem()
}
if isMulti && blank {
vVar.Set(reflect.Zero(vVar.Type()))
return nil
}
if isMulti {
vVal = reflect.New(vVar.Type().Elem()).Elem()
} else {
vVal = vVar
}
isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr
isNew := isDeref && vVal.IsNil()
// vAddr is address of value to set (dereferenced & allocated as needed)
var vAddr reflect.Value
switch {
case isNew:
vAddr = reflect.New(vVal.Type().Elem())
case isDeref && !isNew:
vAddr = vVal
default:
vAddr = vVal.Addr()
}
vAddrI := vAddr.Interface()
err, ok := error(nil), false
for _, s := range setters {
err = s(vAddrI, blank, value, t)
if err == nil {
ok = true
break
}
if err != errUnsupportedType {
return err
}
}
if !ok {
// in case all setters returned errUnsupportedType
return err
}
if isNew { // set reference if it was dereferenced and newly allocated
vVal.Set(vAddr)
}
if isMulti { // append if multi-valued
vVar.Set(reflect.Append(vVar, vVal))
}
return nil
}
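
set itself is unexported and is driven by gcfg's public readers. A hedged sketch of how the tag and setter machinery above is exercised from user code, assuming gcfg.ReadStringInto as the entry point and using a made-up config: the gcfg struct tag overrides name matching, int= selects the integer parsing mode, an unnamed slice field becomes a multi-valued variable, and a map field collects subsections.

package main

import (
	"fmt"
	"log"

	gcfg "gopkg.in/gcfg.v1"
)

// Config is a made-up configuration schema.
type Config struct {
	Daemon struct {
		Listen  string
		Timeout int `gcfg:"timeout,int=d"` // ident override + decimal-only int mode
		Workers []int                      // unnamed slice type: multi-valued variable
	}
	Profile map[string]*struct { // map field: one entry per subsection
		Color string
	}
}

func main() {
	src := `
[daemon]
listen  = :8080
timeout = 30
workers = 1
workers = 2

[profile "A"]
color = blue
`
	var c Config
	if err := gcfg.ReadStringInto(&c, src); err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.Daemon.Listen, c.Daemon.Timeout, c.Daemon.Workers, c.Profile["A"].Color)
	// :8080 30 [1 2] blue
}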

3
vendor/gopkg.in/gcfg.v1/testdata/gcfg_test.gcfg generated vendored Normal file
View File

@ -0,0 +1,3 @@
; Comment line
[section]
name=value # comment

View File

@ -0,0 +1,3 @@
; Comment line
[甲]
乙=丙 # comment

435
vendor/gopkg.in/gcfg.v1/token/position.go generated vendored Normal file
View File

@ -0,0 +1,435 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// TODO(gri) consider making this a separate package outside the go directory.
package token
import (
"fmt"
"sort"
"sync"
)
// -----------------------------------------------------------------------------
// Positions
// Position describes an arbitrary source position
// including the file, line, and column location.
// A Position is valid if the line number is > 0.
//
type Position struct {
Filename string // filename, if any
Offset int // offset, starting at 0
Line int // line number, starting at 1
Column int // column number, starting at 1 (character count)
}
// IsValid returns true if the position is valid.
func (pos *Position) IsValid() bool { return pos.Line > 0 }
// String returns a string in one of several forms:
//
// file:line:column valid position with file name
// line:column valid position without file name
// file invalid position with file name
// - invalid position without file name
//
func (pos Position) String() string {
s := pos.Filename
if pos.IsValid() {
if s != "" {
s += ":"
}
s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
}
if s == "" {
s = "-"
}
return s
}
// Pos is a compact encoding of a source position within a file set.
// It can be converted into a Position for a more convenient, but much
// larger, representation.
//
// The Pos value for a given file is a number in the range [base, base+size],
// where base and size are specified when adding the file to the file set via
// AddFile.
//
// To create the Pos value for a specific source offset, first add
// the respective file to the current file set (via FileSet.AddFile)
// and then call File.Pos(offset) for that file. Given a Pos value p
// for a specific file set fset, the corresponding Position value is
// obtained by calling fset.Position(p).
//
// Pos values can be compared directly with the usual comparison operators:
// If two Pos values p and q are in the same file, comparing p and q is
// equivalent to comparing the respective source file offsets. If p and q
// are in different files, p < q is true if the file implied by p was added
// to the respective file set before the file implied by q.
//
type Pos int
// The zero value for Pos is NoPos; there is no file and line information
// associated with it, and NoPos.IsValid() is false. NoPos is always
// smaller than any other Pos value. The corresponding Position value
// for NoPos is the zero value for Position.
//
const NoPos Pos = 0
// IsValid returns true if the position is valid.
func (p Pos) IsValid() bool {
return p != NoPos
}
// -----------------------------------------------------------------------------
// File
// A File is a handle for a file belonging to a FileSet.
// A File has a name, size, and line offset table.
//
type File struct {
set *FileSet
name string // file name as provided to AddFile
base int // Pos value range for this file is [base...base+size]
size int // file size as provided to AddFile
// lines and infos are protected by set.mutex
lines []int
infos []lineInfo
}
// Name returns the file name of file f as registered with AddFile.
func (f *File) Name() string {
return f.name
}
// Base returns the base offset of file f as registered with AddFile.
func (f *File) Base() int {
return f.base
}
// Size returns the size of file f as registered with AddFile.
func (f *File) Size() int {
return f.size
}
// LineCount returns the number of lines in file f.
func (f *File) LineCount() int {
f.set.mutex.RLock()
n := len(f.lines)
f.set.mutex.RUnlock()
return n
}
// AddLine adds the line offset for a new line.
// The line offset must be larger than the offset for the previous line
// and smaller than the file size; otherwise the line offset is ignored.
//
func (f *File) AddLine(offset int) {
f.set.mutex.Lock()
if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size {
f.lines = append(f.lines, offset)
}
f.set.mutex.Unlock()
}
// SetLines sets the line offsets for a file and returns true if successful.
// The line offsets are the offsets of the first character of each line;
// for instance for the content "ab\nc\n" the line offsets are {0, 3}.
// An empty file has an empty line offset table.
// Each line offset must be larger than the offset for the previous line
// and smaller than the file size; otherwise SetLines fails and returns
// false.
//
func (f *File) SetLines(lines []int) bool {
// verify validity of lines table
size := f.size
for i, offset := range lines {
if i > 0 && offset <= lines[i-1] || size <= offset {
return false
}
}
// set lines table
f.set.mutex.Lock()
f.lines = lines
f.set.mutex.Unlock()
return true
}
// SetLinesForContent sets the line offsets for the given file content.
func (f *File) SetLinesForContent(content []byte) {
var lines []int
line := 0
for offset, b := range content {
if line >= 0 {
lines = append(lines, line)
}
line = -1
if b == '\n' {
line = offset + 1
}
}
// set lines table
f.set.mutex.Lock()
f.lines = lines
f.set.mutex.Unlock()
}
// A lineInfo object describes alternative file and line number
// information (such as provided via a //line comment in a .go
// file) for a given file offset.
type lineInfo struct {
// fields are exported to make them accessible to gob
Offset int
Filename string
Line int
}
// AddLineInfo adds alternative file and line number information for
// a given file offset. The offset must be larger than the offset for
// the previously added alternative line info and smaller than the
// file size; otherwise the information is ignored.
//
// AddLineInfo is typically used to register alternative position
// information for //line filename:line comments in source files.
//
func (f *File) AddLineInfo(offset int, filename string, line int) {
f.set.mutex.Lock()
if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
f.infos = append(f.infos, lineInfo{offset, filename, line})
}
f.set.mutex.Unlock()
}
// Pos returns the Pos value for the given file offset;
// the offset must be <= f.Size().
// f.Pos(f.Offset(p)) == p.
//
func (f *File) Pos(offset int) Pos {
if offset > f.size {
panic("illegal file offset")
}
return Pos(f.base + offset)
}
// Offset returns the offset for the given file position p;
// p must be a valid Pos value in that file.
// f.Offset(f.Pos(offset)) == offset.
//
func (f *File) Offset(p Pos) int {
if int(p) < f.base || int(p) > f.base+f.size {
panic("illegal Pos value")
}
return int(p) - f.base
}
// Line returns the line number for the given file position p;
// p must be a Pos value in that file or NoPos.
//
func (f *File) Line(p Pos) int {
// TODO(gri) this can be implemented much more efficiently
return f.Position(p).Line
}
func searchLineInfos(a []lineInfo, x int) int {
return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1
}
// info returns the file name, line, and column number for a file offset.
func (f *File) info(offset int) (filename string, line, column int) {
filename = f.name
if i := searchInts(f.lines, offset); i >= 0 {
line, column = i+1, offset-f.lines[i]+1
}
if len(f.infos) > 0 {
// almost no files have extra line infos
if i := searchLineInfos(f.infos, offset); i >= 0 {
alt := &f.infos[i]
filename = alt.Filename
if i := searchInts(f.lines, alt.Offset); i >= 0 {
line += alt.Line - i - 1
}
}
}
return
}
func (f *File) position(p Pos) (pos Position) {
offset := int(p) - f.base
pos.Offset = offset
pos.Filename, pos.Line, pos.Column = f.info(offset)
return
}
// Position returns the Position value for the given file position p;
// p must be a Pos value in that file or NoPos.
//
func (f *File) Position(p Pos) (pos Position) {
if p != NoPos {
if int(p) < f.base || int(p) > f.base+f.size {
panic("illegal Pos value")
}
pos = f.position(p)
}
return
}
// -----------------------------------------------------------------------------
// FileSet
// A FileSet represents a set of source files.
// Methods of file sets are synchronized; multiple goroutines
// may invoke them concurrently.
//
type FileSet struct {
mutex sync.RWMutex // protects the file set
base int // base offset for the next file
files []*File // list of files in the order added to the set
last *File // cache of last file looked up
}
// NewFileSet creates a new file set.
func NewFileSet() *FileSet {
s := new(FileSet)
s.base = 1 // 0 == NoPos
return s
}
// Base returns the minimum base offset that must be provided to
// AddFile when adding the next file.
//
func (s *FileSet) Base() int {
s.mutex.RLock()
b := s.base
s.mutex.RUnlock()
return b
}
// AddFile adds a new file with a given filename, base offset, and file size
// to the file set s and returns the file. Multiple files may have the same
// name. The base offset must not be smaller than the FileSet's Base(), and
// size must not be negative.
//
// Adding the file will set the file set's Base() value to base + size + 1
// as the minimum base value for the next file. The following relationship
// exists between a Pos value p for a given file offset offs:
//
// int(p) = base + offs
//
// with offs in the range [0, size] and thus p in the range [base, base+size].
// For convenience, File.Pos may be used to create file-specific position
// values from a file offset.
//
func (s *FileSet) AddFile(filename string, base, size int) *File {
s.mutex.Lock()
defer s.mutex.Unlock()
if base < s.base || size < 0 {
panic("illegal base or size")
}
// base >= s.base && size >= 0
f := &File{s, filename, base, size, []int{0}, nil}
base += size + 1 // +1 because EOF also has a position
if base < 0 {
panic("token.Pos offset overflow (> 2G of source code in file set)")
}
// add the file to the file set
s.base = base
s.files = append(s.files, f)
s.last = f
return f
}
// Iterate calls f for the files in the file set in the order they were added
// until f returns false.
//
func (s *FileSet) Iterate(f func(*File) bool) {
for i := 0; ; i++ {
var file *File
s.mutex.RLock()
if i < len(s.files) {
file = s.files[i]
}
s.mutex.RUnlock()
if file == nil || !f(file) {
break
}
}
}
func searchFiles(a []*File, x int) int {
return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
}
func (s *FileSet) file(p Pos) *File {
// common case: p is in last file
if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
return f
}
// p is not in last file - search all files
if i := searchFiles(s.files, int(p)); i >= 0 {
f := s.files[i]
// f.base <= int(p) by definition of searchFiles
if int(p) <= f.base+f.size {
s.last = f
return f
}
}
return nil
}
// File returns the file that contains the position p.
// If no such file is found (for instance for p == NoPos),
// the result is nil.
//
func (s *FileSet) File(p Pos) (f *File) {
if p != NoPos {
s.mutex.RLock()
f = s.file(p)
s.mutex.RUnlock()
}
return
}
// Position converts a Pos in the fileset into a general Position.
func (s *FileSet) Position(p Pos) (pos Position) {
if p != NoPos {
s.mutex.RLock()
if f := s.file(p); f != nil {
pos = f.position(p)
}
s.mutex.RUnlock()
}
return
}
// -----------------------------------------------------------------------------
// Helper functions
func searchInts(a []int, x int) int {
// This function body is a manually inlined version of:
//
// return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
//
// With better compiler optimizations, this may not be needed in the
// future, but at the moment this change improves the go/printer
// benchmark performance by ~30%. This has a direct impact on the
// speed of gofmt and thus seems worthwhile (2011-04-29).
// TODO(gri): Remove this when compilers have caught up.
i, j := 0, len(a)
for i < j {
h := i + (j-i)/2 // avoid overflow when computing h
// i ≤ h < j
if a[h] <= x {
i = h + 1
} else {
j = h
}
}
return i - 1
}
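
A compact sketch of the Pos/Position round trip described above (file name and contents are made up): byte offsets become Pos values through File.Pos and resolve back to file:line:column through FileSet.Position.

package main

import (
	"fmt"

	"gopkg.in/gcfg.v1/token"
)

func main() {
	src := []byte("[core]\neditor = vim\n") // assumed content

	fset := token.NewFileSet()
	f := fset.AddFile("demo.gcfg", fset.Base(), len(src))
	f.SetLinesForContent(src) // record line starts so columns resolve correctly

	offs := 7 // offset of "editor", the start of line 2
	p := f.Pos(offs)
	fmt.Println(fset.Position(p))    // demo.gcfg:2:1
	fmt.Println(f.Offset(p) == offs) // true
}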

181
vendor/gopkg.in/gcfg.v1/token/position_test.go generated vendored Normal file
View File

@ -0,0 +1,181 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package token
import (
"fmt"
"testing"
)
func checkPos(t *testing.T, msg string, p, q Position) {
if p.Filename != q.Filename {
t.Errorf("%s: expected filename = %q; got %q", msg, q.Filename, p.Filename)
}
if p.Offset != q.Offset {
t.Errorf("%s: expected offset = %d; got %d", msg, q.Offset, p.Offset)
}
if p.Line != q.Line {
t.Errorf("%s: expected line = %d; got %d", msg, q.Line, p.Line)
}
if p.Column != q.Column {
t.Errorf("%s: expected column = %d; got %d", msg, q.Column, p.Column)
}
}
func TestNoPos(t *testing.T) {
if NoPos.IsValid() {
t.Errorf("NoPos should not be valid")
}
var fset *FileSet
checkPos(t, "nil NoPos", fset.Position(NoPos), Position{})
fset = NewFileSet()
checkPos(t, "fset NoPos", fset.Position(NoPos), Position{})
}
var tests = []struct {
filename string
source []byte // may be nil
size int
lines []int
}{
{"a", []byte{}, 0, []int{}},
{"b", []byte("01234"), 5, []int{0}},
{"c", []byte("\n\n\n\n\n\n\n\n\n"), 9, []int{0, 1, 2, 3, 4, 5, 6, 7, 8}},
{"d", nil, 100, []int{0, 5, 10, 20, 30, 70, 71, 72, 80, 85, 90, 99}},
{"e", nil, 777, []int{0, 80, 100, 120, 130, 180, 267, 455, 500, 567, 620}},
{"f", []byte("package p\n\nimport \"fmt\""), 23, []int{0, 10, 11}},
{"g", []byte("package p\n\nimport \"fmt\"\n"), 24, []int{0, 10, 11}},
{"h", []byte("package p\n\nimport \"fmt\"\n "), 25, []int{0, 10, 11, 24}},
}
func linecol(lines []int, offs int) (int, int) {
prevLineOffs := 0
for line, lineOffs := range lines {
if offs < lineOffs {
return line, offs - prevLineOffs + 1
}
prevLineOffs = lineOffs
}
return len(lines), offs - prevLineOffs + 1
}
func verifyPositions(t *testing.T, fset *FileSet, f *File, lines []int) {
for offs := 0; offs < f.Size(); offs++ {
p := f.Pos(offs)
offs2 := f.Offset(p)
if offs2 != offs {
t.Errorf("%s, Offset: expected offset %d; got %d", f.Name(), offs, offs2)
}
line, col := linecol(lines, offs)
msg := fmt.Sprintf("%s (offs = %d, p = %d)", f.Name(), offs, p)
checkPos(t, msg, f.Position(f.Pos(offs)), Position{f.Name(), offs, line, col})
checkPos(t, msg, fset.Position(p), Position{f.Name(), offs, line, col})
}
}
func makeTestSource(size int, lines []int) []byte {
src := make([]byte, size)
for _, offs := range lines {
if offs > 0 {
src[offs-1] = '\n'
}
}
return src
}
func TestPositions(t *testing.T) {
const delta = 7 // a non-zero base offset increment
fset := NewFileSet()
for _, test := range tests {
// verify consistency of test case
if test.source != nil && len(test.source) != test.size {
t.Errorf("%s: inconsistent test case: expected file size %d; got %d", test.filename, test.size, len(test.source))
}
// add file and verify name and size
f := fset.AddFile(test.filename, fset.Base()+delta, test.size)
if f.Name() != test.filename {
t.Errorf("expected filename %q; got %q", test.filename, f.Name())
}
if f.Size() != test.size {
t.Errorf("%s: expected file size %d; got %d", f.Name(), test.size, f.Size())
}
if fset.File(f.Pos(0)) != f {
t.Errorf("%s: f.Pos(0) was not found in f", f.Name())
}
// add lines individually and verify all positions
for i, offset := range test.lines {
f.AddLine(offset)
if f.LineCount() != i+1 {
t.Errorf("%s, AddLine: expected line count %d; got %d", f.Name(), i+1, f.LineCount())
}
// adding the same offset again should be ignored
f.AddLine(offset)
if f.LineCount() != i+1 {
t.Errorf("%s, AddLine: expected unchanged line count %d; got %d", f.Name(), i+1, f.LineCount())
}
verifyPositions(t, fset, f, test.lines[0:i+1])
}
// add lines with SetLines and verify all positions
if ok := f.SetLines(test.lines); !ok {
t.Errorf("%s: SetLines failed", f.Name())
}
if f.LineCount() != len(test.lines) {
t.Errorf("%s, SetLines: expected line count %d; got %d", f.Name(), len(test.lines), f.LineCount())
}
verifyPositions(t, fset, f, test.lines)
// add lines with SetLinesForContent and verify all positions
src := test.source
if src == nil {
// no test source available - create one from scratch
src = makeTestSource(test.size, test.lines)
}
f.SetLinesForContent(src)
if f.LineCount() != len(test.lines) {
t.Errorf("%s, SetLinesForContent: expected line count %d; got %d", f.Name(), len(test.lines), f.LineCount())
}
verifyPositions(t, fset, f, test.lines)
}
}
func TestLineInfo(t *testing.T) {
fset := NewFileSet()
f := fset.AddFile("foo", fset.Base(), 500)
lines := []int{0, 42, 77, 100, 210, 220, 277, 300, 333, 401}
// add lines individually and provide alternative line information
for _, offs := range lines {
f.AddLine(offs)
f.AddLineInfo(offs, "bar", 42)
}
// verify positions for all offsets
for offs := 0; offs <= f.Size(); offs++ {
p := f.Pos(offs)
_, col := linecol(lines, offs)
msg := fmt.Sprintf("%s (offs = %d, p = %d)", f.Name(), offs, p)
checkPos(t, msg, f.Position(f.Pos(offs)), Position{"bar", offs, 42, col})
checkPos(t, msg, fset.Position(p), Position{"bar", offs, 42, col})
}
}
func TestFiles(t *testing.T) {
fset := NewFileSet()
for i, test := range tests {
fset.AddFile(test.filename, fset.Base(), test.size)
j := 0
fset.Iterate(func(f *File) bool {
if f.Name() != tests[j].filename {
t.Errorf("expected filename = %s; got %s", tests[j].filename, f.Name())
}
j++
return true
})
if j != i+1 {
t.Errorf("expected %d files; got %d", i+1, j)
}
}
}

56
vendor/gopkg.in/gcfg.v1/token/serialize.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package token
type serializedFile struct {
// fields correspond 1:1 to fields with same (lower-case) name in File
Name string
Base int
Size int
Lines []int
Infos []lineInfo
}
type serializedFileSet struct {
Base int
Files []serializedFile
}
// Read calls decode to deserialize a file set into s; s must not be nil.
func (s *FileSet) Read(decode func(interface{}) error) error {
var ss serializedFileSet
if err := decode(&ss); err != nil {
return err
}
s.mutex.Lock()
s.base = ss.Base
files := make([]*File, len(ss.Files))
for i := 0; i < len(ss.Files); i++ {
f := &ss.Files[i]
files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos}
}
s.files = files
s.last = nil
s.mutex.Unlock()
return nil
}
// Write calls encode to serialize the file set s.
func (s *FileSet) Write(encode func(interface{}) error) error {
var ss serializedFileSet
s.mutex.Lock()
ss.Base = s.base
files := make([]serializedFile, len(s.files))
for i, f := range s.files {
files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos}
}
ss.Files = files
s.mutex.Unlock()
return encode(ss)
}
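
The Read/Write hooks are encoder-agnostic; a minimal gob round trip, mirroring the test file that follows (file name assumed):

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"

	"gopkg.in/gcfg.v1/token"
)

func main() {
	p := token.NewFileSet()
	p.AddFile("a.gcfg", p.Base(), 100)

	var buf bytes.Buffer
	if err := p.Write(func(x interface{}) error { return gob.NewEncoder(&buf).Encode(x) }); err != nil {
		panic(err)
	}

	q := token.NewFileSet()
	if err := q.Read(func(x interface{}) error { return gob.NewDecoder(&buf).Decode(x) }); err != nil {
		panic(err)
	}
	fmt.Println(q.Base() == p.Base()) // true: base offsets survive the round trip
}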

111
vendor/gopkg.in/gcfg.v1/token/serialize_test.go generated vendored Normal file
View File

@ -0,0 +1,111 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package token
import (
"bytes"
"encoding/gob"
"fmt"
"testing"
)
// equal returns nil if p and q describe the same file set;
// otherwise it returns an error describing the discrepancy.
func equal(p, q *FileSet) error {
if p == q {
// avoid deadlock if p == q
return nil
}
// not strictly needed for the test
p.mutex.Lock()
q.mutex.Lock()
defer q.mutex.Unlock()
defer p.mutex.Unlock()
if p.base != q.base {
return fmt.Errorf("different bases: %d != %d", p.base, q.base)
}
if len(p.files) != len(q.files) {
return fmt.Errorf("different number of files: %d != %d", len(p.files), len(q.files))
}
for i, f := range p.files {
g := q.files[i]
if f.set != p {
return fmt.Errorf("wrong fileset for %q", f.name)
}
if g.set != q {
return fmt.Errorf("wrong fileset for %q", g.name)
}
if f.name != g.name {
return fmt.Errorf("different filenames: %q != %q", f.name, g.name)
}
if f.base != g.base {
return fmt.Errorf("different base for %q: %d != %d", f.name, f.base, g.base)
}
if f.size != g.size {
return fmt.Errorf("different size for %q: %d != %d", f.name, f.size, g.size)
}
for j, l := range f.lines {
m := g.lines[j]
if l != m {
return fmt.Errorf("different offsets for %q", f.name)
}
}
for j, l := range f.infos {
m := g.infos[j]
if l.Offset != m.Offset || l.Filename != m.Filename || l.Line != m.Line {
return fmt.Errorf("different infos for %q", f.name)
}
}
}
// we don't care about .last - it's just a cache
return nil
}
func checkSerialize(t *testing.T, p *FileSet) {
var buf bytes.Buffer
encode := func(x interface{}) error {
return gob.NewEncoder(&buf).Encode(x)
}
if err := p.Write(encode); err != nil {
t.Errorf("writing fileset failed: %s", err)
return
}
q := NewFileSet()
decode := func(x interface{}) error {
return gob.NewDecoder(&buf).Decode(x)
}
if err := q.Read(decode); err != nil {
t.Errorf("reading fileset failed: %s", err)
return
}
if err := equal(p, q); err != nil {
t.Errorf("filesets not identical: %s", err)
}
}
func TestSerialization(t *testing.T) {
p := NewFileSet()
checkSerialize(t, p)
// add some files
for i := 0; i < 10; i++ {
f := p.AddFile(fmt.Sprintf("file%d", i), p.Base()+i, i*100)
checkSerialize(t, p)
// add some lines and alternative file infos
line := 1000
for offs := 0; offs < f.Size(); offs += 40 + i {
f.AddLine(offs)
if offs%7 == 0 {
f.AddLineInfo(offs, fmt.Sprintf("file%d", offs), line)
line += 33
}
}
checkSerialize(t, p)
}
}

83
vendor/gopkg.in/gcfg.v1/token/token.go generated vendored Normal file
View File

@ -0,0 +1,83 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package token defines constants representing the lexical tokens of the gcfg
// configuration syntax and basic operations on tokens (printing, predicates).
//
// Note that the API for the token package may change to accommodate new
// features or implementation changes in gcfg.
//
package token
import "strconv"
// Token is the set of lexical tokens of the gcfg configuration syntax.
type Token int
// The list of tokens.
const (
// Special tokens
ILLEGAL Token = iota
EOF
COMMENT
literal_beg
// Identifiers and basic type literals
// (these tokens stand for classes of literals)
IDENT // section-name, variable-name
STRING // "subsection-name", variable value
literal_end
operator_beg
// Operators and delimiters
ASSIGN // =
LBRACK // [
RBRACK // ]
EOL // \n
operator_end
)
var tokens = [...]string{
ILLEGAL: "ILLEGAL",
EOF: "EOF",
COMMENT: "COMMENT",
IDENT: "IDENT",
STRING: "STRING",
ASSIGN: "=",
LBRACK: "[",
RBRACK: "]",
EOL: "\n",
}
// String returns the string corresponding to the token tok.
// For operators and delimiters, the string is the actual token character
// sequence (e.g., for the token ASSIGN, the string is "="). For all other
// tokens the string corresponds to the token constant name (e.g. for the
// token IDENT, the string is "IDENT").
//
func (tok Token) String() string {
s := ""
if 0 <= tok && tok < Token(len(tokens)) {
s = tokens[tok]
}
if s == "" {
s = "token(" + strconv.Itoa(int(tok)) + ")"
}
return s
}
// Predicates
// IsLiteral returns true for tokens corresponding to identifiers
// and basic type literals; it returns false otherwise.
//
func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end }
// IsOperator returns true for tokens corresponding to operators and
// delimiters; it returns false otherwise.
//
func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end }
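
For illustration only (not part of the vendored file), String and the predicates behave as follows:

package main

import (
	"fmt"

	"gopkg.in/gcfg.v1/token"
)

func main() {
	fmt.Println(token.ASSIGN)              // "=": operators print their character sequence
	fmt.Println(token.IDENT)               // "IDENT": other tokens print their constant name
	fmt.Println(token.STRING.IsLiteral())  // true
	fmt.Println(token.LBRACK.IsOperator()) // true
	fmt.Println(token.COMMENT.IsLiteral()) // false: COMMENT is a special token
}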

23
vendor/gopkg.in/gcfg.v1/types/bool.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
package types
// BoolValues defines the name and value mappings for ParseBool.
var BoolValues = map[string]interface{}{
"true": true, "yes": true, "on": true, "1": true,
"false": false, "no": false, "off": false, "0": false,
}
var boolParser = func() *EnumParser {
ep := &EnumParser{}
ep.AddVals(BoolValues)
return ep
}()
// ParseBool parses bool values according to the definitions in BoolValues.
// Parsing is case-insensitive.
func ParseBool(s string) (bool, error) {
v, err := boolParser.Parse(s)
if err != nil {
return false, err
}
return v.(bool), nil
}
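
ParseBool matches the aliases above case-insensitively; a quick sketch (sample inputs chosen for illustration):

package main

import (
	"fmt"

	"gopkg.in/gcfg.v1/types"
)

func main() {
	for _, s := range []string{"yes", "Off", "1", "maybe"} {
		b, err := types.ParseBool(s)
		fmt.Printf("%-5s -> %v, err=%v\n", s, b, err) // "maybe" is the only one that fails
	}
}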

4
vendor/gopkg.in/gcfg.v1/types/doc.go generated vendored Normal file
View File

@ -0,0 +1,4 @@
// Package types defines helpers for type conversions.
//
// The API for this package is not finalized yet.
package types

44
vendor/gopkg.in/gcfg.v1/types/enum.go generated vendored Normal file
View File

@ -0,0 +1,44 @@
package types
import (
"fmt"
"reflect"
"strings"
)
// EnumParser parses "enum" values, i.e. it maps a predefined set of strings to
// predefined values.
type EnumParser struct {
Type string // type name; if not set, use type of first value added
CaseMatch bool // if true, matching of strings is case-sensitive
// PrefixMatch bool
vals map[string]interface{}
}
// AddVals adds strings and values to an EnumParser.
func (ep *EnumParser) AddVals(vals map[string]interface{}) {
if ep.vals == nil {
ep.vals = make(map[string]interface{})
}
for k, v := range vals {
if ep.Type == "" {
ep.Type = reflect.TypeOf(v).Name()
}
if !ep.CaseMatch {
k = strings.ToLower(k)
}
ep.vals[k] = v
}
}
// Parse parses the string and returns the value or an error.
func (ep EnumParser) Parse(s string) (interface{}, error) {
if !ep.CaseMatch {
s = strings.ToLower(s)
}
v, ok := ep.vals[s]
if !ok {
return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s)
}
return v, nil
}
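
EnumParser can also back custom enumerations; a hypothetical log-level parser (the type, names, and values are made up):

package main

import (
	"fmt"

	"gopkg.in/gcfg.v1/types"
)

type Level int

const (
	Debug Level = iota
	Info
	Warn
)

func main() {
	ep := &types.EnumParser{Type: "log level"}
	ep.AddVals(map[string]interface{}{
		"debug": Debug,
		"info":  Info,
		"warn":  Warn,
	})

	v, err := ep.Parse("INFO") // matching is case-insensitive unless CaseMatch is set
	if err != nil {
		panic(err)
	}
	fmt.Println(v.(Level)) // 1
}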

29
vendor/gopkg.in/gcfg.v1/types/enum_test.go generated vendored Normal file
View File

@ -0,0 +1,29 @@
package types
import (
"testing"
)
func TestEnumParserBool(t *testing.T) {
for _, tt := range []struct {
val string
res bool
ok bool
}{
{val: "tRuE", res: true, ok: true},
{val: "False", res: false, ok: true},
{val: "t", ok: false},
} {
b, err := ParseBool(tt.val)
switch {
case tt.ok && err != nil:
t.Errorf("%q: got error %v, want %v", tt.val, err, tt.res)
case !tt.ok && err == nil:
t.Errorf("%q: got %v, want error", tt.val, b)
case tt.ok && b != tt.res:
t.Errorf("%q: got %v, want %v", tt.val, b, tt.res)
default:
t.Logf("%q: got %v, %v", tt.val, b, err)
}
}
}

86
vendor/gopkg.in/gcfg.v1/types/int.go generated vendored Normal file
View File

@ -0,0 +1,86 @@
package types
import (
"fmt"
"strings"
)
// An IntMode is a mode for parsing integer values, representing a set of
// accepted bases.
type IntMode uint8
// IntMode values for ParseInt; can be combined using binary or.
const (
Dec IntMode = 1 << iota
Hex
Oct
)
// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`.
func (m IntMode) String() string {
var modes []string
if m&Dec != 0 {
modes = append(modes, "Dec")
}
if m&Hex != 0 {
modes = append(modes, "Hex")
}
if m&Oct != 0 {
modes = append(modes, "Oct")
}
return "IntMode(" + strings.Join(modes, "|") + ")"
}
var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix")
func prefix0(val string) bool {
return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0")
}
func prefix0x(val string) bool {
return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x")
}
// ParseInt parses val using mode into intptr, which must be a pointer to an
// integer kind type. Non-decimal values require the prefix `0` or `0x` when
// the mode permits base ambiguity; otherwise the prefix can be omitted.
func ParseInt(intptr interface{}, val string, mode IntMode) error {
val = strings.TrimSpace(val)
verb := byte(0)
switch mode {
case Dec:
verb = 'd'
case Dec + Hex:
if prefix0x(val) {
verb = 'v'
} else {
verb = 'd'
}
case Dec + Oct:
if prefix0(val) && !prefix0x(val) {
verb = 'v'
} else {
verb = 'd'
}
case Dec + Hex + Oct:
verb = 'v'
case Hex:
if prefix0x(val) {
verb = 'v'
} else {
verb = 'x'
}
case Oct:
verb = 'o'
case Hex + Oct:
if prefix0(val) {
verb = 'v'
} else {
return errIntAmbig
}
}
if verb == 0 {
panic("unsupported mode")
}
return ScanFully(intptr, val, verb)
}
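
A short sketch of how the mode drives base selection, consistent with the test table that follows (values chosen for illustration):

package main

import (
	"fmt"

	"gopkg.in/gcfg.v1/types"
)

func main() {
	var a, b, c int

	_ = types.ParseInt(&a, "010", types.Dec|types.Hex)    // no 0x prefix: decimal, a == 10
	_ = types.ParseInt(&b, "0x10", types.Dec|types.Hex)   // 0x prefix selects hex, b == 16
	err := types.ParseInt(&c, "10", types.Hex|types.Oct)  // ambiguous without a prefix
	fmt.Println(a, b, err != nil)                         // 10 16 true
}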

67
vendor/gopkg.in/gcfg.v1/types/int_test.go generated vendored Normal file
View File

@ -0,0 +1,67 @@
package types
import (
"reflect"
"testing"
)
func elem(p interface{}) interface{} {
return reflect.ValueOf(p).Elem().Interface()
}
func TestParseInt(t *testing.T) {
for _, tt := range []struct {
val string
mode IntMode
exp interface{}
ok bool
}{
{"0", Dec, int(0), true},
{"10", Dec, int(10), true},
{"-10", Dec, int(-10), true},
{"x", Dec, int(0), false},
{"0xa", Hex, int(0xa), true},
{"a", Hex, int(0xa), true},
{"10", Hex, int(0x10), true},
{"-0xa", Hex, int(-0xa), true},
{"0x", Hex, int(0x0), true}, // Scanf doesn't require digit behind 0x
{"-0x", Hex, int(0x0), true}, // Scanf doesn't require digit behind 0x
{"-a", Hex, int(-0xa), true},
{"-10", Hex, int(-0x10), true},
{"x", Hex, int(0), false},
{"10", Oct, int(010), true},
{"010", Oct, int(010), true},
{"-10", Oct, int(-010), true},
{"-010", Oct, int(-010), true},
{"10", Dec | Hex, int(10), true},
{"010", Dec | Hex, int(10), true},
{"0x10", Dec | Hex, int(0x10), true},
{"10", Dec | Oct, int(10), true},
{"010", Dec | Oct, int(010), true},
{"0x10", Dec | Oct, int(0), false},
{"10", Hex | Oct, int(0), false}, // need prefix to distinguish Hex/Oct
{"010", Hex | Oct, int(010), true},
{"0x10", Hex | Oct, int(0x10), true},
{"10", Dec | Hex | Oct, int(10), true},
{"010", Dec | Hex | Oct, int(010), true},
{"0x10", Dec | Hex | Oct, int(0x10), true},
} {
typ := reflect.TypeOf(tt.exp)
res := reflect.New(typ).Interface()
err := ParseInt(res, tt.val, tt.mode)
switch {
case tt.ok && err != nil:
t.Errorf("ParseInt(%v, %#v, %v): fail; got error %v, want ok",
typ, tt.val, tt.mode, err)
case !tt.ok && err == nil:
t.Errorf("ParseInt(%v, %#v, %v): fail; got %v, want error",
typ, tt.val, tt.mode, elem(res))
case tt.ok && !reflect.DeepEqual(elem(res), tt.exp):
t.Errorf("ParseInt(%v, %#v, %v): fail; got %v, want %v",
typ, tt.val, tt.mode, elem(res), tt.exp)
default:
t.Logf("ParseInt(%v, %#v, %s): pass; got %v, error %v",
typ, tt.val, tt.mode, elem(res), err)
}
}
}

23
vendor/gopkg.in/gcfg.v1/types/scan.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
package types
import (
"fmt"
"io"
"reflect"
)
// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr.
func ScanFully(ptr interface{}, val string, verb byte) error {
t := reflect.ValueOf(ptr).Elem().Type()
// attempt to read extra bytes to make sure the value is consumed
var b []byte
n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b)
switch {
case n < 1 || n == 1 && err != io.EOF:
return fmt.Errorf("failed to parse %q as %v: %v", val, t, err)
case n > 1:
return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b))
}
// n == 1 && err == io.EOF
return nil
}
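
ScanFully rejects trailing input after the scanned value; for instance (illustrative values):

package main

import (
	"fmt"

	"gopkg.in/gcfg.v1/types"
)

func main() {
	var n int
	fmt.Println(types.ScanFully(&n, "42", 'd'), n)  // <nil> 42
	fmt.Println(types.ScanFully(&n, "42x", 'd'), n) // error: extra characters "x"
}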

36
vendor/gopkg.in/gcfg.v1/types/scan_test.go generated vendored Normal file
View File

@ -0,0 +1,36 @@
package types
import (
"reflect"
"testing"
)
func TestScanFully(t *testing.T) {
for _, tt := range []struct {
val string
verb byte
res interface{}
ok bool
}{
{"a", 'v', int(0), false},
{"0x", 'v', int(0), true},
{"0x", 'd', int(0), false},
} {
d := reflect.New(reflect.TypeOf(tt.res)).Interface()
err := ScanFully(d, tt.val, tt.verb)
switch {
case tt.ok && err != nil:
t.Errorf("ScanFully(%T, %q, '%c'): want ok, got error %v",
d, tt.val, tt.verb, err)
case !tt.ok && err == nil:
t.Errorf("ScanFully(%T, %q, '%c'): want error, got %v",
d, tt.val, tt.verb, elem(d))
case tt.ok && err == nil && !reflect.DeepEqual(tt.res, elem(d)):
t.Errorf("ScanFully(%T, %q, '%c'): want %v, got %v",
d, tt.val, tt.verb, tt.res, elem(d))
default:
t.Logf("ScanFully(%T, %q, '%c') = %v; *ptr==%v",
d, tt.val, tt.verb, err, elem(d))
}
}
}